def vpls_add_delete(loops=1):
    loop = 1
    result = 'PASS'
    f_node = testbed_data.wbx_spine_2
    log_file.info("")
    log_file.info("Bounce node %s vpls %s sap %s %s times" %
                  (f_node.sysname, f_node.vpls_5g.id, f_node.vpls_5g.sap_to_hub_1, loops))
    while not loop > loops:
        log_file.info("Loop %s of %s" % (loop, loops))
        f_node.send_cli_command(
            "/configure service vpls %s customer 1 create no shutdown" % ('999'))
        f_node.send_cli_command(
            "/configure service vpls %s sap %s create no shutdown" % ('999', 'lag-47:999'))
        f_node.send_cli_command(
            "/configure service vpls %s sap %s shutdown" % ('999', 'lag-47:999'))
        f_node.send_cli_command(
            "/configure service vpls %s no sap %s" % ('999', 'lag-47:999'))
        f_node.send_cli_command("/configure service vpls %s shutdown" % ('999'))
        f_node.send_cli_command("/configure service no vpls %s" % ('999'))
        utils.countdown(1)
        loop += 1
        log_file.info("")
    return result
def create(folder_path: str, split_at: int = 20):
    """ Starts creating the dataset in folder on gameplay """
    utils.countdown()
    folder_no = 1
    file_no = 1

    # create dataset group folder
    if not os.path.exists(folder_path):
        os.mkdir(folder_path)

    while True:
        # screenshot
        img = grab()
        if img is None:
            break

        # start a new numbered folder once the current one holds split_at files
        if file_no > split_at:
            folder_no += 1
            file_no = 1

        # folder path
        current_path = os.path.join(folder_path, str(folder_no))
        if not os.path.exists(current_path):
            os.mkdir(current_path)

        current_path = os.path.join(current_path, str(file_no) + ".jpg")
        file_no += 1

        print(f"{config.CC_OKCYAN}Written: {current_path}{config.CC_ENDC}")
        cv2.imwrite(current_path, img)
        time.sleep(config.WAIT_FOR_NEXT_FRAME)
def vpls_add_delete(loops=1):
    loop = 1
    result = 'PASS'
    log_file.info("")
    log_file.info("On node %s add / delete a vpls %s times" % (tb.dut_a.sysname, loops))
    while not loop > loops:
        log_file.info("Loop %s of %s" % (loop, loops))
        tb.dut_a.send_cli_command(
            "/configure service vpls %s customer 1 create no shutdown" % ('999'))
        tb.dut_a.send_cli_command(
            "/configure service vpls %s sap %s create no shutdown" % ('999', '1/1/c53/1:999'))
        tb.dut_a.send_cli_command(
            "/configure service vpls %s sap %s shutdown" % ('999', '1/1/c53/1:999'))
        tb.dut_a.send_cli_command(
            "/configure service vpls %s no sap %s" % ('999', '1/1/c53/1:999'))
        tb.dut_a.send_cli_command("/configure service vpls %s shutdown" % ('999'))
        tb.dut_a.send_cli_command("/configure service no vpls %s" % ('999'))
        utils.countdown(1)
        loop += 1
        log_file.info("")
    return result
def vpls_add_delete_many(loops=1):
    loop = 1
    result = 'PASS'
    vpls_num = 9500
    sap_num = 3500
    f_node = testbed_data.wbx_spine_2
    log_file.info("")
    log_file.info("Bounce node %s vpls %s sap %s %s times" %
                  (f_node.sysname, f_node.vpls_5g.id, f_node.vpls_5g.sap_to_hub_1, loops))
    while not loop > loops:
        vpls_num = 9500
        sap_num = 10
        vpls_num = vpls_num + loop
        sap_num = sap_num + loop
        sap = 'lag-47:' + str(sap_num)
        log_file.info("Loop %s of %s" % (loop, loops))
        log_file.info("VPLS %s" % (vpls_num))
        log_file.info("SAP %s" % (sap))
        f_node.send_cli_command(
            "/configure service vpls %s customer 1 create no shutdown" % (vpls_num))
        f_node.send_cli_command(
            "/configure service vpls %s sap %s create no shutdown" % (vpls_num, sap))
        #utils.countdown(1)
        loop += 1
        log_file.info("")
    loop = 1
    log_file.info("Done!")
    utils.countdown(30)
    while not loop > loops:
        vpls_num = 9500
        sap_num = 10
        vpls_num = vpls_num + loop
        sap_num = sap_num + loop
        sap = 'lag-47:' + str(sap_num)
        log_file.info("Loop %s of %s" % (loop, loops))
        log_file.info("VPLS %s" % (vpls_num))
        log_file.info("SAP %s" % (sap))
        f_node.send_cli_command(
            "/configure service vpls %s sap %s shutdown" % (vpls_num, sap))
        f_node.send_cli_command(
            "/configure service vpls %s no sap %s" % (vpls_num, sap))
        f_node.send_cli_command("/configure service vpls %s shutdown" % (vpls_num))
        f_node.send_cli_command("/configure service no vpls %s" % (vpls_num))
        #utils.countdown(1)
        loop += 1
        log_file.info("")
    return result
def __call__(self, imbalance=[], prev=None, verbose=1, **kwargs) -> "Move":
    """Tells me which pose I'm supposed to do and how I'm supposed to do it.
    Also figures out next pose and deals with adding late moves"""
    print("\n" + colorama.Style.BRIGHT + self.title + colorama.Style.NORMAL)
    # Deal with imbalances
    if self.side:
        if self in imbalance:
            imbalance.remove(self)
        else:
            imbalance.append(self.otherside)
    if verbose >= 2:
        print(colorama.Fore.BLUE + utils.wrapper.fill('Prev: ' + '; '.join(map(str, prev))))
        print(colorama.Fore.MAGENTA + utils.wrapper.fill('Imbalances (%d): %s' % (len(imbalance), '; '.join(map(str, imbalance)))) \
              + colorama.Fore.RESET)
    if prev is not None:
        prev.append(self)
    # What is my next move?
    if 'nextMove' in kwargs:
        # Assume the caller knows what they're doing right now.
        # Should possibly assert that nextMove is a plausible nextMove
        nextMove = kwargs['nextMove']
        self.promoteLate(nextMove)
    else:
        for i in imbalance:
            if i in self.nextMove:
                nextMove = i
                break
        else:
            nextMove = self.notLast(prev)
    if nextMove is not None:
        print('Next Move: ' + nextMove.title)
        self.last = nextMove
    if verbose >= 1:
        print(colorama.Fore.CYAN + utils.wrapper.fill('My options were: ' + '; '.join(str(i) for i in self.nextMove)))
        print(colorama.Fore.GREEN + utils.wrapper.fill('Latemoves: ' + '; '.join(str(i) for i in self.lateMove)) + colorama.Fore.RESET)
    # Tell me what to do
    utils.speak(self.audio)
    time.sleep(0.3)
    for i in ('early', 'harder'):
        if i in kwargs and kwargs[i]:
            utils.speak(getattr(self, i, None))
    # How long am I supposed to do it?
    if 'time' in kwargs:
        t = kwargs['time']
    elif 'extended' in kwargs and kwargs['extended'] and self.extended_time:
        t = random.choice(self.extended_time)
    else:
        t = self.time
    # Actually count down
    if getattr(self, 'bind', None):
        utils.speak("Bind if you want to")
    if t > 5:
        utils.speak(str(t) + " seconds")
    if getattr(self, 'countdown', None):
        utils.countdown(t, incremental=True)
    else:
        utils.countdown(t)
    # record to file, if we were given a file
    if 'f' in kwargs and kwargs['f']:
        kwargs['f'].write('%s: %d' % (self.title, t))
        s = self.repCount()
        if s:
            kwargs['f'].write(' - %s reps' % s)
        kwargs['f'].write('\n')
        kwargs['f'].flush()
    if getattr(self, 'bind', None):
        utils.speak('Release bind')
    self.promoteLate()  # Add in options for harder followup moves next time
    return nextMove
def vpls_add_delete_many(loops=1):
    loop = 1
    result = 'PASS'
    vpls_num = 9500
    sap_num = 3500
    log_file.info("")
    log_file.info("On node %s add / delete a vpls %s times" % (tb.dut_a.sysname, loops))
    while not loop > loops:
        vpls_num = 9500
        sap_num = 10
        vpls_num = vpls_num + loop
        sap_num = sap_num + loop
        sap = '1/1/c53/1:' + str(sap_num)
        log_file.info("Loop %s of %s" % (loop, loops))
        log_file.info("VPLS %s" % (vpls_num))
        log_file.info("SAP %s" % (sap))
        tb.dut_a.send_cli_command(
            "/configure service vpls %s customer 1 create no shutdown" % (vpls_num))
        tb.dut_a.send_cli_command(
            "/configure service vpls %s sap %s create no shutdown" % (vpls_num, sap))
        loop += 1
        log_file.info("")
    loop = 1
    log_file.info("Done!")
    utils.countdown(30)
    while not loop > loops:
        vpls_num = 9500
        sap_num = 10
        vpls_num = vpls_num + loop
        sap_num = sap_num + loop
        sap = '1/1/c53/1:' + str(sap_num)
        log_file.info("Loop %s of %s" % (loop, loops))
        log_file.info("VPLS %s" % (vpls_num))
        log_file.info("SAP %s" % (sap))
        tb.dut_a.send_cli_command(
            "/configure service vpls %s sap %s shutdown" % (vpls_num, sap))
        tb.dut_a.send_cli_command(
            "/configure service vpls %s no sap %s" % (vpls_num, sap))
        tb.dut_a.send_cli_command("/configure service vpls %s shutdown" % (vpls_num))
        tb.dut_a.send_cli_command("/configure service no vpls %s" % (vpls_num))
        #utils.countdown(1)
        loop += 1
        log_file.info("")
    return result
def vpls_sap_add_delete(loops=1):
    loop = 1
    result = 'PASS'
    log_file.info("")
    log_file.info("Bounce node %s vpls %s sap %s %s times" %
                  (tb.dut_a.sysname, tb.dut_a.vpls_1.id, tb.dut_a.vpls_1.sap_1, loops))
    while not loop > loops:
        log_file.info("Loop %s" % (loop))
        tb.dut_a.send_cli_command(
            "/configure service vpls %s sap %s create no shutdown" % (tb.dut_a.vpls_1.id, '1/1/c53/1:999'))
        tb.dut_a.send_cli_command(
            "/configure service vpls %s sap %s shutdown" % (tb.dut_a.vpls_1.id, '1/1/c53/1:999'))
        tb.dut_a.send_cli_command(
            "/configure service vpls %s no sap %s" % (tb.dut_a.vpls_1.id, '1/1/c53/1:999'))
        utils.countdown(1)
        loop += 1
        log_file.info("")
    return result
def reboot_hv(loops=1):
    loop = 1
    result = 'PASS'
    f_node = testbed_data.wbx_spine_2
    c_node = testbed_data.el_1
    log_file.info("")
    log_file.info("Perform a hypervisor reboot %s times" % (loops))
    while not loop > loops:
        log_file.info("Loop %s of %s" % (loop, loops))
        f_node.wbx_hv_reboot(True)
        f_node.close()
        if c_node.to_wbx_spine_2.wait_port_oper_down_ex(30):
            log_file.info("EL 1 sees port to WBX SPINE 2 go down")
        else:
            log_file.error("EL 1 never saw port to WBX SPINE 2 go down")
            result = 'FAIL'
        utils.countdown(5)
        if f_node.wait_node_up(300):
            log_file.info("WBX VM responds to ping OK")
            log_file.info("But node is not fully back up yet")
        else:
            log_file.error("Node did not come back up after reboot")
            result = 'FAIL'
        log_file.info("Wait for connected EL1 to see WBX port come up")
        if c_node.to_wbx_spine_2.wait_port_oper_up_ex(300):
            log_file.info("EL 1 sees port to WBX come back up")
        else:
            log_file.error("EL 1 did not see port to WBX come back up")
            result = 'FAIL'
        loop += 1
        log_file.info("")
    if result == 'FAIL':
        return result
    return result
def reboot_node(loops=1):
    loop = 1
    result = 'PASS'
    f_node = testbed_data.wbx_spine_2
    c_node = testbed_data.el_1
    log_file.info("")
    log_file.info("Perform a reboot on node %s %s times" % (f_node.sysname, loops))
    while not loop > loops:
        log_file.info("Loop %s of %s" % (loop, loops))
        f_node.sr_reboot()
        f_node.close()
        if c_node.to_wbx_spine_2.wait_port_oper_down_ex(30):
            log_file.info("EL 1 sees port to WBX SPINE 2 go down")
        else:
            log_file.error("EL 1 never saw port to WBX SPINE 2 go down")
            result = 'FAIL'
        utils.countdown(5)
        if f_node.wait_node_up(300):
            log_file.info("WBX SPINE 2 responds to ping OK")
            log_file.info("But node is not fully back up yet")
        else:
            log_file.error("Node did not come back up after reboot")
        log_file.info("Wait for connected EL 1 to see WBX port come up")
        if c_node.to_wbx_spine_2.wait_port_oper_up_ex(300):
            log_file.info("EL 1 sees port to WBX SPINE 2 come back up")
        else:
            log_file.error("EL 1 did not see port to WBX SPINE 2 come back up")
            result = 'FAIL'
        f_node.send_cli_command("show time")
        loop += 1
        log_file.info("")
    return result
def reboot_node(loops=1):
    loop = 1
    result = 'PASS'
    log_file.info("")
    log_file.info("Perform a reboot on node %s %s times" % (tb.dut_a.sysname, loops))
    while not loop > loops:
        log_file.info("Loop %s of %s" % (loop, loops))
        tb.dut_a.sr_reboot()
        tb.dut_a.close()
        if tb.dut_b.local_port_1.wait_port_oper_down_ex(30):
            log_file.info("DUT-B sees port to DUT-A go down")
        else:
            log_file.error("DUT-B never saw port to DUT-A go down")
            result = 'FAIL'
        utils.countdown(5)
        if tb.dut_a.wait_node_up(300):
            log_file.info("DUT-A responds to ping OK")
            log_file.info("But node is not fully back up yet")
        else:
            log_file.error("Node did not come back up after reboot")
        log_file.info("Wait for DUT-B to see port to DUT-A come up")
        if tb.dut_b.local_port_1.wait_port_oper_up_ex(300):
            log_file.info("DUT-B sees port to DUT-A come back up")
        else:
            log_file.error("DUT-B did not see port to DUT-A come back up")
            result = 'FAIL'
        tb.dut_a.send_cli_command("show time")
        loop += 1
        log_file.info("")
    return result
def run():
    reddit = RedditBot()
    while True:
        reddit.run()
        countdown(1)
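# Every snippet in this collection blocks on a countdown helper between actions,
# but the helper itself is not included here. The sketch below is a hypothetical
# stand-in for illustration only: the real utils.countdown / countdown in these
# projects is not shown, so the unit (minutes assumed), the optional message, and
# the `incremental` flag are assumptions, not the actual implementation.
import time


def countdown_sketch(minutes=1, message=None, incremental=False):
    """Hypothetical helper: sleep for `minutes`, optionally ticking once per minute."""
    total_seconds = int(minutes * 60)
    if incremental:
        # announce remaining time once per minute
        remaining = total_seconds
        while remaining > 0:
            print("%d minute(s) remaining" % ((remaining + 59) // 60))
            step = min(60, remaining)
            time.sleep(step)
            remaining -= step
    else:
        time.sleep(total_seconds)
    if message:
        print(message)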
# Setup AWS
s3 = boto3.client('s3')
""":type: pyboto3.s3"""
bucket = 'pi-demo-raw'
key = 'jpg/{FileName}.jpg'.format(FileName=file_name)


def upload_to_s3(stream):
    logger.info("Uploading to S3...")
    stream.seek(0)
    s3.put_object(Bucket=bucket, Key=key, Body=stream.read())
    logger.info("Upload complete")


storage_handler = {'s3': upload_to_s3}[args.storage_handler]

stream = io.BytesIO()

# Init the camera
with picamera.PiCamera() as camera:
    try:
        camera.resolution = resolution
        camera.start_preview()
        countdown(delay, "Snap!")
        camera.capture(stream, 'jpeg', quality=quality)
        storage_handler(stream)
    except Exception:
        logger.error("Snap Failed")
        logger.error(traceback.format_exc())
    finally:
        logger.info("Exiting. Have a nice day!")
if os.path.isfile(MAIN_DB):
    size = os.path.getsize(MAIN_DB)
    log.info('db size: ' + str(bytesto(size, 'm')))
else:
    size = 0

if size < limit:
    # learn faster early on
    log.info('fast learning')
    learn()
    try:
        log.info('new db size: ' + str(bytesto(os.path.getsize(MAIN_DB), 'm')))
    except:
        pass
    countdown(5)

if size > limit:
    # once we learn enough start submissions and replies
    log.info('database size is big enough')
    if prob(0.02):  # 2% chance we reply to someone
        reddit.random_reply()
    if prob(0.00):  # 0% chance we make a random submission (disabled)
        log.info('making a submission')
        reddit.random_submission()
    if prob(0.10):  # 10% chance we'll learn more
        log.info('going to learn')
        learn()
    # Wait 10 minutes to comment and post because of reddit rate limits
def main(**kwargs):
    defaults = {
        "time": 30,
        "difficulty": 1,
        "initial_move": "child",
        "warmup": True,
        "cooldown": True,
        "aerobics": 0,
        "strength": 0,
        "target": "plank",
        "verbose": 1,
        "memory": 5,
        "outfile": None,
    }
    defaults.update(kwargs)
    if defaults["outfile"]:
        f = open(defaults["outfile"], "a")
        f.write(str(datetime.datetime.now()) + "\n")
        f.write(str(defaults) + "\n")
    else:
        f = None
    utils.speak('Beginning in')
    utils.countdown(3)
    if defaults["verbose"] >= 2:
        print(utils.wrapper.fill(str(defaults)))
    elif defaults["verbose"] >= 1:
        print("Workout length:", defaults['time'], "minutes.", "Beginning in:")
    # setup
    total_time = defaults['time'] * 60
    movesGraph = moves.generateMoves(difficulty=defaults["difficulty"])
    stretches.defineStretches(movesGraph, difficulty=defaults["difficulty"])
    start = time.time()
    end = start + total_time

    class Times(Enum):
        warmup_end = start + min(max(45, total_time // 15), 300)
        halfway = start + total_time // 2.4 - 30
        cooldown_begin = (end - max(60, total_time // 5)) if defaults['cooldown'] else end

    imbalance = []
    prev = collections.deque([], defaults["memory"])
    try:
        pose = movesGraph[defaults['initial_move']]
    except KeyError:
        pose = movesGraph['child']
    try:
        # warmup
        if defaults["warmup"]:
            pose = pose(time=min(30, max(15, total_time // 120 + 7)), imbalance=imbalance, prev=prev, verbose=defaults["verbose"], f=f)
            while time.time() < Times.warmup_end.value:
                pose = pose(imbalance=imbalance, extended=True, early=True, prev=prev, verbose=defaults["verbose"], f=f)  # start slower
        # get me to my target:
        moves.linkMain(movesGraph, defaults['difficulty'])
        if defaults['aerobics']:
            strengthaerobics.linkAerobics(movesGraph, defaults["difficulty"], defaults["aerobics"])
        if defaults['strength']:
            strengthaerobics.linkStrength(movesGraph, defaults["difficulty"], defaults["strength"])
            if defaults['aerobics']:
                strengthaerobics.linkStrengthAerobics(movesGraph, defaults["difficulty"], defaults["strength"], defaults["aerobics"])
        if defaults['warmup']:
            pose = fixImbalance(pose, imbalance, maxTime=max(45, total_time // 12.5), prev=prev, verbose=defaults['verbose'], f=f)
            imbalance = moves.unlinkWarmup(movesGraph, imbalance=imbalance, difficulty=defaults["difficulty"])
        try:
            target = movesGraph[defaults['target']]
        except KeyError:
            target = movesGraph['plank']
        pose = get_me_to(pose, target, imbalance=imbalance, playLast=False, prev=prev, verbose=defaults["verbose"], f=f)
        if defaults["warmup"]:
            utils.tee("Warmup Over: " + utils.prettyTime(time.time() - start), f, say="Alright, warmup over.")
        pose = pose(imbalance=imbalance, prev=prev, verbose=defaults["verbose"], f=f)
        # starting main part of workout
        while time.time() < Times.halfway.value:
            pose = fixImbalance(pose, imbalance, maxImbalance=10 + total_time // 600, maxTime=max(60, total_time // 12), prev=prev, verbose=defaults["verbose"], f=f)
            pose = pose(imbalance=imbalance, prev=prev, verbose=defaults["verbose"], f=f)
        # add harder poses in here
        if defaults["difficulty"] >= 1:
            moves.linkHarder(movesGraph, defaults["difficulty"])
            if defaults["strength"]:
                strengthaerobics.linkStrengthHarder(movesGraph, defaults["difficulty"], defaults["strength"])
        pose = fixImbalance(pose, imbalance, maxTime=max(60, total_time // 10), prev=prev, verbose=defaults["verbose"], f=f)
        try:
            pose = get_me_to(pose, movesGraph[defaults['target']], imbalance=imbalance, prev=prev, verbose=defaults["verbose"], f=f)
        except KeyError:
            pass
        if defaults["verbose"] >= 1:
            utils.tee("Halfway point: " + utils.prettyTime(time.time() - start), f, say="We have reached the halfway point")
        # end adding harder poses
        harder = defaults["difficulty"] >= 1
        while time.time() < Times.cooldown_begin.value:
            extendedChance = (time.time() - start) / total_time
            extended = random.random() < extendedChance
            pose = fixImbalance(pose, imbalance, maxImbalance=8 + total_time // 800, maxTime=max(110, total_time // 10), prev=prev, verbose=defaults["verbose"], \
                                f=f, harder=harder)
            pose = pose(harder=harder, imbalance=imbalance, extended=extended, prev=prev, verbose=defaults["verbose"], f=f)
        moves.linkEnding(movesGraph)
        while time.time() < (end - max(60, total_time // 10)):
            pose = fixImbalance(pose, imbalance, maxImbalance=max(1, total_time // 800), maxTime=max(120, total_time // 8), prev=prev, verbose=defaults["verbose"], f=f)
            pose = pose(imbalance=imbalance, prev=prev, verbose=defaults["verbose"], f=f)
        if defaults["cooldown"]:
            pose = fixImbalance(pose, imbalance, maxImbalance=1, maxTime=max(75, total_time // 10 + 15), prev=prev, verbose=defaults["verbose"], f=f)
            utils.tee("Cooldown begins: " + utils.prettyTime(time.time() - start), f, say="Cooldown begins")
            stretches.linkCooldown(movesGraph, difficulty=defaults["difficulty"])
            if defaults["strength"]:
                strengthaerobics.linkStrengthCooldown(movesGraph, difficulty=defaults["difficulty"], strength=defaults["strength"])
            if defaults["aerobics"]:
                strengthaerobics.linkAerobicsCooldown(movesGraph, difficulty=defaults["difficulty"], aerobics=defaults["aerobics"])
            pose = get_me_to(pose, movesGraph['wheel'], imbalance=imbalance, prev=prev, verbose=defaults['verbose'], f=f)
            pose = fixImbalance(pose, imbalance, maxImbalance=1, maxTime=max(60, total_time // 10), prev=prev, verbose=defaults['verbose'], f=f)
        while time.time() < ((end - max(30, total_time // 10)) if defaults["cooldown"] else end):
            # pose = pose(imbalance=imbalance, extended=True, prev=prev, verbose=defaults['verbose'], f=f)
            pose = fixImbalance(pose, imbalance, maxImbalance=1, maxTime=max(30, total_time // 10), prev=prev, verbose=defaults['verbose'], f=f)
        if defaults['cooldown']:
            moves.linkSavasana(movesGraph, difficulty=defaults['difficulty'])
            pose = fixImbalance(pose, imbalance, maxImbalance=1, maxTime=max(30, total_time // 10), prev=prev, verbose=defaults['verbose'], f=f)
            pose = get_me_to(pose, movesGraph['savasana'], imbalance=imbalance, prev=prev, verbose=defaults['verbose'], f=f)
    except (KeyboardInterrupt, BrokenPipeError):
        moves.linkSavasana(movesGraph, difficulty=defaults['difficulty'])
        pose = get_me_to(pose, movesGraph['savasana'], imbalance=imbalance, prev=prev, verbose=defaults['verbose'], f=f)
    finally:
        final_time = utils.prettyTime(time.time() - start)
        utils.tee('\nTotal Time: %s' % final_time, f=f, say='Done! Total time was %s' % final_time.replace('(', '').replace(')', ''))
        if f:
            f.close()
        sys.stdout.write(colorama.Style.RESET_ALL)
    print(utils.wrapper.fill(str(imbalance)))
    return imbalance
def live():
    """ Continuously predicts lanes in the game screen on gameplay """
    args = args_setting()
    torch.manual_seed(args.seed)
    use_cuda = args.cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    map_location = "cpu"
    if torch.cuda.is_available():
        map_location = lambda storage, loc: storage.cuda()

    # turn image into floatTensor
    op_tranforms = transforms.Compose([transforms.ToTensor()])

    # load model and weights
    print(f"{config.CC_OKCYAN}Loading model... {config.CC_ENDC}", end="")
    model = generate_model(args)
    # class_weight = torch.Tensor(config.CLASS_WEIGHT)
    # criterion = torch.nn.CrossEntropyLoss(weight=class_weight).to(device)
    pretrained_dict = torch.load(config.PRETRAINED_PATH, map_location=map_location)  # .to(device)
    model_dict = model.state_dict()
    pretrained_dict_1 = {
        k: v
        for k, v in pretrained_dict.items() if (k in model_dict)
    }
    model_dict.update(pretrained_dict_1)
    model.load_state_dict(model_dict)
    print(f"{config.CC_OKGREEN}LOADED{config.CC_ENDC}")

    signal.signal(signal.SIGINT, signal_handler)  # exit signal check

    print(f"Mode: {config.CC_BOLD}{config.CC_OKBLUE}%d{config.CC_ENDC}{config.CC_ENDC}" % args.mode)
    print(f"Continuous: {config.CC_BOLD}%r{config.CC_ENDC}" % args.continuous)

    if args.mode == 0:
        # force output due to mode
        args.out = True

    countdown()
    while True:
        img = grab()
        frame = fne(PATH + FILE)
        cv2.imwrite(frame, img)

        # load data for batches, num_workers for multiprocess
        test_loader = torch.utils.data.DataLoader(
            SingleDataset(frame, transforms=op_tranforms),
            batch_size=args.test_batch_size,
            shuffle=False,
            num_workers=1,
        )

        # continuous output
        model.eval()
        with torch.no_grad():
            for sample_batched in test_loader:
                data, _ = (
                    sample_batched["data"].to(device),
                    sample_batched["label"].type(torch.LongTensor).to(device),
                )
                output, _ = model(data)
                pred = output.max(1, keepdim=True)[1]

                # save first matrix in the tensor to a text file
                if len(pred) > 0 and len(pred[0]) > 0 and args.out:
                    np.savetxt(fna(frame, "matrix", "txt"), pred[0][0].cpu().numpy(), fmt="%d")

                # save predicted image
                if args.out:
                    img = (torch.squeeze(pred).cpu().unsqueeze(2).expand(-1, -1, 3).numpy() * 255)
                    img = Image.fromarray(img.astype(np.uint8))
                    predicted = fna(frame, "pred")
                    img.save(predicted)

                # mode
                if args.mode == 0:
                    hough_lines_p(pred, frame)
                elif args.mode == 1:
                    matrix(pred[0][0].cpu().numpy(), args.out, pred, frame)
                else:
                    # TODO
                    # add other methods to predict steering time, sensitivity and direction
                    pass

        if not args.continuous:
            break
        time.sleep(config.WAIT_FOR_NEXT_FRAME)
def init():
    log.info("db size to start replying: " + str(bytesto(MAIN_DB_MIN_SIZE, "m")))
    reddit.shadow_check()
    # check if this is the first time running the bot
    set_user_info()
    check_first_run()
    set_db_size()
    while True:
        if get_db_size() < MAIN_DB_MIN_SIZE and not COMMENTS_DISABLED:
            # learn faster early on
            log.info("""
            THE BOT IS WORKING. IT WILL TAKE ABOUT 8 HOURS FOR IT TO LEARN AND START COMMENTING.
            """)
            log.info("fast learning")
            learn()
            try:
                log.info("new db size: " + str(bytesto(get_db_size(), "m")))
            except:
                pass
            set_db_size()
            countdown(2)
        if get_db_size() > MAIN_DB_MIN_SIZE or COMMENTS_DISABLED:
            # once we learn enough start submissions and replies
            log.info("database size is big enough")
            if USE_SLEEP_SCHEDULE:
                while should_we_sleep():
                    log.info("zzzzzzzz :snore:")
                    time.sleep(60)
            for action in reddit_bot:
                if action.rate_limit_unlock_epoch != 0:
                    if action.rate_limit_unlock_epoch > get_current_epoch():
                        log.info(
                            "{} hit RateLimit recently we need to wait {} seconds with this".format(
                                action.name,
                                action.rate_limit_unlock_epoch - get_current_epoch(),
                            ))
                        continue
                    else:
                        action._replace(rate_limit_unlock_epoch=0)
                else:
                    if prob(action.probability):
                        log.info("making a random {}".format(action.name))
                        try:
                            action.action()
                        except praw.exceptions.APIException as e:
                            secs_to_wait = get_seconds_to_wait(str(e))
                            action._replace(
                                rate_limit_unlock_epoch=(get_current_epoch() + secs_to_wait))
                            log.info(
                                "{} hit RateLimit, need to sleep for {} seconds".format(
                                    action.name, secs_to_wait))
                        except Exception as e:
                            log.error("something weird happened, {}".format(e), exc_info=True)
            if prob(PROBABILITIES["LEARN"]):
                # chance we'll learn more
                log.info("going to learn")
                learn()
            # Wait 10 minutes to comment and post because of reddit rate limits
            countdown(1)
        log.info("end main loop")
def run(init_iter, display_total_earn=True, model=None):
    init_iter = int(init_iter)
    display_total_earn = display_total_earn in ['True']
    print('init_iter:', init_iter, type(init_iter))

    import os
    import sys
    sys.path.append(os.getcwd())
    import utils
    from data import IQOption

    iq = IQOption(goal='EURUSD',
                  size=60,
                  maxdict=3,
                  money=1,
                  expiration_mode=1,
                  account='PRACTICE')
    init_balance = iq.get_balance()
    print('init_balance =', init_balance)

    #init_iter = 60*1
    iter = init_iter

    #test()
    #model = Net(12, 2)
    model = LSTM_Net(input_dim=4,
                     hidden_dim=100,
                     batch_size=1,
                     output_dim=2,
                     num_layers=2).to(device)
    #model = LSTM_Net2(input_dim=4, hidden_dim=100, batch_size=1,
    #                  output_dim=2, num_layers=2).to(device)
    if str(device) == 'cpu':
        #model.load_state_dict(torch.load('strategies/model_weights/model.pth', map_location=torch.device('cpu')))
        model.load_state_dict(
            torch.load('strategies/model_weights/model_lstm.pth',
                       map_location=torch.device('cpu')))
    else:
        model.load_state_dict(torch.load('strategies/model_weights/model.pth'))
    print(f'\n{model}\n')

    wins = 0
    total = 0
    total_earn = 0
    y_pred = []
    y_true = []
    while iter > 0:
        '''
        iq.reconnect_after_10_minutes()
        utils.countdown()
        data = iq.get_candles()
        d = np.array(data).astype(np.float32)
        if d.shape[0] > 3:
            d = d[-3:]
        d = processx(d)
        d = MinMaxScaler(d)
        #pred = model.predict(d)[0]
        with torch.no_grad():
            outputs = model(torch.Tensor(d))
            _, predicted = torch.max(outputs.data, 1)
            pred = predicted.item()
            outputs = outputs[0]
            confs = F.softmax(outputs, 0)
        print(f'confidence: | confs[pred] | {confs[0]:.4f}:{confs[1]:.4f} | {abs(confs[0]-confs[1]):.4f} | ', end='')
        print(f'{utils.now()} | {init_iter-iter:3d} | ', end='')
        if pred == 0:  # trade normal
        #if pred == 1:  # trade opposite
            print(f' put', end=' ')
            result, earn = iq.buy('put', check_result=True)
        else:
            print(f'call', end=' ')
            result, earn = iq.buy('call', check_result=True)
        if result == 'win':
            wins += 1
        if result != 'equal':
            if pred == 0:
                y_pred.append('put')
                if result == 'win':
                    y_true.append('put')
                else:
                    y_true.append('call')
            else:
                y_pred.append('call')
                if result == 'win':
                    y_true.append('call')
                else:
                    y_true.append('put')
            total += 1
        total_earn += earn
        print(f'| {result:>5} | ', end='')
        print(f'{str(round(earn,2)):>5} | ', end='')
        print(f'accuracy: {wins/total:.2f} | ', end='')
        print(f'total earn: {str(round(total_earn,2)):>6}')
        iter -= 1
        '''
        if 1:
            iq.reconnect_after_10_minutes()
            utils.countdown()
            data = iq.get_candles()
            d = np.array(data).astype(np.float32)
            if d.shape[0] > 3:
                d = d[-3:]
            d = processx(d)
            d = MinMaxScaler(d)
            #pred = model.predict(d)[0]
            with torch.no_grad():
                outputs = model(torch.Tensor(d))
                _, predicted = torch.max(outputs.data, 1)
                pred = predicted.item()
                confs = F.softmax(outputs[0], 0)
            print(f'{utils.now()} | {init_iter-iter:3d} | ', end='')
            print(f'confidence: {confs[pred]:.6f} | ', end='')
            #if confs[pred] == 1:
            #    iq.money = 10
            #else:
            #    iq.money = 1
            if pred == 0:  # trade normal
            #if pred == 1:  # trade opposite
                print(f' put', end=' ')
                result, earn = iq.buy('put', check_result=True)
            else:
                print(f'call', end=' ')
                result, earn = iq.buy('call', check_result=True)
            if result == 'win':
                wins += 1
            if result != 'equal':
                if pred == 0:
                    y_pred.append('put')
                    if result == 'win':
                        y_true.append('put')
                    else:
                        y_true.append('call')
                else:
                    y_pred.append('call')
                    if result == 'win':
                        y_true.append('call')
                    else:
                        y_true.append('put')
                total += 1
            print(f'| {result:>5} | ', end='')
            print(f'{str(round(earn,2)):>6} | ', end='')
            print(f'accuracy: {wins/total:.2f} | ', end='')
            if display_total_earn:
                total_earn += earn
                print(f'total earn: {str(round(total_earn,2)):>6}')
            else:
                print()
            iter -= 1
        '''
        except TypeError:
            print('\n')
            cm = confusion_matrix(y_pred, y_true, labels=['put', 'call'], normalize='all')
            df = pd.DataFrame(cm, columns=['true_put', 'true_call'], index=['pred_put', 'pred_call'])
            final(iq, init_balance)
            sys.exit()
        except KeyboardInterrupt:
            print('\n')
            cm = confusion_matrix(y_pred, y_true, labels=['put', 'call'], normalize='all')
            df = pd.DataFrame(cm, columns=['true_put', 'true_call'], index=['pred_put', 'pred_call'])
            final(iq, init_balance)
            sys.exit()
        except:
            print('ERROR')
            pass
        '''

    print('\n')
    cm = confusion_matrix(y_pred, y_true, labels=['put', 'call'], normalize='all')
    df = pd.DataFrame(cm,
                      columns=['true_put', 'true_call'],
                      index=['pred_put', 'pred_call'])
    print()
    print(df)
    final(iq, init_balance)
def qos_1(q_port, ixia_pattern):
    ixia_100g = testbed_data.ixia_100g
    log_file.info('qos 1')

    # Set the ixia pattern based on the test
    ixia_100g.set_traffic(pattern=ixia_pattern, commit=True)

    log_file.info("Start Ixia Traffic Stream %s" % (ixia_100g.traffic_names))
    ixia_100g.start_traffic()
    ixia_100g.clear_stats()
    utils.countdown(30)
    log_file.info("Stop Ixia Traffic Stream %s" % (ixia_100g.traffic_names))
    ixia_100g.stop_traffic()

    log_file.info("")
    log_file.info("Get Ixia Stats")
    log_file.info("--------------")
    ixia_100g.set_stats()
    log_file.info("")

    for traffic_item in ixia_100g.traffic_names:
        ixia_100g.get_stats(traffic_item, 'loss%')
        dd_opt = 'Drill down per IPv6 :Traffic Class'
        drill_down_name = traffic_item + '/' + dd_opt
        ixia_100g.set_user_def_drill_stats(target_name=traffic_item, ddopt=dd_opt)
        tc_list = ixia_100g.user_stats[drill_down_name].keys()
        tc_list.remove('columnCaptions')
        for tc in tc_list:
            loss = ixia_100g.get_user_def_drill_stats(traffic_item, tc, 'loss_ms', ddopt=dd_opt)
            if tc != '0':
                if loss > 0:
                    log_file.error(
                        "Traffic Class %s has a loss of %s - expecting 0" % (tc, loss))
                else:
                    log_file.info(
                        "Traffic Class %s has a loss of %s - expecting 0" % (tc, loss))
            else:
                log_file.info("Traffic Class %s has a loss of %s" % (tc, loss))

    q_dict = {}
    if isinstance(q_port, node.Lag):
        for key, value in q_port.port_dict.iteritems():
            q_dict[key] = value.get_network_egress_dropped()
    else:
        q_dict = q_port.get_network_egress_dropped()

    log_file.info("")
    if isinstance(q_port, node.Lag):
        for key in sorted(q_port.port_dict.keys()):
            for k2 in q_dict[key].keys():
                v2 = q_dict[key][k2]
                log_file.info("Port %s : queue %s dropped = %s" % (key, k2, v2))
def upgrade_node(bofsave=False, cloud_init='v2', new_onie=None, loops=1):
    loop = 1
    result = 'PASS'
    f_node = tb.wbx
    c_node = tb.hub_1
    log_file.info("")
    log_file.info("Perform a node upgrade %s times" % (loops))
    log_file.info("")
    log_file.info("********************************")
    log_file.info("")
    log_file.info("With cloud-init.cfg version = %s" % (cloud_init))
    log_file.info("")
    log_file.info("********************************")
    while not loop > loops:
        log_file.info("Loop %s of %s" % (loop, loops))
        if bofsave:
            log_file.info("OK to save bof")
            log_file.info("Execute a bof save")
            if not (f_node.bof_save()):
                log_file.error("bof save failed")
                result = 'FAIL'
            if not (f_node.check_bof_save()):
                result = 'FAIL'
                log_file.error("bof save failed")
        else:
            log_file.info("DON'T save bof")
            log_file.info("Execute an admin save")
            if not (f_node.admin_save()):
                log_file.error("admin save failed")
                result = 'FAIL'
            if not (f_node.check_admin_save()):
                log_file.error("admin save failed")
                result = 'FAIL'
        log_file.info("Upgrade WBX")
        log_file.info("Cloud init version = %s" % (cloud_init))
        log_file.info("")
        if not f_node.wbx_hv_upgrade(cloud_init, new_onie):
            result = 'FAIL'
        f_node.close()
        if c_node.to_wbx.wait_port_oper_down_ex(30):
            log_file.info("Hub 1 sees port to WBX 89 go down")
        else:
            log_file.error("Hub 1 never saw port to WBX 89 go down")
            result = 'FAIL'
        utils.countdown(5)
        if f_node.wait_node_up(300):
            log_file.info("WBX VM responds to ping OK")
            log_file.info("But node is not fully back up yet")
        else:
            log_file.error("Node did not come back up after reboot")
            result = 'FAIL'
        log_file.info("Wait for connected CRAN Hub to see WBX port come up")
        if c_node.to_wbx.wait_port_oper_up_ex(300):
            log_file.info("Hub 1 sees port to WBX 89 come back up")
        else:
            log_file.error("Hub 1 did not see port to WBX 89 come back up")
            result = 'FAIL'
        loop += 1
        log_file.info("")
    if result == 'FAIL':
        return result
    return result
def getDissonantIndexes(images, normal, framesDir):
    totalFrames = len(images)
    resetRequest = True
    while resetRequest:
        resetRequest = False
        dissonantIndexes = []
        print("Hit SPACE for '{}'".format(normal.upper()))
        print("Press ENTER to continue")
        input()
        countdown(5)
        for classIndex, origIndex in tqdm(enumerate(images), total=len(images)):
            addFrame = False
            frame = cv2.imread(
                os.path.join(framesDir, "{}.jpg".format(origIndex)),
                cv2.IMREAD_COLOR)
            if frame is None:
                print("Error while opening {}.jpg".format(origIndex))
                print("Exiting...")
                exit()
            cv2.imshow("", frame)
            if rPressed:
                print("Restarting step...")
                resetRequest = True
                cv2.destroyAllWindows()
                break
            timesPressedBefore = timesPressed
            time.sleep(MOD_DUR)
            if spacebarPressed:
                addFrame = True
            timesPressedAfter = timesPressed
            if timesPressedAfter > timesPressedBefore:
                addFrame = True
            if addFrame:
                dissonantIndexes.append(classIndex)
            cv2.waitKey(1)
            if classIndex == len(images) - 1:
                while True:
                    print("Would you like to reset this step? Y for Yes and N for No")
                    ans = input()
                    if ans == 'Y':
                        print("Restarting step...")
                        resetRequest = True
                        cv2.destroyAllWindows()
                        break
                    elif ans == 'N':
                        print("Confirmed!")
                        break
                    else:
                        print("Wrong input!")
    finalDissonantIndexes = set()
    for classIndex in dissonantIndexes:
        left = max(0, classIndex - (FRAMES_WINDOW_SIZE))
        right = min(totalFrames, classIndex + 3)
        finalDissonantIndexes.update([images[i] for i in range(left, right)])
    return list(finalDissonantIndexes)