def draw(self):
    """Render every menu option; the currently selected one is highlighted."""
    for idx, opt in enumerate(self.OPTS):
        position = (200, 50 * idx + 50)
        if idx == self.current:
            # Selected entry: default (highlight) style.
            myprint(self.screen, opt, position)
        else:
            # Unselected entries: small style, grey colour.
            myprint(self.screen, opt, position, 's', (160, 160, 160))
def show_pretend_loading(self):
    """Show a fake loading screen (spinning icon + progress bar), then
    return the next scene name.

    The progress counter is kept on the class attribute ``Pre.counter``,
    so progress persists across calls — presumably intentional so the
    bar resumes where it left off; TODO confirm against callers.
    """
    clock = pygame.time.Clock()
    while True:
        # Seconds elapsed since the previous frame, capped at 30 FPS.
        past_second = clock.tick(30) / 1000.0
        for e in pygame.event.get():
            if e.type == QUIT:
                exit()
        speed = 1200.  # icon rotation speed, degrees per second
        self.cleanscreen()
        bar_length = 240   # progress bar width in pixels
        total_time = 150   # number of frames until "loading" completes
        myprint(self.screen, u"PRETEND TO BE LOADING", (148,200), size='enl',color=(255, 255, 255))
        # Rotate the loading icon and blit it centred on (170, 255).
        self.angle += past_second*speed
        rotated_icon = pygame.transform.rotate(self.loading_icon, self.angle)
        w, h = rotated_icon.get_size()
        draw_posi = (170-w/2, 255-h/2)
        self.screen.blit(rotated_icon, draw_posi)
        if Pre.counter < total_time:
            #print Pre.counter
            # Outline of the bar, then the filled portion, then the percent text.
            pygame.draw.rect(self.screen, (255,255,255), (200, 250, bar_length, 10), 2)
            self.screen.fill((255, 255, 255), (200, 250, Pre.counter*bar_length/total_time, 10))
            percent_str = (str)(Pre.counter*100/total_time)+'%'
            myprint(self.screen, percent_str, (215+bar_length,248), color=(255, 255, 255),size='arial')
            Pre.counter += 1
        pygame.display.update()
        if Pre.counter >= total_time:
            break
    return 'level1'
def addReward(self, rnnkey, reward):
    """Record the reward for a previously issued action and move the
    pending (state, action) entry keyed by ``rnnkey`` into the training
    batches. Triggers a training step once enough samples accumulate.

    No-op when a pre-trained model is loaded or the agent is read-only.
    """
    if self.nn_model or self._vReadOnly: # i.e. no training
        return
    assert rnnkey in self.keyedSBatch and rnnkey in self.keyedActionProb
    state = self.keyedSBatch[rnnkey]
    action_prob = self.keyedActionProb[rnnkey]
    action = self.keyedAction[rnnkey]
    myprint(
        "Training dataset:",
        {
            "input": self.keyedInputParam[rnnkey],
            "action": self._vActionset[self.keyedAction[rnnkey]],
            "key": rnnkey,
            "reward": reward,
            "action_prob": action_prob.tolist()
        })
    # The pending entry is consumed: drop it from all keyed maps.
    del self.keyedSBatch[rnnkey]
    del self.keyedActionProb[rnnkey]
    del self.keyedAction[rnnkey]
    del self.keyedInputParam[rnnkey]
    self.r_batch.append(reward)
    self.entropy_record.append(a3c.compute_entropy(action_prob[0]))
    self.s_batch.append(state)
    # One-hot encode the chosen action for the actor update.
    action_vec = np.zeros(self.a_dim)
    action_vec[action] = 1
    self.a_batch.append(action_vec)
    if len(self.r_batch) >= TRAIN_SEQ_LEN:  # do training once
        self.saveModel()
def draw(self):
    """Paint the option list onto the screen, greying out unselected items."""
    #self.screen.fill((0, 0, 0))
    for idx in xrange(len(self.OPTS)):
        label = self.OPTS[idx]
        anchor = (200, 50 * idx + 50)
        if idx != self.current:
            util_style_args = ('s', (160, 160, 160))
            myprint(self.screen, label, anchor, *util_style_args)
        else:
            myprint(self.screen, label, anchor)
def saveModel(self, s_batch, a_batch, r_batch, entropy_record, end_of_video=False):
    """Compute A3C gradients for one training sequence, log summaries,
    and periodically apply accumulated gradients and checkpoint the model.

    Returns the (possibly updated) network parameters via ``self.getParams()``.
    """
    actor_gradient, critic_gradient, td_batch = \
        a3c.compute_gradients(s_batch=np.stack(s_batch, axis=0),  # ignore the first chuck
                              a_batch=np.vstack(a_batch),         # since we don't have the
                              r_batch=np.vstack(r_batch),         # control over it
                              terminal=end_of_video, actor=self.actor,
                              critic=self.critic)
    td_loss = np.mean(td_batch)
    # Gradients are accumulated and only applied every GRADIENT_BATCH_SIZE calls.
    self.actor_gradient_batch.append(actor_gradient)
    self.critic_gradient_batch.append(critic_gradient)
    myprint("====")
    myprint("Master: Quality: Epoch", self.epoch)
    myprint("TD_loss", td_loss, "Avg_reward", np.mean(r_batch),
            "Avg_entropy", np.mean(entropy_record))
    myprint("====")
    # Write TensorBoard summaries for this sequence.
    summary_str = self.sess.run(self.summary_ops, feed_dict={
        self.summary_vars[0]: td_loss,
        self.summary_vars[1]: np.mean(r_batch),
        self.summary_vars[2]: np.mean(entropy_record)
    })
    self.writer.add_summary(summary_str, self.epoch)
    self.writer.flush()
    self.entropy_record = []
    if len(self.actor_gradient_batch) >= GRADIENT_BATCH_SIZE:
        assert len(self.actor_gradient_batch) == len(
            self.critic_gradient_batch)
        for i in range(len(self.actor_gradient_batch)):
            self.actor.apply_gradients(self.actor_gradient_batch[i])
            self.critic.apply_gradients(self.critic_gradient_batch[i])
        self.actor_gradient_batch = []
        self.critic_gradient_batch = []
        self.epoch += 1
        if self.epoch % MODEL_SAVE_INTERVAL == 0:
            # Save the neural net parameters to disk.
            save_path = self.saver.save(
                self.sess,
                self.summary_dir + "/nn_model_ep_" + str(self.epoch) + ".ckpt")
            myprint("Model saved in file: %s" % save_path)
    return self.getParams()
def draw(self):
    """Blit the menu background image, then draw the options with the
    current selection highlighted."""
    #self.screen.fill((0, 0, 0))
    background = pygame.image.load(util.file_path("menu.jpg")).convert()
    self.screen.blit(background, (0, 0))
    for idx, opt in enumerate(self.OPTS):
        anchor = (280, 80 * idx + 250)
        if idx == self.current:
            util.myprint(self.screen, opt, anchor)
        else:
            util.myprint(self.screen, opt, anchor, 's', (160, 160, 160))
def captureDNS(_interface='mywlanmonitor'):
    """Start capturing DNS packets on *_interface* and save them to the
    database (via :func:`Network.filterPackage`).

    Blocks until sniffing stops; on ``OSError`` (e.g. the interface
    disappears or permissions are missing) the DB connection is released.
    """
    myprint("Starting the scan:")
    try:
        # store=0: don't keep packets in memory; the prn callback persists them.
        sniff(iface=_interface, prn=Network.filterPackage, filter="udp port 53", store=0)
    except OSError:
        Connection_handler.remove_connection()
def remove_subdomain(domain: str) -> str:
    """Strip the subdomain from *domain* and return ``"domain.suffix"``.

    Uses a local suffix list file so no network fetch is needed.
    On any failure the input is returned unchanged (best-effort).
    """
    try:
        extract = tldextract.TLDExtract(suffix_list_urls=[
            "file://%s/%s" % (Config.project_path, "suffix_list.dat")
        ])
        extracted = extract(domain)
        return "%s.%s" % (extracted.domain, extracted.suffix)
    except Exception:
        # Deliberate broad catch: a malformed domain must not abort the
        # capture loop; log and fall back to the original string.
        myprint("Error while deleting subdomain of %s" % domain)
        return domain
def __init__(self, actionset = [], infoDept=S_LEN, infoDim=S_INFO, log_path=None, summary_dir=None, nn_model=None):
    """Build the A3C actor/critic networks and restore a checkpoint if one
    exists (either the explicitly given ``nn_model`` path or the newest
    checkpoint found under ``summary_dir/rnnQuality``).

    NOTE(review): ``actionset`` is a mutable default argument — safe only
    because it is never mutated here; confirm before refactoring.
    """
    assert summary_dir
    myprint("Central init Params:", actionset, infoDept, log_path, summary_dir, nn_model)
    self.summary_dir = os.path.join(summary_dir, "rnnQuality")
    self.nn_model = nn_model
    self.a_dim = len(actionset)          # action-space size
    self._vActionset = actionset
    self._vInfoDim = infoDim             # state feature dimension
    self._vInfoDept = infoDept           # state history depth
    if not os.path.exists(self.summary_dir):
        os.makedirs(self.summary_dir)
    self.sess = tf.Session()
    # log_file = open(os.path.join(log_path, "PensiveLearner", "wb"))
    self.actor = a3c.ActorNetwork(self.sess,
                                  state_dim=[self._vInfoDim, self._vInfoDept],
                                  action_dim=self.a_dim,
                                  learning_rate=ACTOR_LR_RATE)
    self.critic = a3c.CriticNetwork(self.sess,
                                    state_dim=[self._vInfoDim, self._vInfoDept],
                                    action_dim=self.a_dim,
                                    learning_rate=CRITIC_LR_RATE)
    self.summary_ops, self.summary_vars = a3c.build_summaries()
    self.sess.run(tf.global_variables_initializer())
    self.writer = tf.summary.FileWriter(self.summary_dir, self.sess.graph)  # training monitor
    self.saver = tf.train.Saver()  # save neural net parameters
    self.epoch = 0
    # restore neural net parameters
    if self.nn_model is None:
        # No explicit checkpoint given: look for the latest saved session.
        nn_model, epoch = guessSavedSession(self.summary_dir)
        if nn_model:
            self.nn_model = nn_model
            self.epoch = epoch
    # nn_model = NN_MODEL
    if self.nn_model is not None:  # nn_model is the path to file
        self.saver.restore(self.sess, self.nn_model)
        myprint("Model restored.")
    self.actor_gradient_batch = []
    self.critic_gradient_batch = []
def drop_database() -> bool:
    """Clear all database contents.

    Refuses (returns False) while a scan is still holding extra
    connections; returns True on success, False on any failure.
    """
    if len(Connection_handler.connections) > 1:
        util.myprint("Please stop the scan to drop the database")
        return False
    try:
        connection = Connection_handler.getConnection()
        connection.delete_all()
    except Exception:
        getLogger("sql").exception("Error while dropping the database")
        return False
    else:
        util.myprint("Database %s cleared" % Config.db_name)
    return True
def filterPackage(pkt: packet):
    """Callback invoked for every captured network packet.

    If *pkt* is a DNS query, extract the queried domain from the packet
    summary, optionally strip its subdomain, and store it together with
    the source IP and DNS server in the database.
    """
    try:
        if DNS in pkt:
            dns_str = str(pkt[DNS].summary())
            # Example dns_str: DNS Qry "b'id.google.com.'
            # FIX: regex patterns are now raw strings (identical bytes,
            # but immune to future escape-sequence surprises).
            request = re.search(r'DNS Qry "b\'.*\'', dns_str)
            if request:
                # Slice off the leading quote and the trailing ".'".
                domain = re.search(r"'.*'", request.group(0)).group(0)[1:-2]
                if not Config.subdomains:
                    domain = Network.remove_subdomain(domain)
                src, dns_server = Network.getSrcAndDst(pkt)
                myprint("Domain: %s, src: %s, dns-server: %s"
                        % (domain, src, dns_server))
                dbc = Connection_handler.getConnection()
                dbc.addRequest(domain, dns_server, ip=src)
    except Exception:
        # Broad catch is deliberate: one bad packet must not kill the sniffer.
        logger = getLogger("network")
        logger.exception("Fatal error in main loop")
def validate_model(model, dataset_tuples, use_cuda, log_filepath=None):
    """Evaluate *model* on every validation sequence.

    Computes per-frame classification accuracy for each
    (subject, action, sequence) triple, logs each value plus the overall
    mean and standard deviation, and returns the accuracy dict.
    """
    accuracies = {}
    for subj_name, action_name, seq_ix, joints_seq in dataset_tuples['valid']:
        # Shape the sequence as (frames, 1, features) for the model.
        seq = torch.from_numpy(joints_seq).float()
        seq = seq.view(seq.shape[0], 1, -1)
        if use_cuda:
            seq = seq.cuda()
        output = model(seq)
        if use_cuda:
            output = output.cpu()
        # Fraction of frames whose argmax matches the true action index.
        target_idx = dataset_tuples['action_to_idx'][action_name]
        predictions = np.argmax(output.data.numpy(), axis=1)
        accuracies[(subj_name, action_name, seq_ix)] = (
            np.sum(predictions == target_idx) / predictions.shape[0])
    collected = []
    for key in accuracies.keys():
        myprint(str(key) + ' : ' + str(accuracies[key]), log_filepath)
        collected.append(accuracies[key])
    myprint('Overall mean: ' + str(np.mean(collected)), log_filepath)
    myprint('Overall stddev: ' + str(np.std(collected)), log_filepath)
    return accuracies
def __init__(self, params, indexlist, phase, proc_id):
    """Set up one prefetching worker: its share of the batch, the LMDB
    cursors for pose/flip/landmark lookups, and the image transformer.

    :param params:    dict with 'batch_size', 'im_shape', 'source', 'mean_file'
    :param indexlist: list of index lines this worker is responsible for
    :param phase:     1 = validation, anything else = training
    :param proc_id:   worker id in [0, _nJobs)
    """
    super(BatchLoader, self).__init__()
    self.indexlist = indexlist
    self.proc_id = proc_id
    self.batch_size = params['batch_size']
    self.im_shape = params['im_shape']
    self.phase = phase
    self.queue = Queue(_QSIZE)
    #rec_conn, send_conn = Pipe()
    # self.rec_conn = rec_conn
    # self.send_conn = send_conn
    ## Dividing with rest the batch size for the jobs we have
    self.batch_ck_size = self.batch_size // _nJobs
    ## in case of the last jobs adding the rest
    if self.proc_id == (_nJobs - 1):
        self.batch_ck_size += self.batch_size % _nJobs
    ## Opening LMDB (read-only, no locking: many workers share these DBs)
    lmdb_output_pose_env = lmdb.Environment(params['source'] + '/pose_lmdb/', readonly=True, lock=False)
    self.cur_pose = lmdb_output_pose_env.begin().cursor()
    lmdb_output_flip_env = lmdb.Environment(params['source'] + '/flip_lmdb/', readonly=True, lock=False)
    self.cur_flip = lmdb_output_flip_env.begin().cursor()
    lmdb_output_land_env = lmdb.Environment(params['source'] + '/land_lmdb/', readonly=True, lock=False)
    self.cur_land = lmdb_output_land_env.begin().cursor()
    ################
    self.Nimgs = len(self.indexlist)
    # this class does some simple data-manipulations
    #proto_data = open(params['mean_file'], "rb").read()
    #a = caffe.io.caffe_pb2.BlobProto.FromString(proto_data)
    #mean = caffe.io.blobproto_to_array(a)[0]
    ## mean is read BGR and c,h,w; we convert it to h,w,c.
    ## BGR is OK since OpenCV and caffe are BGR
    ## Then MySimpleTransformer will remove mean after that the image
    ## has been changed to BGR as well. So apple-to-apple.
    self.transformer = MySimpleTransformer()
    self.aug_tr = aug_tracker.AugmentationTracker()
    if params['mean_file'] is not None:
        # Mean image stored c,h,w; transpose to h,w,c before use.
        mean = np.load(params['mean_file'])
        mean = mean.transpose(1, 2, 0)
        mean = np.float32(mean)
        self.transformer.set_mean(mean)
    if self.phase == 1:
        util.myprint("BatchLoader_valid" + str(self.proc_id) + " initialized with " + str(self.Nimgs) + " images")
    else:
        util.myprint("BatchLoader_train" + str(self.proc_id) + " initialized with " + str(self.Nimgs) + " images")
    util.myprint("This will process: " + str(self.batch_ck_size) + '/' + str(self.batch_size))
def run(self, level):
    """Game-over screen loop: offer Retry (Enter / left button) and
    Menu (Esc / right button) and return the next scene name.

    BUG FIX: the mouse-click test for the Menu button previously used
    ``mouse_position``, a variable only assigned later in the loop body,
    so a click on the very first iteration raised NameError. It now uses
    the click event's own ``e.pos``, matching the Retry-button check.
    """
    WIDTH = 420
    HEIGHT = 440
    game_area = self.screen.subsurface(20, 20, 420, 440)
    while True:
        for e in pygame.event.get():
            if e.type == QUIT:
                return 'quit'
            if e.type == MOUSEBUTTONUP:
                # Retry button
                if self.is_over(e.pos, (WIDTH/2-70, HEIGHT/2+60)) and e.button == 1:
                    if level == 1:
                        return 'game1'
                    elif level == 2:
                        return 'game2'
                    elif level == 3:
                        return 'game3'
                # Menu button (was: mouse_position — unbound on first pass)
                if self.is_over(e.pos, (WIDTH/2+30, HEIGHT/2+60)) and e.button == 1:
                    return 'level1'
            if e.type == KEYUP:
                if e.key == K_RETURN:
                    if level == 1:
                        return 'game1'
                    elif level == 2:
                        return 'game2'
                    elif level == 3:
                        return 'game3'
                if e.key == K_ESCAPE:
                    return 'level1'
        # Hover highlighting: orange fill when the cursor is over a button.
        mouse_position = pygame.mouse.get_pos()
        if self.is_over(mouse_position, (WIDTH/2-70, HEIGHT/2+60)):
            pygame.draw.rect(game_area, (255,127,0), (WIDTH/2-90, HEIGHT/2+40, 80, 30))
            pygame.draw.rect(game_area, (255,255,255), (WIDTH/2-90, HEIGHT/2+40, 80, 30), 2)
            myprint(game_area, 'Retry(Enter)', (WIDTH/2-76, HEIGHT/2+47),size='dk')
        else:
            pygame.draw.rect(game_area, (0,0,0), (WIDTH/2-90, HEIGHT/2+40, 80, 30))
            pygame.draw.rect(game_area, (255,255,255), (WIDTH/2-90, HEIGHT/2+40, 80, 30), 2)
            myprint(game_area, 'Retry(Enter)', (WIDTH/2-76, HEIGHT/2+47),size='dk')
        if self.is_over(mouse_position, (WIDTH/2+30, HEIGHT/2+60)):
            pygame.draw.rect(game_area, (255,127,0), (WIDTH/2+10, HEIGHT/2+40, 80, 30))
            pygame.draw.rect(game_area, (255,255,255), (WIDTH/2+10, HEIGHT/2+40, 80, 30), 2)
            myprint(game_area, 'Menu(Esc)', (WIDTH/2+30, HEIGHT/2+47),size='dk')
        else:
            pygame.draw.rect(game_area, (0,0,0), (WIDTH/2+10, HEIGHT/2+40, 80, 30))
            pygame.draw.rect(game_area, (255,255,255), (WIDTH/2+10, HEIGHT/2+40, 80, 30), 2)
            myprint(game_area, 'Menu(Esc)', (WIDTH/2+30, HEIGHT/2+47),size='dk')
        pygame.display.update()
def show_info_game(self):
    """Display the controls help screen, then block until ENTER is pressed.

    The on-screen strings are Chinese: movement = arrow keys,
    use item = space, pause = p.
    """
    self.cleanscreen()
    # myprint returns the rendered line height, used to stack the lines.
    h = myprint(self.screen, u"控制方向:上下左右", (230,150))
    myprint(self.screen, u"使用道具:空格", (250,160+h))
    myprint(self.screen, u"暂停:p", (275,170+2*h))
    pygame.display.update()
    pygame.time.wait(1000)
    myprint(self.screen, u"PRESS ENTER TO START...", (450,400), size='en')
    pygame.display.update()
    while True:
        # Drain the event queue so QUIT still works while waiting.
        for e in pygame.event.get():
            if e.type == QUIT:
                exit()
        pressed_keys = pygame.key.get_pressed()
        #pressed_mouse = pygame.mouse.get_pressed()
        if pressed_keys[K_RETURN]:
            break
def run(self):
    """Worker main loop: endlessly read index entries, load and augment the
    images, and push completed mini-batch chunks onto ``self.queue``.

    Runs forever (daemonic prefetch process); the index list is reshuffled
    after every full (augmented) epoch.
    """
    if self.phase == 1:
        util.myprint("Process started pre-fetching for Validation " + str(self.proc_id) + " : nimgs " + str(self.Nimgs) )
    else:
        util.myprint("Process started pre-fetching for Training " + str(self.proc_id) + " : nimgs " + str(self.Nimgs) )
    ## Counter over the entire augmented set
    count = 0
    ## Counter within the current mini-batch chunk
    countStep = 0
    ## Pre-allocate the data for the mini-batch
    listData = [None]*self.batch_ck_size
    while True:
        for ii in range(0,self.Nimgs):
            ####### Checking if we finished an (augmented) epoch
            if count == self.Nimgs:
                util.myprint("Finished an (augmented) epoch for loader id " + str(self.proc_id) + "...shuffling")
                count = 0
                shuffle(self.indexlist)
            # ######## Part to resume/wait a certain process when the other is operating
            # if self.phase == 1:
            #     if not _eventValidList[self.proc_id].is_set():
            #         util.myprint('Waiting Validation Loader ' + str(self.proc_id) + ' to start again')
            #         _eventValidList[self.proc_id].wait()
            # else:
            #     if not _eventTrainList[self.proc_id].is_set():
            #         util.myprint('Waiting Train Loader ' + str(self.proc_id) + ' to start again')
            #         _eventTrainList[self.proc_id].wait()
            ### Starting to do augmentation
            batch_img = None
            # index is of form:
            # blur_fr_13 XXXm.0hhvfrvXXX_MS000024 !!TMPDIR!!/imgs/XXXm.0hhvfrvXXX/XXXm.0hhvfrvXXX_MS000024.jpg 0
            index = self.indexlist[ii]
            index = index.split(' ')
            aug_type = index[0]         # augmentation type
            image_key = index[1]        # image key
            image_file_name = index[2]  # image path
            label = np.float32(index[3])  # label
            ## Loading the image with OpenCV.
            ## flip_lmdb stores per-image flags; element [1] == 1 means "flip".
            flipON = int( np.frombuffer( self.cur_flip.get(image_key) )[1] ) == 1
            im = cv2.imread(image_file_name,cv2.CV_LOAD_IMAGE_COLOR)
            ## Check immediately if we have to flip the image
            if flipON:
                im = cv2.flip(im, 1)
            im_arr = np.asarray(im)
            aug_im = None
            if 'align2d' in aug_type or 'blur' in aug_type:
                # Landmark-based augmentation: 68 (x, y) points per image.
                lmark = self.cur_land.get(image_key)
                lmark = np.frombuffer(lmark, dtype='float64').reshape(68,2)
                lmarks = np.zeros((1,68,2))
                lmarks[0] = lmark
                aug_im = self.aug_tr.augment_fast(aug_type=aug_type,img=im,landmarks=lmarks,flipON=flipON)
            elif 'render' in aug_type:
                # Pose-based rendering: 3x4 projection matrix from pose_lmdb.
                prj_matrix = np.frombuffer(self.cur_pose.get(image_key+'_'+aug_type), dtype='float64').reshape(3,4)
                prj_matrix = np.asmatrix(prj_matrix)
                aug_im = self.aug_tr.augment_fast(aug_type=aug_type,img=im,prj_matrix=prj_matrix,flipON=flipON)
            try:
                aug_im = cv2.resize(aug_im, ( self.im_shape[0], self.im_shape[1] ),\
                                    interpolation=cv2.INTER_LINEAR )
                batch_img = self.transformer.preprocess(aug_im)
            except Exception as ex:
                # Failed augmentation (e.g. aug_im is None): skip the image.
                util.myprint("Warning: Was not able to use aug_img because: " + str(ex))
                util.myprint( "Skipping the image: " + image_file_name)
            count += 1
            ## If the image has been processed correctly, add it to the mini-batch
            if batch_img is not None:
                data = {'img': batch_img , 'label' : label}
                listData[countStep] = data
                countStep+=1
                if countStep == self.batch_ck_size:
                    # Spin until the bounded queue has room for this chunk.
                    isDone = False
                    while not isDone:
                        try:
                            ## This mini-batch is ready to be sent for training.
                            ## Resetting the relative listData and countStep.
                            self.queue.put_nowait( list(listData) )
                        except std_Queue.Full as full:
                            pass
                        else:
                            #self.send_conn.send( (listData) )
                            countStep = 0
                            isDone = True
                            listData = [None]*self.batch_ck_size
def __init__ (self, mode = 'training', batch_size = 16, im_shape = (224,224), source = None, mean_file=None , latent_dim = None, if_xscale = False):
    """Data generator: reads the train/valid index list from *source*,
    splits it across ``_nJobs`` BatchLoader worker processes, starts them,
    and wires a Collector to merge their per-worker chunks into batches.

    Only ``mode`` values 'training' and 'validation' are supported.
    """
    #mean_file = 'model/keras_mean_img.npy'
    self.mode = mode
    self.batch_size = batch_size
    self.im_shape = im_shape
    self.source = source  # path points to the dir with train/val list file
    self.mean_file = mean_file
    self.latent_dim = latent_dim
    self.if_xscale = if_xscale
    #self.n_classes = n_classes
    assert source is not None
    #assert mean_file is not None
    #assert n_classes is not None
    params = dict()
    params['batch_size'] = batch_size
    params['im_shape'] = im_shape
    params['split'] = mode  # train, val, test
    params['source'] = source
    params['mean_file'] = mean_file
    # only training and validation supported
    if mode == 'training':
        list_file = source + 'train.list'
        self.phase = 0
        self.indexlist = [line.rstrip('\n') for line in open(list_file)]
        ## Immediate shuffling is important
        shuffle(self.indexlist)
    elif mode == 'validation':
        list_file = source + 'valid.list'
        self.phase = 1
        self.indexlist = [line.rstrip('\n') for line in open(list_file)]
    self.nb_samples = len(self.indexlist)
    # Split the index list into one contiguous chunk per worker.
    indexlist_chunks = [ list(i) for i in np.array_split(np.array(self.indexlist), _nJobs) ]
    self.batch_loader_list = []
    for j in range(0,_nJobs):
        util.myprint("Starting pre-fetching processes id: "+str(j))
        batch_loader = BatchLoader(params, indexlist_chunks[j], self.phase, j)
        ## Starting the process formally ( it will wait until we event.set() )
        batch_loader.start()
        self.batch_loader_list.append(batch_loader)
    self.collector = Collector( self.batch_loader_list,self.batch_size, self.phase )
    # Ensure worker processes are torn down when the interpreter exits.
    def cleanup():
        util.myprint('Terminating BatchLoader')
        for j in range(0,_nJobs):
            self.batch_loader_list[j].terminate()
            self.batch_loader_list[j].join()
        #self.collector.terminate()
        #self.collector.join()
    import atexit
    atexit.register(cleanup)
    ########### reshape tops####################
    #TODO
    if self.phase == 1:
        print_info("FaceAugDataGen_valid", params)
    else:
        print_info("FaceAugDataGen_train", params)
    return
def draw(self):
    """Draw the board and the active shape; overlay PAUSE when paused."""
    self.screen.blit(self.board_image, self.START)
    self.shape.draw(self.screen)
    if not self.pause:
        return
    banner_pos = (self.START[0] + 50, self.START[1] + 200)
    util.myprint(self.screen, "PAUSE", banner_pos, "m")
def _display_score(self):
    """Render the score panel (level, cleared lines, score).

    Panel coordinates are computed once on first call and cached on the
    instance.
    """
    if not hasattr(self, '_score_board'):
        board_x = self.START[0] + self.WIDTH + 30
        board_y = self.START[1] + 100
        self._score_board = (board_x, board_y, 200, 260)
        self._score_level = (board_x + 10, board_y + 10)
        self._score_level_v = (board_x + 30, board_y + 50)
        self._score_killed = (board_x + 10, board_y + 90)
        self._score_killed_v = (board_x + 30, board_y + 130)
        self._score_score = (board_x + 10, board_y + 170)
        self._score_score_v = (board_x + 30, board_y + 210)
    self.screen.fill((0, 0, 0), self._score_board)
    entries = (
        ('LEVEL', self._score_level),
        (self.level, self._score_level_v),
        ('LINES', self._score_killed),
        (self.killed, self._score_killed_v),
        ('SCORE', self._score_score),
        (self.score, self._score_score_v),
    )
    for text, pos in entries:
        util.myprint(self.screen, text, pos, 'm')
def training(config, cla):
    """Full training driver for the separation model.

    Builds the model and optimizer ops, restores any existing checkpoint,
    then for each epoch runs one pass over the training set (PIT loss or
    fixed-permutation loss, per config) and one pass over the validation
    set, halving the learning rate after ``half_lr_patience`` epochs
    without SDR improvement and checkpointing on improvement.

    NOTE(review): the epoch loops terminate via OutOfRangeError /
    StopIteration, so per-epoch totals rely on the dataset raising —
    an empty dataset would divide by zero in the mean computation.
    """
    g_global_step = tf.Variable(0, trainable=False, name=config['model']['type']+"_global_step")
    glr = config['optimizer']['lr']
    sess = tf.Session()
    # build model
    G = hparams.get_model(config['model']['type'])(config, sess)
    ## update params
    G_vars = [var for var in tf.trainable_variables() if config['model']['type'] in var.name]
    util.count_params(G_vars, config['model']['type'])
    util.total_params()
    g_learning_rate = tf.placeholder(tf.float32, [])
    g_ozer = hparams.get_optimizer(config['optimizer']['type'])(learn_rate=g_learning_rate)
    # Two update ops: one for the PIT loss, one for the fixed-permutation loss.
    g_grad = g_ozer.compute_gradients(G.loss, G_vars)
    g_update = g_ozer.apply_gradients(g_grad, global_step=g_global_step)
    g_grad_fix = g_ozer.compute_gradients(G.loss_fix, G_vars)
    g_update_fix = g_ozer.apply_gradients(g_grad_fix, global_step=g_global_step)
    ## restore from checkpoint
    G_save_path = os.path.join(config['training']['path'], 'generat.ckpt')
    sess.run(tf.global_variables_initializer())
    G.load(G_save_path)
    history_file = os.path.join(config['training']['path'], 'history.txt')
    tr_dataset = get_dataset(config, 'tr')
    cv_dataset = get_dataset(config, 'cv')
    tr_next = tr_dataset.get_iterator()
    cv_next = cv_dataset.get_iterator()
    valid_best_sdr = float('-inf')
    valid_wait = 0
    if config['training']['perm_path'] != None:
        fixed_perm_list = util.read_pretrained_perm(config['training']['perm_path'], tr_dataset.file_base)
    last_step = sess.run(g_global_step)
    # 20000 appears to be the training-set size — TODO confirm; perm history
    # is either freshly initialised or reloaded from the last checkpoint step.
    tr_audio_perm = {i:[] for i in range(20000)} if last_step == 0 else util.load_perm(config, 'tr', last_step, tr_dataset, 20000)
    # Resume epoch numbering from the restored global step.
    for epoch in range(last_step//(20000//config['training']['batch_size'])+1, config['training']['num_epochs'] + 1):
        tr_loss = tr_size = tr_sdr = 0.0
        util.myprint(history_file, '-' * 20 + ' epoch {} '.format(epoch) + '-' * 20)
        ## training data initial
        if hasattr(tr_dataset, 'iterator'):
            sess.run(tr_dataset.iterator.initializer)
        else:
            tr_gen = tr_dataset.get_next()
        while True:
            try:
                feed_audio, audio_idx = sess.run(tr_next) if tr_next != None else next(tr_gen)
                if config['training']['pit'] == True:
                    # Permutation-invariant training step.
                    g_loss, g_sdr, g_curr_step, _, g_perm_idx = sess.run(
                        fetches=[G.loss, G.sdr, g_global_step, g_update, G.perm_idxs],
                        feed_dict={G.audios: feed_audio, g_learning_rate: glr})
                elif config['training']['perm_path'] != None:
                    # Fixed-permutation step using the pretrained assignment.
                    fixed_perm = np.take(fixed_perm_list, audio_idx, axis=0)
                    g_loss, g_sdr, g_curr_step, _, g_perm_idx = sess.run(
                        fetches=[G.loss_fix, G.sdr_fix, g_global_step, g_update_fix, G.perm_idxs_fix],
                        feed_dict={G.audios: feed_audio, g_learning_rate: glr, G.fixed_perm: fixed_perm})
                tr_loss += g_loss
                tr_sdr += g_sdr
                tr_size += 1
                print('Train step {}: {} = {:5f}, sdr = {:5f}, lr = {}'.
                      format(g_curr_step, config['training']['loss'], g_loss, g_sdr, glr), end='\r')
                # record label assignment
                for _i, _id in enumerate(audio_idx):
                    tr_audio_perm[_id].append(g_perm_idx[_i].tolist())
            except (tf.errors.OutOfRangeError, StopIteration):
                # Dataset exhausted: log last-step and mean statistics.
                util.myprint(history_file, 'Train step {}: {} = {:5f}, sdr = {:5f}, lr = {}'.
                             format(g_curr_step, config['training']['loss'], g_loss, g_sdr, glr))
                util.myprint(history_file, 'mean {} = {:5f} , mean sdr = {:5f}, lr = {}'.
                             format(config['training']['loss'], tr_loss/tr_size, tr_sdr/tr_size, glr))
                break
        ## valid iteration
        if hasattr(cv_dataset, 'iterator'):
            sess.run(cv_dataset.iterator.initializer)
        else:
            cv_gen = cv_dataset.get_next()
        cv_loss = cv_size = cv_sdr = 0.0
        while True:
            try:
                feed_audio, audio_idx = sess.run(cv_next) if cv_next != None else next(cv_gen)
                g_loss, g_sdr = sess.run(fetches=[G.loss, G.sdr], feed_dict={G.audios: feed_audio})
                cv_loss += g_loss
                cv_sdr += g_sdr
                cv_size += 1
            except (tf.errors.OutOfRangeError, StopIteration):
                curr_loss = cv_loss/cv_size
                curr_sdr = cv_sdr/cv_size
                util.myprint(history_file, 'Valid '+ config['training']['loss'] +' = {:5f}, sdr = {}'.\
                             format(curr_loss, curr_sdr))
                ## save model for every improvement of the best valid score
                ## or at the last epoch
                if curr_sdr > valid_best_sdr or epoch == config['training']['num_epochs']:
                    util.myprint(history_file, 'Save Model')
                    valid_wait = 0
                    valid_best_sdr = curr_sdr
                    G.save(G_save_path, g_curr_step)
                else:
                    # No improvement: halve the LR after half_lr_patience misses.
                    valid_wait += 1
                    if valid_wait == config['training']['half_lr_patience']:
                        glr /= 2; valid_wait = 0
                break
        util.write(os.path.join(config['training']['path'], 'tr_perm.csv'), tr_dataset.file_base, tr_audio_perm, epoch, config['training']['n_speaker'])
def run(self, mode): GAME_WIDTH = 420 GAME_HEIGHT = 440 human = pygame.image.load(file_path("walk.png")).convert_alpha() star_img1= pygame.image.load(file_path("star1.png")).convert_alpha() star_img2= pygame.image.load(file_path("star2.png")).convert_alpha() star_img3= pygame.image.load(file_path("star3.png")).convert_alpha() star_img4= pygame.image.load(file_path("star4.png")).convert_alpha() star_img5= pygame.image.load(file_path("star5.png")).convert_alpha() star_img6= pygame.image.load(file_path("star6.png")).convert_alpha() #star_img7= pygame.image.load(file_path("star7.png")).convert_alpha() paused = False if mode==1: self.scoreleft = 600 DB_COUNT = 10 init_pig = pig.Pig() clock = pygame.time.Clock() world = World(self.screen) gamespeed = 4. for db in xrange(DB_COUNT): db = entity.Dumbbell(world) my_random_posi = self.my_random() db.location = MyVector(my_random_posi) world.add_entity(db) me = entity.Me(world, human) me.location = MyVector((GAME_WIDTH/2, GAME_HEIGHT/2)) world.add_entity(me) while True: for e in pygame.event.get(): if e.type == QUIT: return 'quit' if e.type == KEYUP: if e.key == K_SPACE: paused = not paused if not paused: past_second = clock.tick(30) / 1000.0 self.scoreleft -= 1 # 一定几率不断出现星星 if randint(1,200) == 1: start_obj1 = entity.Star(world, star_img1, 1) my_random_posi = self.my_random() start_obj1.location = MyVector(my_random_posi) world.add_entity(start_obj1) if randint(1,200) == 1: start_obj3 = entity.Star(world, star_img3, 3) my_random_posi = self.my_random() start_obj3.location = MyVector(my_random_posi) world.add_entity(start_obj3) if randint(1,300) == 1: start_obj6 = entity.Star(world, star_img6, 6) my_random_posi = self.my_random() start_obj6.location = MyVector(my_random_posi) world.add_entity(start_obj6) #画世界 world.render() #世界运行 world.process(past_second) #积分区域 score_area = self.screen.subsurface(460, 20, 160, 60) score_area.fill((0,0,0)) myprint(score_area, 'LEVEL 1', (5,5), color=(255,255,255), size='enl') 
myprint(score_area, 'To Next Level: ', (5,35), color=(255,255,255), size='dk') myprint(score_area, str(self.scoreleft), (80,35), color=(255,255,255), size='dk') #画猪 pig_area = self.screen.subsurface(460, 90, 160, 370) pig_area.fill((0,0,0)) if world.left_20: init_pig.shy = False gamespeed = 4. init_pig.dynamic_tra_start1 = (init_pig.dynamic_tra_start1[0]-20, init_pig.dynamic_tra_start1[1]) init_pig.dynamic_tra_start2 = (init_pig.dynamic_tra_start2[0]-20, init_pig.dynamic_tra_start2[1]) world.left_20 = False if world.right_20: init_pig.shy = False gamespeed = 4. init_pig.dynamic_tra_start1 = (init_pig.dynamic_tra_start1[0]+20, init_pig.dynamic_tra_start1[1]) init_pig.dynamic_tra_start2 = (init_pig.dynamic_tra_start2[0]+20, init_pig.dynamic_tra_start2[1]) world.right_20 = False if world.left_10: init_pig.shy = False gamespeed = 4. init_pig.dynamic_tra_start1 = (init_pig.dynamic_tra_start1[0]-10, init_pig.dynamic_tra_start1[1]) init_pig.dynamic_tra_start2 = (init_pig.dynamic_tra_start2[0]-10, init_pig.dynamic_tra_start2[1]) world.left_10 = False if world.right_10: init_pig.shy = False gamespeed = 4. init_pig.dynamic_tra_start1 = (init_pig.dynamic_tra_start1[0]+10, init_pig.dynamic_tra_start1[1]) init_pig.dynamic_tra_start2 = (init_pig.dynamic_tra_start2[0]+10, init_pig.dynamic_tra_start2[1]) world.right_10 = False if world.speedup: gamespeed = 20. init_pig.shy = True world.speedup = False if world.handsweat: init_pig.shy = False gamespeed = 4. halfneck = 262-(236-init_pig.dynamic_tra_start1[1]+26)/2 init_pig.dynamic_tra_start1 = (init_pig.dynamic_tra_start1[0], halfneck) init_pig.dynamic_tra_start2 = (init_pig.dynamic_tra_start2[0], halfneck) init_pig.sweat = True world.handsweat = False if init_pig.sweat: init_pig.draw_dynamic_pig(pig_area, (80,300), 0.) 
else: init_pig.draw_dynamic_pig(pig_area, (80,300), past_second, neck_speed=gamespeed) #被哑铃砸到 if world.ifdead: #死亡讯息 game_area = self.screen.subsurface(20, 20, 420, 440) myprint(game_area, u'你被哑铃砸到了!斜方怪非常开心!', (GAME_WIDTH/2-130, GAME_HEIGHT/2), color=(255,127,0)) #myprint(game_area, u'斜方肌猛涨', (GAME_WIDTH/2-130, GAME_HEIGHT/2), color=(255,127,0)) return 'retry1' #斜方怪锻炼完成 if init_pig.pigtootall: #斜方肌太强壮信息 game_area = self.screen.subsurface(20, 20, 420, 440) myprint(game_area, u'怎么都让斜方怪长成这样了!', (GAME_WIDTH/2-115, GAME_HEIGHT/2-20), color=(255,127,0)) myprint(game_area, u'地球再也打不过斜方星了!', (GAME_WIDTH/2-105, GAME_HEIGHT/2), color=(255,127,0)) return 'retry1' #检查是否进入下一关 if self.scoreleft == 0: return 'next2' pygame.display.update() else: print 'paused' if mode == 2: paused = False self.scoreleft = 1000 DB_COUNT = 10 init_pig = pig.Pig() clock = pygame.time.Clock() world = World(self.screen) gamespeed = 6. for db in xrange(DB_COUNT): db = entity.Dumbbell(world) my_random_posi = self.my_random() db.location = MyVector(my_random_posi) world.add_entity(db) me = entity.Me(world, human) me.location = MyVector((GAME_WIDTH/2, GAME_HEIGHT/2)) world.add_entity(me) while True: for e in pygame.event.get(): if e.type == QUIT: return 'quit' if e.type == KEYUP: if e.key == K_SPACE: paused = not paused if not paused: past_second = clock.tick(30) / 1000.0 self.scoreleft -= 1 # 一定几率不断出现星星 if randint(1,400) == 1: start_obj1 = entity.Star(world, star_img1, 1) my_random_posi = self.my_random() start_obj1.location = MyVector(my_random_posi) world.add_entity(start_obj1) if randint(1,200) == 1: start_obj3 = entity.Star(world, star_img3, 3) my_random_posi = self.my_random() start_obj3.location = MyVector(my_random_posi) world.add_entity(start_obj3) if randint(1,400) == 1: start_obj2 = entity.Star(world, star_img2, 2) my_random_posi = self.my_random() start_obj2.location = MyVector(my_random_posi) world.add_entity(start_obj2) if randint(1,200) == 1: start_obj4 = entity.Star(world, star_img4, 4) 
my_random_posi = self.my_random() start_obj4.location = MyVector(my_random_posi) world.add_entity(start_obj4) if randint(1,300) == 1: start_obj6 = entity.Star(world, star_img6, 6) my_random_posi = self.my_random() start_obj6.location = MyVector(my_random_posi) world.add_entity(start_obj6) #画世界 world.render() #世界运行 world.process(past_second) #积分区域 score_area = self.screen.subsurface(460, 20, 160, 60) score_area.fill((0,0,0)) myprint(score_area, u'LEVEL 2', (5,5), color=(255,255,255), size='enl') myprint(score_area, 'To Next Level: ', (5,35), color=(255,255,255), size='dk') myprint(score_area, str(self.scoreleft), (80,35), color=(255,255,255), size='dk') #画猪 pig_area = self.screen.subsurface(460, 90, 160, 370) pig_area.fill((0,0,0)) if world.left_20: init_pig.shy = False gamespeed = 6. init_pig.dynamic_tra_start1 = (init_pig.dynamic_tra_start1[0]-20, init_pig.dynamic_tra_start1[1]) init_pig.dynamic_tra_start2 = (init_pig.dynamic_tra_start2[0]-20, init_pig.dynamic_tra_start2[1]) world.left_20 = False if world.right_20: init_pig.shy = False gamespeed = 6. init_pig.dynamic_tra_start1 = (init_pig.dynamic_tra_start1[0]+20, init_pig.dynamic_tra_start1[1]) init_pig.dynamic_tra_start2 = (init_pig.dynamic_tra_start2[0]+20, init_pig.dynamic_tra_start2[1]) world.right_20 = False if world.left_10: init_pig.shy = False gamespeed = 6. init_pig.dynamic_tra_start1 = (init_pig.dynamic_tra_start1[0]-10, init_pig.dynamic_tra_start1[1]) init_pig.dynamic_tra_start2 = (init_pig.dynamic_tra_start2[0]-10, init_pig.dynamic_tra_start2[1]) world.left_10 = False if world.right_10: init_pig.shy = False gamespeed = 6. init_pig.dynamic_tra_start1 = (init_pig.dynamic_tra_start1[0]+10, init_pig.dynamic_tra_start1[1]) init_pig.dynamic_tra_start2 = (init_pig.dynamic_tra_start2[0]+10, init_pig.dynamic_tra_start2[1]) world.right_10 = False if world.speedup: gamespeed = 20. init_pig.shy = True world.speedup = False if world.handsweat: init_pig.shy = False gamespeed = 6. 
halfneck = 262-(236-init_pig.dynamic_tra_start1[1]+26)/2 init_pig.dynamic_tra_start1 = (init_pig.dynamic_tra_start1[0], halfneck) init_pig.dynamic_tra_start2 = (init_pig.dynamic_tra_start2[0], halfneck) init_pig.sweat = True world.handsweat = False if init_pig.sweat: init_pig.draw_dynamic_pig(pig_area, (80,300), 0.) else: init_pig.draw_dynamic_pig(pig_area, (80,300), past_second, neck_speed=gamespeed) #被哑铃砸到 if world.ifdead: #死亡讯息 game_area = self.screen.subsurface(20, 20, 420, 440) myprint(game_area, u'你被哑铃砸到了!斜方怪非常开心!', (GAME_WIDTH/2-130, GAME_HEIGHT/2), color=(255,127,0)) #myprint(game_area, u'斜方肌猛涨', (GAME_WIDTH/2-130, GAME_HEIGHT/2), color=(255,127,0)) return 'retry2' #斜方怪锻炼完成 if init_pig.pigtootall: #斜方肌太强壮信息 game_area = self.screen.subsurface(20, 20, 420, 440) myprint(game_area, u'怎么都让斜方怪长成这样了!', (GAME_WIDTH/2-115, GAME_HEIGHT/2-20), color=(255,127,0)) myprint(game_area, u'地球再也打不过斜方星了!', (GAME_WIDTH/2-105, GAME_HEIGHT/2), color=(255,127,0)) return 'retry2' #检查是否进入下一关 if self.scoreleft == 0: return 'next3' pygame.display.update() else: print 'paused' if mode == 3: paused = False self.scoreleft = 1000 DB_COUNT = 20 init_pig = pig.Pig() clock = pygame.time.Clock() world = World(self.screen) gamespeed = 10. 
for db in xrange(DB_COUNT): db = entity.Dumbbell(world) my_random_posi = self.my_random() db.location = MyVector(my_random_posi) world.add_entity(db) me = entity.Me(world, human) me.location = MyVector((GAME_WIDTH/2, GAME_HEIGHT/2)) world.add_entity(me) while True: for e in pygame.event.get(): if e.type == QUIT: return 'quit' if e.type == KEYUP: if e.key == K_SPACE: paused = not paused if not paused: past_second = clock.tick(30) / 1000.0 self.scoreleft -= 1 # 一定几率不断出现星星 if randint(1,400) == 1: start_obj1 = entity.Star(world, star_img1, 1) my_random_posi = self.my_random() start_obj1.location = MyVector(my_random_posi) world.add_entity(start_obj1) if randint(1,200) == 1: start_obj3 = entity.Star(world, star_img3, 3) my_random_posi = self.my_random() start_obj3.location = MyVector(my_random_posi) world.add_entity(start_obj3) if randint(1,400) == 1: start_obj2 = entity.Star(world, star_img2, 2) my_random_posi = self.my_random() start_obj2.location = MyVector(my_random_posi) world.add_entity(start_obj2) if randint(1,200) == 1: start_obj4 = entity.Star(world, star_img4, 4) my_random_posi = self.my_random() start_obj4.location = MyVector(my_random_posi) world.add_entity(start_obj4) if randint(1,400) == 1: start_obj5 = entity.Star(world, star_img5, 5) my_random_posi = self.my_random() start_obj5.location = MyVector(my_random_posi) world.add_entity(start_obj5) if randint(1,300) == 1: start_obj6 = entity.Star(world, star_img6, 6) my_random_posi = self.my_random() start_obj6.location = MyVector(my_random_posi) world.add_entity(start_obj6) #画世界 world.render() #世界运行 world.process(past_second) #积分区域 score_area = self.screen.subsurface(460, 20, 160, 60) score_area.fill((0,0,0)) myprint(score_area, u'LEVEL 3', (5,5), color=(255,255,255), size='enl') myprint(score_area, 'To Next Level: ', (5,35), color=(255,255,255), size='dk') myprint(score_area, str(self.scoreleft), (80,35), color=(255,255,255), size='dk') #画猪 pig_area = self.screen.subsurface(460, 90, 160, 370) 
pig_area.fill((0,0,0)) if world.left_20: init_pig.shy = False gamespeed = 10. init_pig.dynamic_tra_start1 = (init_pig.dynamic_tra_start1[0]-20, init_pig.dynamic_tra_start1[1]) init_pig.dynamic_tra_start2 = (init_pig.dynamic_tra_start2[0]-20, init_pig.dynamic_tra_start2[1]) world.left_20 = False if world.right_20: init_pig.shy = False gamespeed = 10. init_pig.dynamic_tra_start1 = (init_pig.dynamic_tra_start1[0]+20, init_pig.dynamic_tra_start1[1]) init_pig.dynamic_tra_start2 = (init_pig.dynamic_tra_start2[0]+20, init_pig.dynamic_tra_start2[1]) world.right_20 = False if world.left_10: init_pig.shy = False gamespeed = 10. init_pig.dynamic_tra_start1 = (init_pig.dynamic_tra_start1[0]-10, init_pig.dynamic_tra_start1[1]) init_pig.dynamic_tra_start2 = (init_pig.dynamic_tra_start2[0]-10, init_pig.dynamic_tra_start2[1]) world.left_10 = False if world.right_10: init_pig.shy = False gamespeed = 10. init_pig.dynamic_tra_start1 = (init_pig.dynamic_tra_start1[0]+10, init_pig.dynamic_tra_start1[1]) init_pig.dynamic_tra_start2 = (init_pig.dynamic_tra_start2[0]+10, init_pig.dynamic_tra_start2[1]) world.right_10 = False if world.speedup: gamespeed = 20. init_pig.shy = True world.speedup = False if world.handsweat: init_pig.shy = False gamespeed = 10. halfneck = 262-(236-init_pig.dynamic_tra_start1[1]+26)/2 init_pig.dynamic_tra_start1 = (init_pig.dynamic_tra_start1[0], halfneck) init_pig.dynamic_tra_start2 = (init_pig.dynamic_tra_start2[0], halfneck) init_pig.sweat = True world.handsweat = False if init_pig.sweat: init_pig.draw_dynamic_pig(pig_area, (80,300), 0.) 
else: init_pig.draw_dynamic_pig(pig_area, (80,300), past_second, neck_speed=gamespeed) #被哑铃砸到 if world.ifdead: #死亡讯息 game_area = self.screen.subsurface(20, 20, 420, 440) myprint(game_area, u'你被哑铃砸到了!斜方怪非常开心!', (GAME_WIDTH/2-130, GAME_HEIGHT/2), color=(255,127,0)) #myprint(game_area, u'斜方肌猛涨', (GAME_WIDTH/2-130, GAME_HEIGHT/2), color=(255,127,0)) return 'retry3' #斜方怪锻炼完成 if init_pig.pigtootall: #斜方肌太强壮信息 game_area = self.screen.subsurface(20, 20, 420, 440) myprint(game_area, u'怎么都让斜方怪长成这样了!', (GAME_WIDTH/2-115, GAME_HEIGHT/2-20), color=(255,127,0)) myprint(game_area, u'地球再也打不过斜方星了!', (GAME_WIDTH/2-105, GAME_HEIGHT/2), color=(255,127,0)) return 'retry3' #检查是否进入下一关 if self.scoreleft == 0: return 'next4' pygame.display.update() else: print 'paused'
def _display_score(self):
    """Redraw the LEVEL / LINES / SCORE panel to the right of the play field."""
    if not hasattr(self, '_score_board'):
        # First call: compute and cache the panel geometry on the instance.
        board_x = self.START[0] + self.WIDTH + 30
        board_y = self.START[1] + 100
        # Panel rectangle: (x, y, width, height).
        self._score_board = (board_x, board_y, 200, 260)
        # Label anchors are inset 10px; value anchors inset 30px, 40px below.
        self._score_level = (board_x + 10, board_y + 10)
        self._score_level_v = (board_x + 30, board_y + 50)
        self._score_killed = (board_x + 10, board_y + 90)
        self._score_killed_v = (board_x + 30, board_y + 130)
        self._score_score = (board_x + 10, board_y + 170)
        self._score_score_v = (board_x + 30, board_y + 210)
    # Clear the panel, then draw each label/value pair.
    self.screen.fill((0, 0, 0), self._score_board)
    util.myprint(self.screen, 'LEVEL', self._score_level, 'm')
    util.myprint(self.screen, self.level, self._score_level_v, 'm')
    util.myprint(self.screen, 'LINES', self._score_killed, 'm')
    util.myprint(self.screen, self.killed, self._score_killed_v, 'm')
    util.myprint(self.screen, 'SCORE', self._score_score, 'm')
    util.myprint(self.screen, self.score, self._score_score_v, 'm')
def training(config, cla):
    """Train the configured model, validating once per epoch.

    Builds the model and optimizer graph, restores any existing checkpoint,
    then for each epoch: runs one pass over the training set, one pass over
    the validation set, saves the model on validation-SDR improvement (or on
    the final epoch), and halves the learning rate after
    config['training']['half_lr_patience'] epochs without improvement.

    config: parsed experiment configuration dict (model/optimizer/training/dataset).
    cla: command-line arguments namespace (unused here beyond the signature).
    """
    g_global_step = tf.Variable(0, trainable=False,
                                name=config['model']['type'] + "_global_step")
    glr = config['optimizer']['lr']
    sess = tf.Session()
    # build model
    G = hparams.get_model(config['model']['type'])(config, sess)
    ## update params: only train variables belonging to this model type.
    G_vars = [
        var for var in tf.trainable_variables()
        if config['model']['type'] in var.name
    ]
    util.count_params(G_vars, config['model']['type'])
    util.total_params()
    # Learning rate is fed per-step so it can be halved mid-training.
    g_learning_rate = tf.placeholder(tf.float32, [])
    g_ozer = hparams.get_optimizer(
        config['optimizer']['type'])(learn_rate=g_learning_rate)
    g_grad = g_ozer.compute_gradients(G.loss, G_vars)
    g_update = g_ozer.apply_gradients(g_grad, global_step=g_global_step)
    ## restore from checkpoint
    G_save_path = os.path.join(config['training']['path'], 'generat.ckpt')
    sess.run(tf.global_variables_initializer())
    G.load(G_save_path)
    history_file = os.path.join(config['training']['path'], 'history.txt')
    tr_dataset = get_dataset(config, 'tr')
    cv_dataset = get_dataset(config, 'cv')
    # get_iterator() may return None, in which case a Python generator
    # (get_next()) is used instead — see the sess.run(...) if/else below.
    tr_next = tr_dataset.get_iterator()
    cv_next = cv_dataset.get_iterator()
    valid_best_sdr = float('-inf')
    valid_wait = 0
    for epoch in range(1, config['training']['num_epochs'] + 1):
        tr_loss = tr_size = tr_sdr = 0.0
        util.myprint(history_file,
                     '-' * 20 + ' epoch {} '.format(epoch) + '-' * 20)
        ## training data initial
        if hasattr(tr_dataset, 'iterator'):
            sess.run(tr_dataset.iterator.initializer)
        else:
            tr_gen = tr_dataset.get_next()
        while True:
            try:
                # NOTE(review): `tr_next != None` should idiomatically be
                # `tr_next is not None`; behavior kept as-is here.
                feed_audio, audio_idx = sess.run(
                    tr_next) if tr_next != None else next(tr_gen)
                g_loss, g_sdr, g_curr_step, _ = sess.run(
                    fetches=[G.loss, G.sdr, g_global_step, g_update],
                    feed_dict={
                        G.audios: feed_audio,
                        g_learning_rate: glr
                    })
                tr_loss += g_loss
                tr_sdr += g_sdr
                tr_size += 1
                print('Train step {}: {} = {:5f}, sdr = {:5f}, lr = {}'.format(
                    g_curr_step, config['training']['loss'], g_loss, g_sdr,
                    glr), end='\r')
            except (tf.errors.OutOfRangeError, StopIteration):
                # End of training data: log last-step and epoch-mean metrics.
                # NOTE(review): if the dataset yields zero batches, g_loss /
                # g_curr_step are unbound and tr_size is 0 here — confirm the
                # datasets are never empty.
                util.myprint(
                    history_file,
                    'Train step {}: {} = {:5f}, sdr = {:5f}, lr = {}'.format(
                        g_curr_step, config['training']['loss'], g_loss,
                        g_sdr, glr))
                util.myprint(
                    history_file,
                    'mean {} = {:5f} , mean sdr = {:5f}, lr = {}'.format(
                        config['training']['loss'], tr_loss / tr_size,
                        tr_sdr / tr_size, glr))
                break
        ## valid iteration (no weight updates: g_update not fetched)
        if hasattr(cv_dataset, 'iterator'):
            sess.run(cv_dataset.iterator.initializer)
        else:
            cv_gen = cv_dataset.get_next()
        cv_loss = cv_size = cv_sdr = 0.0
        while True:
            try:
                feed_audio, audio_idx = sess.run(
                    cv_next) if cv_next != None else next(cv_gen)
                g_loss, g_sdr = sess.run(fetches=[G.loss, G.sdr],
                                         feed_dict={G.audios: feed_audio})
                cv_loss += g_loss
                cv_sdr += g_sdr
                cv_size += 1
            except (tf.errors.OutOfRangeError, StopIteration):
                curr_loss = cv_loss / cv_size
                curr_sdr = cv_sdr / cv_size
                util.myprint(history_file,
                             'Valid '+ config['training']['loss'] +' = {:5f}, sdr = {}'.\
                             format(curr_loss, curr_sdr))
                ## save model for every improve of the best valid score
                ## or last epoch
                if curr_sdr > valid_best_sdr or epoch == config['training'][
                        'num_epochs']:
                    util.myprint(history_file, 'Save Model')
                    valid_wait = 0
                    valid_best_sdr = curr_sdr
                    G.save(G_save_path, g_curr_step)
                else:
                    # No improvement: halve the LR once patience is exhausted.
                    valid_wait += 1
                    if valid_wait == config['training']['half_lr_patience']:
                        glr /= 2
                        valid_wait = 0
                break
def test(config, cla):
    """Evaluate a restored checkpoint on a directory of mixture wavs.

    For each .wav in cla.mix_input_path, separates the mixture against the
    two clean references (s1/, s2/ under cla.clean_input_path), logs per-file
    SDR / SI-SNR / PESQ, and finally logs the mean of each metric.

    config: experiment configuration dict.
    cla: command-line args with ckpt_name, test_set, mix_input_path,
         clean_input_path attributes.
    """
    log_file = os.path.join(config['training']['path'], cla.ckpt_name,
                            'log_'+cla.test_set)
    # Make the per-checkpoint log directory if it does not exist yet.
    if not os.path.exists(os.path.join(config['training']['path'],
                                       cla.ckpt_name)):
        os.mkdir(os.path.join(config['training']['path'], cla.ckpt_name))
    # NOTE(review): output_path is computed but never used below — dead local?
    output_path = os.path.join(config['training']['path'], 'sample')
    sess = tf.Session()
    G = hparams.get_model(config['model']['type'])(config, sess)
    G_save_path = os.path.join(config['training']['path'], 'generat.ckpt')
    G.load(G_save_path, cla.ckpt_name)
    # Normalize the input path so string concatenation below forms valid paths.
    if not cla.mix_input_path.endswith('/'):
        cla.mix_input_path += '/'
    filenames = [filename for filename in os.listdir(cla.mix_input_path)
                 if filename.endswith('.wav')]
    sdr_sum = []
    sisnr_sum = []
    pesq_sum = []
    for filename in filenames:
        util.myprint(log_file, filename)
        mix_audio = util.load_wav(cla.mix_input_path + filename,
                                  config['dataset']['sample_rate'])
        clean_1 = util.load_wav(cla.clean_input_path + 's1/' + filename,
                                config['dataset']['sample_rate'])
        clean_2 = util.load_wav(cla.clean_input_path + 's2/' + filename,
                                config['dataset']['sample_rate'])
        # Each metric comes back as a pair (one value per separated source).
        sdr, sisnr, pesq, pit_ch = separate.separate_sample(
            sess, G, config, mix_audio, clean_1, clean_2)
        util.myprint(log_file, ' sdr: {}, {}'.format(sdr[0], sdr[1]))
        util.myprint(log_file, ' sisnr: {}, {}'.format(sisnr[0], sisnr[1]))
        util.myprint(log_file, ' pesq: {}, {}'.format(pesq[0], pesq[1]))
        sdr_sum.append(sdr)
        sisnr_sum.append(sisnr)
        pesq_sum.append(pesq)
    # Aggregate: mean over all files and both sources.
    sdr_sum = np.array(sdr_sum)
    sisnr_sum = np.array(sisnr_sum)
    pesq_sum = np.array(pesq_sum)
    util.myprint(log_file, 'test sdr : {}'.format(np.mean(sdr_sum)))
    util.myprint(log_file, 'test sisnr : {}'.format(np.mean(sisnr_sum)))
    util.myprint(log_file, 'test pesq : {}'.format(np.mean(pesq_sum)))
def __init__(self, actionset=[], infoDept=S_LEN, infoDim=S_INFO,
             log_path=None, summary_dir=None, nn_model=None, ipcQueue=None,
             ipcId=None, readOnly=False):
    """Set up the A3C actor/critic learner.

    actionset: list of selectable actions (NOTE(review): mutable default
        argument — shared across instances if ever mutated; consider
        `actionset=None` + fallback).
    infoDept / infoDim: history length and feature count of the state matrix.
    summary_dir: required; checkpoints and TF summaries go under
        <summary_dir>/rnnBuffer.
    nn_model: optional checkpoint file name (relative to the buffer dir).
    ipcQueue / ipcId: when both are given, network parameters are fetched
        from a central learner over the queue pair instead of a checkpoint.
    readOnly: when True, training bookkeeping is disabled elsewhere.
    """
    assert summary_dir
    # ipcQueue and ipcId must be given together or not at all.
    assert (not ipcQueue and not ipcId) or (ipcQueue and ipcId)
    myprint("Pensieproc init Params:", actionset, infoDept, log_path,
            summary_dir, nn_model)
    self.ipcQueue = ipcQueue
    self.pid = os.getpid()
    self.ipcId = ipcId
    self.summary_dir = os.path.join(summary_dir, "rnnBuffer")
    self.nn_model = None if not nn_model else os.path.join(
        self.summary_dir, nn_model)
    self.a_dim = len(actionset)
    self._vActionset = actionset
    self._vInfoDim = infoDim
    self._vInfoDept = infoDept
    self._vReadOnly = readOnly
    if not os.path.exists(self.summary_dir):
        os.makedirs(self.summary_dir)
    self.sess = tf.Session()
    # log_file = open(os.path.join(log_path, "PensiveLearner", "wb"))
    self.actor = a3c.ActorNetwork(
        self.sess,
        state_dim=[self._vInfoDim, self._vInfoDept],
        action_dim=self.a_dim,
        learning_rate=ACTOR_LR_RATE)
    self.critic = a3c.CriticNetwork(
        self.sess,
        state_dim=[self._vInfoDim, self._vInfoDept],
        action_dim=self.a_dim,
        learning_rate=CRITIC_LR_RATE)
    self.summary_ops, self.summary_vars = a3c.build_summaries()
    self.sess.run(tf.global_variables_initializer())
    self.writer = tf.summary.FileWriter(
        self.summary_dir, self.sess.graph)  # training monitor
    self.saver = tf.train.Saver()  # save neural net parameters
    # restore neural net parameters
    self.epoch = 0
    # No explicit model and no IPC: try to resume the latest saved session.
    if self.nn_model is None and not self.ipcQueue:
        nn_model, epoch = guessSavedSession(self.summary_dir)
        if nn_model:
            self.nn_model = nn_model
            self.epoch = epoch
    # nn_model = NN_MODEL
    if self.nn_model is not None and not self.ipcQueue:
        # nn_model is the path to file
        self.saver.restore(self.sess, self.nn_model)
        myprint("Model restored with `" + self.nn_model + "'")
    if self.ipcQueue:
        # Request current network parameters from the central learner,
        # then block until a reply addressed to this pid arrives.
        self.ipcQueue[0].put({
            "id": self.ipcId,
            "pid": self.pid,
            "cmd": IPC_CMD_PARAM
        })
        myprint("=" * 50)
        myprint(self.ipcId, ": waiting for ipc")
        myprint("=" * 50)
        res = None
        while True:
            res = self.ipcQueue[1].get()
            pid = res["pid"]
            res = res["res"]
            if pid == self.pid:
                break
        actor_net_params, critic_net_params = res
        self.actor.set_network_params(actor_net_params)
        self.critic.set_network_params(critic_net_params)
        myprint("=" * 50)
        myprint(self.ipcId, ": ipcOver")
        myprint("=" * 50)
    # Rolling training buffers and per-request bookkeeping keyed by rnnkey.
    self.s_batch = []
    self.a_batch = []
    self.r_batch = []
    self.entropy_record = []
    self.actor_gradient_batch = []
    self.critic_gradient_batch = []
    self.keyedSBatch = {}
    self.keyedActionProb = {}
    self.keyedAction = {}
    self.keyedInputParam = {}
dataset_tuples = fpa_io.load_split_file(args.dataset_root_folder) lstm_baseline = LSTMBaseline(num_joints=21, num_actions=dataset_tuples['num_actions'], use_cuda=args.use_cuda) if args.use_cuda: lstm_baseline = lstm_baseline.cuda() optimizer = optim.Adadelta(lstm_baseline.parameters(), rho=0.9, weight_decay=0.005, lr=0.05) myprint('Log filepath: ' + str(args.log_filepath), args.log_filepath) myprint('Checkpoint filepath: ' + str(args.checkpoint_filepath), args.log_filepath) myprint('Using CUDA: ' + str(args.use_cuda), args.log_filepath) myprint('Number of epochs: ' + str(args.num_epochs), args.log_filepath) myprint('Number of training triples: ' + str(len(dataset_tuples['train'])), args.log_filepath) losses = [] for i in range(len(dataset_tuples['train'])): losses.append([]) if train: lstm_baseline.train() load_model = False for epoch_idx in range(args.num_epochs - 1):
def cleanup():
    """Terminate every batch-loader worker process and wait for each to exit.

    Closure over `self` and `_nJobs` from the enclosing scope.
    """
    util.myprint('Terminating BatchLoader')
    for idx in range(0, _nJobs):
        loader = self.batch_loader_list[idx]
        loader.terminate()
        loader.join()
def run(self, info_mode):
    """Run the info/tutorial screen event loop.

    info_mode == 1 shows the intro/help screen: a pig demo panel on the left,
    a text panel on the right (selected via self.show_info), and an item bar
    at the bottom. Returns 'quit' on window close or 'next1' when the player
    clicks the start button.
    """
    if info_mode==1:
        mypig = pig.Pig()
        # Demo pig used to preview each star effect in the left panel.
        star_pig = pig.Pig((140,220))
        clock = pygame.time.Clock()
        grey = (139, 129, 76)
        while True:
            # ---- input handling: clicks select which info page to show ----
            for e in pygame.event.get():
                if e.type == QUIT:
                    return 'quit'
                if e.type == MOUSEBUTTONUP:
                    # print 'up'
                    if self.is_over(e.pos, (120,395)) and e.button == 1:
                        play_sound('menu')
                        self.show_star = not self.show_star
                    if self.is_over(e.pos, (550,395)) and e.button == 1:
                        play_sound('go')
                        return 'next1'
                    if self.is_over(e.pos, (40,395)) and e.button == 1:
                        play_sound('menu')
                        # Home icon toggles between pages 1 and 2.
                        if self.show_info == 2:
                            self.show_info = 1
                        else:
                            self.show_info = 2
                    if self.is_over(e.pos, (180,399), 32) and e.button == 1:
                        play_sound('menu')
                        self.show_info = 3  # left +20
                    if self.is_over(e.pos, (220,403), 26) and e.button == 1:
                        play_sound('menu')
                        self.show_info = 4  # right +20
                    if self.is_over(e.pos, (250,403), 26) and e.button == 1:
                        play_sound('menu')
                        self.show_info = 5  # left +10
                    if self.is_over(e.pos, (280,399), 32) and e.button == 1:
                        play_sound('menu')
                        self.show_info = 6  # right +10
                    if self.is_over(e.pos, (320,400), 32) and e.button == 1:
                        play_sound('menu')
                        self.show_info = 7  # praise: speed*2
                    if self.is_over(e.pos, (360,403), 26) and e.button == 1:
                        play_sound('menu')
                        self.show_info = 8  # sweaty hands
                        star_pig.sweat = True
            # if star
            # show info = 3,4,5,6,7,
            past_second = clock.tick(30) / 1000.0
            mouse_position = pygame.mouse.get_pos()
            self.screen.blit(self.background, (0,0))
            # ---- left panel: animated pig demo for the selected page ----
            pig_area = self.screen.subsurface(20, 20, 290, 340)
            pig_area.fill((0,0,0))
            if self.show_info == 2:
                star_pig.shy = False
                star_pig.draw_dynamic_pig(pig_area, (140,220), past_second)
                if star_pig.info_area_restart:
                    # Reset trapezius anchor points to their default position.
                    star_pig.dynamic_tra_start1 = (133, 156)
                    star_pig.dynamic_tra_start2 = (147, 156)
                    star_pig.info_area_restart = False
            elif self.show_info == 3:
                star_pig.shy = False
                # Shift anchors 20px left once (guard prevents re-applying).
                if star_pig.dynamic_tra_start1[0] != 113:
                    star_pig.dynamic_tra_start1 = (133-20, 156)
                    star_pig.dynamic_tra_start2 = (147-20, 156)
                star_pig.draw_dynamic_pig(pig_area, (140,220), past_second)
                if star_pig.info_area_restart:
                    star_pig.dynamic_tra_start1 = (133, 156)
                    star_pig.dynamic_tra_start2 = (147, 156)
                    star_pig.info_area_restart = False
            elif self.show_info == 4:
                star_pig.shy = False
                # Shift anchors 20px right once.
                if star_pig.dynamic_tra_start1[0] != 153:
                    star_pig.dynamic_tra_start1 = (133+20, 156)
                    star_pig.dynamic_tra_start2 = (147+20, 156)
                star_pig.draw_dynamic_pig(pig_area, (140,220), past_second)
                if star_pig.info_area_restart:
                    star_pig.dynamic_tra_start1 = (133, 156)
                    star_pig.dynamic_tra_start2 = (147, 156)
                    star_pig.info_area_restart = False
            elif self.show_info == 5:
                star_pig.shy = False
                # Shift anchors 10px left once.
                if star_pig.dynamic_tra_start1[0] != 123:
                    star_pig.dynamic_tra_start1 = (133-10, 156)
                    star_pig.dynamic_tra_start2 = (147-10, 156)
                star_pig.draw_dynamic_pig(pig_area, (140,220), past_second)
                if star_pig.info_area_restart:
                    star_pig.dynamic_tra_start1 = (133, 156)
                    star_pig.dynamic_tra_start2 = (147, 156)
                    star_pig.info_area_restart = False
            elif self.show_info == 6:
                star_pig.shy = False
                # Shift anchors 10px right once.
                if star_pig.dynamic_tra_start1[0] != 143:
                    star_pig.dynamic_tra_start1 = (133+10, 156)
                    star_pig.dynamic_tra_start2 = (147+10, 156)
                star_pig.draw_dynamic_pig(pig_area, (140,220), past_second)
                if star_pig.info_area_restart:
                    star_pig.dynamic_tra_start1 = (133, 156)
                    star_pig.dynamic_tra_start2 = (147, 156)
                    star_pig.info_area_restart = False
            elif self.show_info == 7:
                # "Praise" demo: shy pig, faster hand/neck animation.
                star_pig.shy = True
                star_pig.draw_dynamic_pig(pig_area, (140,220), past_second,
                                          hand_speed = 1000., neck_speed = 60.)
                if star_pig.info_area_restart:
                    star_pig.dynamic_tra_start1 = (133, 156)
                    star_pig.dynamic_tra_start2 = (147, 156)
                    star_pig.info_area_restart = False
            elif self.show_info == 8:
                # "Sweaty hands" demo: when the neck has grown enough the pig
                # starts sweating and the neck height is halved, then the
                # animation freezes (time step 0.) while sweating.
                star_pig.shy = False
                if star_pig.dynamic_tra_start1[1] < 100:
                    star_pig.sweat = True
                    halfneck = 182-(156-star_pig.dynamic_tra_start1[1]+26)/2
                    star_pig.dynamic_tra_start1 = (133, halfneck)
                    star_pig.dynamic_tra_start2 = (147, halfneck)
                if star_pig.sweat:
                    star_pig.draw_dynamic_pig(pig_area, (140,220), 0.)
                else:
                    star_pig.draw_dynamic_pig(pig_area, (140,220), past_second)
                if star_pig.info_area_restart:
                    star_pig.dynamic_tra_start1 = (133, 156)
                    star_pig.dynamic_tra_start2 = (147, 156)
                    star_pig.info_area_restart = False
            else:
                # Page 1: static pig with annotated arrows.
                mypig.drawpig(pig_area, (140,220), past_second)
                self.draw_arrow(pig_area, (190,205), (220,220))
                myprint(pig_area, u'左手举不动了', (200,225), size='c1ss')
                myprint(pig_area, u'斜方怪还在坚持!', (190,240), size='c1ss')
                self.draw_arrow(pig_area, (170,165), (202,150))
                myprint(pig_area, u'斜方怪拥有', (202,130), size='c1ss')
                myprint(pig_area, u'傲人的斜方肌', (208,145), size='c1ss')
                self.draw_arrow(pig_area, (110,215), (80, 220))
                myprint(pig_area, u'只用粉色肚兜', (10,225), size='c1ss')
                myprint(pig_area, u'是他的原则', (15,240), size='c1ss')
            # ---- right panel: story / help text for the selected page ----
            info_area = self.screen.subsurface(330, 20, 290, 340)
            info_area.fill((0,0,0))
            if self.show_info == 1:
                myprint(info_area, u'“一二一二,唔噢噢哦~~”', (10, 10), color=(255,127,0))
                myprint(info_area, u'!!这里原本是猪哥的房间...', (10, 40))
                myprint(info_area, u'...怎么变成了一头怪兽!等等,最', (10, 60))
                myprint(info_area, u'近猪哥整天举哑铃,难,难道说!', (10, 80))
                myprint(info_area, u'“唔噢噢哦~愚蠢的人类。我是沉', (10, 110), color=(255,127,0))
                myprint(info_area, u'睡了很久的斜方怪。这个人对于斜', (10, 130), color=(255,127,0))
                myprint(info_area, u'方肌的执着唤醒了我。他的意识已', (10, 150), color=(255,127,0))
                myprint(info_area, u'经被我吞噬。从此这副身体由我来', (10, 170), color=(255,127,0))
                myprint(info_area, u'控制。我会锻炼出更强壮的斜方肌', (10, 190), color=(255,127,0))
                myprint(info_area, u'不久的将来,地球将会被我们斜方', (10, 210), color=(255,127,0))
                myprint(info_area, u'星彻底占领!唔噢噢哦~~', (10, 230), color=(255,127,0))
                myprint(info_area, u'要抓紧练起来了!一二一二......”', (10, 250), color=(255,127,0))
                myprint(info_area, u'...我是认真想的设定。总之猪哥,', (10, 280))
                myprint(info_area, u'我不会让你白白牺牲的。地球就', (10, 300))
                myprint(info_area, u'交给我来保护吧!', (10, 320))
            elif self.show_info == 2:
                myprint(info_area, u'随着时间,斜方肌会越来越长。', (10, 40))
                myprint(info_area, u'你要躲避斜方怪的哑铃攻击,同时', (10, 60))
                myprint(info_area, u'不断寻找星星,防止斜方怪锻炼出', (10, 80))
                myprint(info_area, u'自豪的斜方肌。', (10, 100))
                myprint(info_area, u'移动:上下左右', (10, 130), color=(255,127,0))
                myprint(info_area, u'暂停:空格', (10, 150),color=(255,127,0))
                myprint(info_area, u'星星对斜方肌的影响:', (10, 180))
                myprint(info_area, u'向左长', (10, 200))
                self.drawitem(info_area, 'star1.png', (100,201), angle=-1, s=True)
                self.drawitem(info_area, 'star3.png', (120,201), angle=-1, s=True)
                myprint(info_area, u'向右长', (10, 220))
                self.drawitem(info_area, 'star2.png', (100,221), angle=-1, s=True)
                self.drawitem(info_area, 'star4.png', (120,221), angle=-1, s=True)
                myprint(info_area, u'加速长', (10, 240))
                self.drawitem(info_area, 'star5.png', (100,241), angle=-1, s=True)
                myprint(info_area, u'降低一半', (10, 260))
                self.drawitem(info_area, 'star6.png', (100,261), angle=-1, s=True)
                myprint(info_area, u'点击下方星星图标查看具体效果。', (10, 290))
            elif self.show_info == 3:
                myprint(info_area, u'“斜方怪,右边的斜方肌还不行”', (10, 40))
                myprint(info_area, u'“soga”', (10, 70), color=(255,127,0))
                myprint(info_area, u'黄色星星效果:', (10, 120))
                myprint(info_area, u'使右边一边的斜方肌大涨', (10, 140))
                myprint(info_area, u'一旦一边的斜方肌大出另一边很多,', (10, 180))
                myprint(info_area, u'斜方肌失去平衡回归原点,', (10, 200))
                myprint(info_area, u'斜方怪就得重新练起拉。', (10, 220))
            elif self.show_info == 4:
                myprint(info_area, u'“斜方怪,左边的斜方肌搞起”', (10, 40))
                myprint(info_area, u'“专攻左边!”', (10, 70), color=(255,127,0))
                myprint(info_area, u'灰色星星效果:', (10, 120))
                myprint(info_area, u'使左边一边的斜方肌大涨', (10, 140))
            elif self.show_info == 5:
                myprint(info_area, u'“右边还差一点点哦”', (10, 40))
                myprint(info_area, u'“没问题!一二一二...”', (10, 70), color=(255,127,0))
                myprint(info_area, u'绿色星星效果:', (10, 120))
                myprint(info_area, u'使右边一边的斜方肌小涨', (10, 140))
            elif self.show_info == 6:
                myprint(info_area, u'“太帅了斜方怪!左边再一点点...”', (10, 40))
                myprint(info_area, u'“ok”', (10, 70), color=(255,127,0))
                myprint(info_area, u'冰冻星星效果:', (10, 120))
                myprint(info_area, u'使左边一边的斜方肌小涨', (10, 140))
            elif self.show_info == 7:
                myprint(info_area, u'“斜方肌已经完美了”', (10, 40))
                myprint(info_area, u'“还,还好。谢谢。”', (10, 70), color=(255,127,0))
                myprint(info_area, u'没想到斜方怪竟然害羞了,反而练', (10, 100))
                myprint(info_area, u'得更快了', (10, 120))
                myprint(info_area, u'象棋星星效果:', (10, 160))
                myprint(info_area, u'使斜方肌增长速度加倍,', (10, 180))
                myprint(info_area, u'持续时间到下一个星星', (10, 200))
            elif self.show_info == 8:
                myprint(info_area, u'“啊啊啊手汗手汗,都是手汗。”', (10, 40), color=(255,127,0))
                myprint(info_area, u'火焰星星效果:', (10, 80))
                myprint(info_area, u'斜方怪手汗太多抓不住哑铃。', (10, 100))
                myprint(info_area, u'回到一半的斜方肌长度,斜方怪在', (10, 120))
                myprint(info_area, u'原地不动等汗干。', (10, 140))
            # ---- bottom bar: home / item icons / start button ----
            item_area = self.screen.subsurface(20, 380, 600, 70)
            item_area.fill((0,0,0))
            if self.is_over(mouse_position, (40,395)):
                self.drawitem(item_area, 'home2.png', (20,15))
            else:
                self.drawitem(item_area, 'home.png', (20,15))
            myprint(item_area, u'操作介绍', (19, 5), size='c1ss')
            if self.is_over(mouse_position, (120,395)):
                # Hover highlight behind the item-box icon.
                item_area.fill(grey, (100,17,48,45))
            self.drawitem(item_area, 'box48.png', (100,15))
            myprint(item_area, u'道具介绍', (99, 4), size='c1ss')
            if self.show_star:
                # Spin the star icons; each one highlights on hover.
                speed = 600.
                self.angle += past_second * speed
                self.try_draw_star('star1.png', mouse_position, (180,399), 32, self.angle, item_area)
                self.try_draw_star('star2.png', mouse_position, (220,403), 26, self.angle, item_area)
                self.try_draw_star('star3.png', mouse_position, (250,403), 26, self.angle, item_area)
                self.try_draw_star('star4.png', mouse_position, (280,399), 32, self.angle, item_area)
                self.try_draw_star('star5.png', mouse_position, (320,400), 32, self.angle, item_area)
                self.try_draw_star('star6.png', mouse_position, (360,403), 26, self.angle, item_area)
                #self.try_draw_star('star7.png', mouse_position, (390,400), 32, self.angle, item_area)
            else:
                myprint(item_area, 'click to open', (200, 50), size='dk')
                self.draw_arrow(item_area, (154, 45), (195, 55))
            if self.is_over(mouse_position, (550,395)):
                self.drawitem(item_area, 'play2.png', (530,15))
            else:
                self.drawitem(item_area, 'play.png', (538,23))
            myprint(item_area, u'准备好了', (529, 4), size='c1ss')
            pygame.display.update()
def show_info_author(self):
    """Show the about/credits screen and block until ENTER is pressed.

    myprint returns the rendered line height (h), which is used to stack
    subsequent lines.
    """
    h = myprint(self.screen, u"本游戏只为测试用,非完成版。", (50,120))
    myprint(self.screen, u"游戏内的背景音乐(暂时)是Sehnsucht,存在版权也是很有可能的。", (50,125+h))
    myprint(self.screen, u"不过在天朝大家都是免费听歌的,我暂时也就不管了。", (50,130+2*h))
    myprint(self.screen, u"有一些图标非原创,但找的都属于commerical allowed", (50,135+3*h))
    myprint(self.screen, u"Copyright (c) 2013 HuangShan", (50,160+5*h), size='en')
    myprint(self.screen, u"Version Beta 1.0", (50,165+6*h), size='en')
    pygame.display.update()
    myprint(self.screen, u"PRESS ENTER TO START...", (450,400), size='en')
    pygame.display.update()
    # Wait loop: quit on window close, return on ENTER.
    while True:
        for e in pygame.event.get():
            if e.type == QUIT:
                exit()
        pressed_keys = pygame.key.get_pressed()
        if pressed_keys[K_RETURN]:
            break
def _rRecv(self): dt = self.recv.get(timeout=60) if not dt.get("st", False): myprint(dt.get("trace", "")) raise Exception(dt.get("trace", "")) return dt["res"]
def run(self,nextlevel):
    """Show the level-transition screen for the given next level.

    nextlevel 1-3: display the level title and a Go(Enter) button; return
    'gameN' when clicked or ENTER is pressed (restarting the music), or
    'quit' on window close. nextlevel 4 is the ending screen and only
    exits via window close.
    """
    WIDTH = 420
    HEIGHT = 440
    game_area = self.screen.subsurface(20, 20, WIDTH, HEIGHT)
    score_area = self.screen.subsurface(460, 20, 160, 60)
    pig_area = self.screen.subsurface(460, 90, 160, 370)
    if nextlevel == 1:
        while True:
            for e in pygame.event.get():
                if e.type == QUIT:
                    return 'quit'
                if e.type == MOUSEBUTTONUP:
                    if self.is_over(e.pos, (WIDTH/2-20, HEIGHT/2+60)) and e.button == 1:
                        play_sound('go')
                        pygame.mixer.music.play(-1)
                        return 'game1'
                if e.type == KEYUP:
                    if e.key == K_RETURN:
                        play_sound('go')
                        pygame.mixer.music.play(-1)
                        return 'game1'
            self.screen.blit(self.background, (0,0))
            game_area.fill((0,0,0))
            score_area.fill((0,0,0))
            pig_area.fill((0,0,0))
            mouse_position = pygame.mouse.get_pos()
            myprint(game_area, u'Next Level: 1', (WIDTH/2-85, HEIGHT/2-70),size='enl')
            myprint(game_area, u'还是健身菜鸟的斜方怪', (WIDTH/2-83, HEIGHT/2-35))
            # Go button: orange fill on hover, black otherwise.
            if self.is_over(mouse_position, (WIDTH/2-20, HEIGHT/2+60)):
                pygame.draw.rect(game_area, (255,127,0), (WIDTH/2-40, HEIGHT/2+40, 80, 30))
                pygame.draw.rect(game_area, (255,255,255), (WIDTH/2-40, HEIGHT/2+40, 80, 30), 2)
                myprint(game_area, 'Go(Enter)', (WIDTH/2-19, HEIGHT/2+48), size='dk')
            else:
                pygame.draw.rect(game_area, (0,0,0), (WIDTH/2-40, HEIGHT/2+40, 80, 30))
                pygame.draw.rect(game_area, (255,255,255), (WIDTH/2-40, HEIGHT/2+40, 80, 30), 2)
                myprint(game_area, 'Go(Enter)', (WIDTH/2-19, HEIGHT/2+48), size='dk')
            pygame.display.update()
    if nextlevel == 2:
        while True:
            for e in pygame.event.get():
                if e.type == QUIT:
                    return 'quit'
                if e.type == MOUSEBUTTONUP:
                    if self.is_over(e.pos, (WIDTH/2-20, HEIGHT/2+60)) and e.button == 1:
                        play_sound('go')
                        pygame.mixer.music.play(-1)
                        return 'game2'
                if e.type == KEYUP:
                    if e.key == K_RETURN:
                        play_sound('go')
                        pygame.mixer.music.play(-1)
                        return 'game2'
            game_area.fill((0,0,0))
            mouse_position = pygame.mouse.get_pos()
            myprint(game_area, u'Next Level: 2', (WIDTH/2-85, HEIGHT/2-70),size='enl')
            myprint(game_area, u'斜方怪变得强大了', (WIDTH/2-70, HEIGHT/2-35))
            if self.is_over(mouse_position, (WIDTH/2-20, HEIGHT/2+60)):
                pygame.draw.rect(game_area, (255,127,0), (WIDTH/2-40, HEIGHT/2+40, 80, 30))
                pygame.draw.rect(game_area, (255,255,255), (WIDTH/2-40, HEIGHT/2+40, 80, 30), 2)
                myprint(game_area, 'Go(Enter)', (WIDTH/2-19, HEIGHT/2+48), size='dk')
            else:
                pygame.draw.rect(game_area, (0,0,0), (WIDTH/2-40, HEIGHT/2+40, 80, 30))
                pygame.draw.rect(game_area, (255,255,255), (WIDTH/2-40, HEIGHT/2+40, 80, 30), 2)
                myprint(game_area, 'Go(Enter)', (WIDTH/2-19, HEIGHT/2+48), size='dk')
            pygame.display.update()
    if nextlevel == 3:
        while True:
            for e in pygame.event.get():
                if e.type == QUIT:
                    return 'quit'
                if e.type == MOUSEBUTTONUP:
                    if self.is_over(e.pos, (WIDTH/2-20, HEIGHT/2+60)) and e.button == 1:
                        play_sound('go')
                        pygame.mixer.music.play(-1)
                        return 'game3'
                if e.type == KEYUP:
                    if e.key == K_RETURN:
                        play_sound('go')
                        pygame.mixer.music.play(-1)
                        return 'game3'
            game_area.fill((0,0,0))
            mouse_position = pygame.mouse.get_pos()
            myprint(game_area, u'Next Level: 3', (WIDTH/2-85, HEIGHT/2-70),size='enl')
            myprint(game_area, u'斜方怪大怒:噩梦来临', (WIDTH/2-85, HEIGHT/2-35))
            if self.is_over(mouse_position, (WIDTH/2-20, HEIGHT/2+60)):
                pygame.draw.rect(game_area, (255,127,0), (WIDTH/2-40, HEIGHT/2+40, 80, 30))
                pygame.draw.rect(game_area, (255,255,255), (WIDTH/2-40, HEIGHT/2+40, 80, 30), 2)
                myprint(game_area, 'Go(Enter)', (WIDTH/2-19, HEIGHT/2+48), size='dk')
            else:
                pygame.draw.rect(game_area, (0,0,0), (WIDTH/2-40, HEIGHT/2+40, 80, 30))
                pygame.draw.rect(game_area, (255,255,255), (WIDTH/2-40, HEIGHT/2+40, 80, 30), 2)
                myprint(game_area, 'Go(Enter)', (WIDTH/2-19, HEIGHT/2+48), size='dk')
            pygame.display.update()
    if nextlevel == 4:
        # Ending screen: stop the music and loop until the window is closed.
        pygame.mixer.music.stop()
        while True:
            for e in pygame.event.get():
                if e.type == QUIT:
                    return 'quit'
            game_area.fill((0,0,0))
            mouse_position = pygame.mouse.get_pos()
            myprint(game_area, u'斜方怪练不出满意的斜方肌,抑郁而终', (WIDTH/2-155, HEIGHT/2-20))
            myprint(game_area, u'CONGRATULATIONS', (WIDTH/2-105, HEIGHT/2))
            pygame.display.update()
def getNextAction(self, rnnkey, state):
    """Pick the next action for `rnnkey` by sampling the actor's policy.

    rnnkey: opaque identifier; per-request bookkeeping (state, action,
        probabilities, raw input) is stored under it so a later addReward()
        call can build a training sample.
    state: iterable of per-feature values; each entry may be a scalar or a
        short history vector (length <= self._vInfoDept).
    Returns the selected entry of self._vActionset.
    """
    #peerId and segId are Identifier
    #pendings_, curbufs_, pbdelay_, uploaded_, lastDlAt_, players_, deadline = state
    # lastPlayerId_, lastQl_, lastClens_, lastStartsAt_, lastFinishAt_, pendings_, deadline = state
    # thrpt_, lastQl_, lastClens_, clens_, wthrghpt, buf, deadline = state
    # myprint("thrpt_:", thrpt_, '\n'," lastQl_:", lastQl_, '\n'," lastClens_:", lastClens_, '\n'," clens_:", clens_, '\n'," wthrghpt:", wthrghpt, '\n'," buf:", buf, '\n'," deadline:", deadline, '\n')
    inputset = state
    # v_dim = len(thrpt_)
    # reward is video quality - rebuffer penalty - smooth penalty
    # retrieve previous state
    # state = np.zeros((self._vInfoDim, self._vInfoDept))
    if len(self.s_batch) == 0:
        state = np.zeros((self._vInfoDim, self._vInfoDept))
    else:
        state = np.array(self.s_batch[-1], copy=True)
    # dequeue history record: shift history left so column -1 is the newest slot.
    state = np.roll(state, -1, axis=1)
    for i, x in enumerate(inputset):
        x = np.array(x).reshape(-1)
        assert issubclass(x.dtype.type, np.number) and self._vInfoDept >= len(x)
        if len(x) > 1:
            state[i, :len(x)] = x
        else:
            # NOTE(review): `state[i, :-1] = x` broadcasts the scalar into
            # every column EXCEPT the newest slot, overwriting the rolled
            # history; the commented reference layout above uses
            # `state[4, -1] = ...` — suspect this should be `state[i, -1] = x`.
            # Confirm against the trained model before changing.
            state[i, :-1] = x
    # state[ 0, :len(thrpt_)] = thrpt_
    # state[ 1, :len(lastQl_)] = lastQl_
    # state[ 2, :len(lastClens_)] = lastClens_
    # state[ 3, :len(clens_)] = clens_
    # state[ 4, -1] = wthrghpt
    # state[ 5, -1] = buf
    # state[ 6, -1] = deadline
    reshapedInput = np.reshape(state, (1, self._vInfoDim, self._vInfoDept))
    action_prob = self.actor.predict(reshapedInput)
    # Sample an action from the policy via inverse-CDF with a discretized
    # uniform draw in (0, 1).
    action_cumsum = np.cumsum(action_prob)
    action = (action_cumsum >
              np.random.randint(1, RAND_RANGE) / float(RAND_RANGE)).argmax()
    myprint("action:", action, "action cumsum:", action_cumsum.tolist(),
            "reshapedInput:", reshapedInput.tolist())
    # for i, x in enumerate(state):
    #     if np.count_nonzero(x) <= 0:
    #         myprint("Some error=======================================")
    #         myprint(f"\033[1;31mError in param {i}\033[m")
    # Guard against a diverged actor producing NaN probabilities.
    for x in action_prob[0]:
        if math.isnan(x):
            myprint(inputset, "batch len", len(self.s_batch),
                    "actor out", self.actor.out)
        assert not math.isnan(x)
    # Note: we need to discretize the probability into 1/RAND_RANGE steps,
    # because there is an intrinsic discrepancy in passing single state and batch states
    if not self.nn_model or self._vReadOnly:  #i.e. only for training
        # Remember everything needed to form a training sample once the
        # reward for this key arrives (see addReward).
        self.keyedSBatch[rnnkey] = state
        self.keyedActionProb[rnnkey] = action_prob
        self.keyedAction[rnnkey] = action
        self.keyedInputParam[rnnkey] = inputset
    return self._vActionset[action]
def save_checkpoint(state, filename='checkpoint.pth.tar', log_filepath=None):
    """Persist a training checkpoint to disk.

    state: serializable checkpoint object (e.g. model/optimizer state dicts).
    filename: destination path passed to torch.save.
    log_filepath: forwarded to myprint as the log destination.
    """
    message = "\tSaving a checkpoint..."
    myprint(message, log_filepath)
    torch.save(state, filename)
def saveModel(self, end_of_video=False):
    """Flush the collected experience and update the networks.

    Two modes, selected by self.ipcQueue:

    * IPC mode: ship (s, a, r, entropy, end_of_video) batches to a central
      trainer over ipcQueue[0], wait on ipcQueue[1] for updated network
      parameters addressed to this pid, install them, clear the local
      batches, and return.
    * Local mode: compute A3C gradients in-process, accumulate them, apply
      them every GRADIENT_BATCH_SIZE rounds, write TF summaries, and
      periodically checkpoint the session to disk.

    No-op when the agent is read-only.
    """
    if self._vReadOnly:
        return
    if self.ipcQueue:
        # Send this worker's experience batch to the central trainer.
        self.ipcQueue[0].put({
            "id": self.ipcId,
            "cmd": IPC_CMD_UPDATE,
            "pid": self.pid,
            "data": [
                self.s_batch, self.a_batch, self.r_batch,
                self.entropy_record, end_of_video
            ]
        })
        res = None
        # Block until a response addressed to this pid arrives.
        # NOTE(review): responses whose pid differs are consumed and
        # dropped here — presumably each worker has its own reply queue;
        # otherwise other workers' updates would be lost. Verify.
        while True:
            res = self.ipcQueue[1].get()
            pid = res["pid"]
            res = res["res"]
            if pid == self.pid:
                break
        # Install the freshly trained parameters.
        actor_net_params, critic_net_params = res
        self.actor.set_network_params(actor_net_params)
        self.critic.set_network_params(critic_net_params)
        # Clear batches in place (shared references stay valid).
        del self.s_batch[:]
        del self.a_batch[:]
        del self.r_batch[:]
        del self.entropy_record[:]
        return
    # Local-training mode: compute gradients from the accumulated batch.
    actor_gradient, critic_gradient, td_batch = \
        a3c.compute_gradients(s_batch=np.stack(self.s_batch, axis=0),  # ignore the first chuck
                              a_batch=np.vstack(self.a_batch),  # since we don't have the
                              r_batch=np.vstack(self.r_batch),  # control over it
                              terminal=end_of_video, actor=self.actor,
                              critic=self.critic)
    td_loss = np.mean(td_batch)

    # Defer applying gradients until GRADIENT_BATCH_SIZE rounds accumulate.
    self.actor_gradient_batch.append(actor_gradient)
    self.critic_gradient_batch.append(critic_gradient)

    myprint("====")
    myprint("Quality: Epoch", self.epoch)
    myprint("TD_loss", td_loss, "Avg_reward", np.mean(self.r_batch),
            "Avg_entropy", np.mean(self.entropy_record))
    myprint("====")

    # TensorBoard summaries for this training round.
    summary_str = self.sess.run(self.summary_ops,
                                feed_dict={
                                    self.summary_vars[0]: td_loss,
                                    self.summary_vars[1]: np.mean(self.r_batch),
                                    self.summary_vars[2]: np.mean(self.entropy_record)
                                })

    self.writer.add_summary(summary_str, self.epoch)
    self.writer.flush()

    self.entropy_record = []

    if len(self.actor_gradient_batch) >= GRADIENT_BATCH_SIZE:
        assert len(self.actor_gradient_batch) == len(
            self.critic_gradient_batch)

        for i in range(len(self.actor_gradient_batch)):
            self.actor.apply_gradients(self.actor_gradient_batch[i])
            self.critic.apply_gradients(self.critic_gradient_batch[i])

        self.actor_gradient_batch = []
        self.critic_gradient_batch = []

        self.epoch += 1
        if self.epoch % MODEL_SAVE_INTERVAL == 0:
            # Save the neural net parameters to disk.
            save_path = self.saver.save(
                self.sess, self.summary_dir + "/nn_model_ep_" +
                str(self.epoch) + ".ckpt")
            myprint("Model saved in file: %s" % save_path)

    # Batches were consumed by this round; clear in place.
    del self.s_batch[:]
    del self.a_batch[:]
    del self.r_batch[:]
def draw(self): self.screen.blit(self.board_image, self.START) self.shape.draw(self.screen) if self.pause: util.myprint(self.screen, "PAUSE", (self.START[0]+50, self.START[1]+200), "m")