def run(self):
    txt_file = open("./result/events.txt", "w")
    count = 0
    while H.Now_step < H.SIM_END:
        # select the next time step
        H.Now_step, type, id = self.next()
        if H.Now_step >= H.SIM_END:
            break
        patient = self.waiting_place[type].send_patient()
        assert isinstance(patient, Patient)
        if not H.MUTE:
            H.print_update(H.Red, count, type, id)
        if H.RECORD:
            H.write_update(count, type, id, txt_file)

        # operate next
        before_state = patient.isRevisit()  # the state before serving
        patient = self.net[type][id].work(patient)  # transmit the patient
        TO = None
        if before_state and type == 0:  # revisiting doctor: leave hospital
            TO = 2019
        else:
            if len(patient.checklist) == 0:  # no pending checks remain
                if len(patient.check_list) == 0:  # never had any checks: leave hospital
                    TO = 2020
                else:  # all checks done: return to doctor 0 to review reports
                    TO = 0
                    if patient.time[0, -1] == 0:
                        last = self.argmax_report_time(patient)
                        patient.time[-1, 0] = (patient.time[last, 3]
                                               + self.walk_time[last, 0])
                        self.waiting_place[TO].add_patient(patient, True)
                    else:  # external arrivals are sent to other doctors
                        TO = 2000
            else:  # send the patient to the next pending check
                TO = patient.checklist.pop(0)
                patient.time[TO, 0] = (patient.time[type, 2]
                                       + self.walk_time[type, TO])
                self.waiting_place[TO].add_patient(patient)
        if not H.MUTE:
            H.print_transit(H.Yellow, TO, patient, type)
        if H.RECORD:
            H.write_transit(TO, patient, type, txt_file)
        count += 1
        if not H.MUTE:
            patient.print_info()
        if H.RECORD:
            H.write_info(patient, txt_file)
    txt_file.close()
def make_clips(avi_files, verbose, video_dir, dest_folder):
    c_error = 0
    c_ok = 0
    l_error = []
    l_ok = []
    len_total = len(avi_files)
    for vid_f in avi_files:
        # drop the original extension and replace it with .mp4
        new_fname = os.path.splitext(vid_f)[0] + '.mp4'
        command = ("ffmpeg -i " + os.path.join(video_dir, vid_f)
                   + " -vcodec libx264 -crf 18 -c:a aac -strict -2"
                   + " -pix_fmt yuv420p " + os.path.join(dest_folder, new_fname))
        print(command)
        process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
        process.wait()
        if process.returncode != 0:
            c_error += 1
            l_error.append((vid_f, ' '))
        else:
            c_ok += 1
            l_ok.append((vid_f, ' '))
        if verbose:
            # the success percentage may never reach 100 if some video IDs do not exist
            success_progress = 100.0 * c_ok / float(len_total)
            total_percentage = 100.0 * (c_ok + c_error) / float(len_total)
            print("*************************")
            print("Percent of videos successfully clipped: ",
                  round(success_progress, 3))
            print("Percent of total videos processed: ",
                  round(total_percentage, 3))
            print("*************************")
        if c_ok % 30 == 0:
            # periodically checkpoint progress to disk
            write_info(l_ok, c_ok, dest_folder + "/" + "ff_ok_iter_avi.txt")
            write_info(l_error, c_error, dest_folder + "/" + "ff_fail_iter_avi.txt")
        # give ffmpeg a moment between runs
        sleep(3)
    print("Number of times an error happened:", c_error)
    print("Number of files successfully encoded:", c_ok)
    return l_ok, c_ok, l_error, c_error
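# make_clips checkpoints its progress through a write_info helper that is not
# defined in this snippet. Below is a minimal sketch, assuming only what the
# call sites above fix: a list of (filename, note) tuples, a running count,
# and an output path. It is an illustration, not the original implementation.
def write_info(items, count, path):
    """Persist a progress checkpoint so an interrupted run can be inspected."""
    with open(path, "w") as f:
        f.write("count: {}\n".format(count))
        for fname, note in items:
            f.write("{}\t{}\n".format(fname, note))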
def test(self):
    # Test and save the result
    test_color_result = self._test(self.test_color_loader)
    test_gray_result = self._test(self.test_gray_loader)
    utils.save_pkl(test_color_result,
                   os.path.join(self.save_path, 'test_color_result.pkl'))
    utils.save_pkl(test_gray_result,
                   os.path.join(self.save_path, 'test_gray_result.pkl'))

    # Output the classification accuracy on the test set
    info = ('Test on color images accuracy: {}, domain accuracy: {}\n'
            'Test on gray images accuracy: {}, domain accuracy: {}'.format(
                test_color_result['class_accuracy'],
                test_color_result['domain_accuracy'],
                test_gray_result['class_accuracy'],
                test_gray_result['domain_accuracy']))
    utils.write_info(os.path.join(self.save_path, 'test_result.txt'), info)
def test(self):
    # Test and save the result
    state_dict = torch.load(os.path.join(self.save_path, 'best.pth'))
    self.load_state_dict(state_dict)

    dev_class_loss, dev_domain_loss, dev_class_output, dev_domain_output, \
        dev_feature, dev_domain_accuracy = self._test(self.dev_loader)
    dev_predict_prob = self.inference(dev_class_output)
    dev_per_class_AP = utils.compute_weighted_AP(self.dev_target,
                                                 dev_predict_prob,
                                                 self.dev_class_weight)
    dev_mAP = utils.compute_mAP(dev_per_class_AP, self.subclass_idx)
    dev_result = {
        'output': dev_class_output.cpu().numpy(),
        'feature': dev_feature.cpu().numpy(),
        'per_class_AP': dev_per_class_AP,
        'mAP': dev_mAP,
        'domain_output': dev_domain_output.cpu().numpy(),
        'domain_accuracy': dev_domain_accuracy
    }
    utils.save_pkl(dev_result, os.path.join(self.save_path, 'dev_result.pkl'))

    test_class_loss, test_domain_loss, test_class_output, test_domain_output, \
        test_feature, test_domain_accuracy = self._test(self.test_loader)
    test_predict_prob = self.inference(test_class_output)
    test_per_class_AP = utils.compute_weighted_AP(self.test_target,
                                                  test_predict_prob,
                                                  self.test_class_weight)
    test_mAP = utils.compute_mAP(test_per_class_AP, self.subclass_idx)
    test_result = {
        'output': test_class_output.cpu().numpy(),
        'feature': test_feature.cpu().numpy(),
        'per_class_AP': test_per_class_AP,
        'mAP': test_mAP,
        'domain_output': test_domain_output.cpu().numpy(),
        'domain_accuracy': test_domain_accuracy
    }
    utils.save_pkl(test_result, os.path.join(self.save_path, 'test_result.pkl'))

    # Output the mean AP for the best model on the dev and test sets
    info = ('Dev mAP: {}\n'
            'Test mAP: {}'.format(dev_mAP, test_mAP))
    utils.write_info(os.path.join(self.save_path, 'result.txt'), info)
def test(self):
    # Test and save the result
    state_dict = torch.load(os.path.join(self.save_path, 'ckpt.pth'))
    self.load_state_dict(state_dict)
    test_color_result = self._test(self.test_color_loader)
    test_gray_result = self._test(self.test_gray_loader)
    utils.save_pkl(test_color_result,
                   os.path.join(self.save_path, 'test_color_result.pkl'))
    utils.save_pkl(test_gray_result,
                   os.path.join(self.save_path, 'test_gray_result.pkl'))

    # Output the classification accuracy on the test set for different
    # inference methods
    info = ('Test on color images accuracy sum prob without prior shift: {}\n'
            'Test on color images accuracy sum prob with prior shift: {}\n'
            'Test on color images accuracy max prob with prior shift: {}\n'
            'Test on gray images accuracy sum prob without prior shift: {}\n'
            'Test on gray images accuracy sum prob with prior shift: {}\n'
            'Test on gray images accuracy max prob with prior shift: {}\n'
            .format(test_color_result['accuracy_sum_prob_wo_prior_shift'],
                    test_color_result['accuracy_sum_prob_w_prior_shift'],
                    test_color_result['accuracy_max_prob_w_prior_shift'],
                    test_gray_result['accuracy_sum_prob_wo_prior_shift'],
                    test_gray_result['accuracy_sum_prob_w_prior_shift'],
                    test_gray_result['accuracy_max_prob_w_prior_shift']))
    utils.write_info(os.path.join(self.save_path, 'test_result.txt'), info)
def test(self):
    # Test and save the result
    state_dict = torch.load(os.path.join(self.save_path, 'ckpt.pth'))
    self.load_state_dict(state_dict)
    test_color_result = self._test(self.test_color_loader, test_on_color=True)
    test_gray_result = self._test(self.test_gray_loader, test_on_color=False)
    utils.save_pkl(test_color_result,
                   os.path.join(self.save_path, 'test_color_result.pkl'))
    utils.save_pkl(test_gray_result,
                   os.path.join(self.save_path, 'test_gray_result.pkl'))

    # Output the classification accuracy on the test set for different
    # inference methods
    info = ('Test on color images accuracy conditional: {}\n'
            'Test on color images accuracy sum out: {}\n'
            'Test on gray images accuracy conditional: {}\n'
            'Test on gray images accuracy sum out: {}\n'.format(
                test_color_result['accuracy_conditional'],
                test_color_result['accuracy_sum_out'],
                test_gray_result['accuracy_conditional'],
                test_gray_result['accuracy_sum_out']))
    utils.write_info(os.path.join(self.save_path, 'test_result.txt'), info)
def train_model(args):
    """Train model."""
    train_mtx, vocab, train_labels = check_and_load_training_data(args)

    # -- configuration --
    config = utils.create_baysmm_config(args)
    config['vocab_size'] = len(vocab)
    config['n_docs'] = train_mtx.shape[0]
    config['dtype'] = 'float'
    # -- end of configuration --

    logging.basicConfig(format='%(asctime)s %(message)s',
                        datefmt='%d-%m-%Y %H:%M:%S',
                        filename=config['exp_dir'] + 'training.log',
                        filemode='a',
                        level=getattr(logging, args.log.upper(), None))
    print("Log file:", config['exp_dir'] + 'training.log')
    if args.v:
        logging.getLogger().addHandler(logging.StreamHandler())
    logging.info('PyTorch version: %s', str(torch.__version__))

    model = create_model(train_mtx, config, args)
    # params = utils.load_params(eng_model_h5_file)
    # model.Q.data = params['Q'].to(device=model.device)
    # if config['cuda']:
    #     utils.estimate_approx_num_batches(model, train_mtx)

    if args.trn <= config['trn_done']:
        logging.info('Found model that is already trained.')
        return

    config['trn_iters'] = args.trn
    optims = create_optimizers(model, config)
    # optims['Q'] = None
    utils.save_config(config)

    dset = utils.SMMDataset(train_mtx, train_labels, len(vocab), 'unsup')
    dset.to_device(model.device)

    if args.batchwise:
        model, loss_iters = batch_wise_training(model, optims, dset, config, args)
    else:
        logging.info('Training on {:d} docs.'.format(config['n_docs']))
        loss_iters = model.train_me(dset, optims, args.nb)

    t_sparsity = utils.t_sparsity(model)
    utils.write_info(model.config, "Sparsity in T: {:.2f}".format(t_sparsity))
    logging.info("Initial ELBO: {:.1f}".format(-loss_iters[0][0]))
    logging.info("  Final ELBO: {:.1f}".format(-loss_iters[-1][0]))
    logging.info("Sparsity in T: {:.2f}".format(t_sparsity))

    utils.save_model(model)
    base = os.path.basename(args.mtx_file).split('.')[0]
    sfx = "_T{:d}".format(config['trn_done'])
    utils.save_loss(loss_iters, model.config, base, sfx)
def build_info(info):
    # Render once, write it out, and return the same rendered value.
    rendered = render_info(info)
    write_info(rendered)
    return rendered
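# Neither render_info nor write_info is defined alongside build_info. Purely
# as hypothetical stand-ins, consistent with how they are called (render_info
# produces the value that is both written and returned; write_info takes that
# single value):
def render_info(info):
    # Assumed: info is a mapping, rendered one "key: value" pair per line.
    return "\n".join("{}: {}".format(k, v) for k, v in info.items())

def write_info(text, path="info.txt"):
    # Assumed destination path; the real helper may write elsewhere.
    with open(path, "w") as f:
        f.write(text)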
async def info(ctx):
    embed = write_info()
    await ctx.send(embed=embed)
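# The command above assumes write_info() returns a discord.Embed. A minimal
# hypothetical sketch of such a helper, assuming discord.py, might be:
import discord

def write_info():
    embed = discord.Embed(title="Info", description="Bot status and usage.")
    embed.add_field(name="Prefix", value="!", inline=True)  # placeholder field
    return embed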
                                             data_val_2, data_val_3, gt_val)
            acc = (val_acc[0] + val_acc[1] + val_acc[2]) / 3
            if acc >= best_acc:
                best_acc = acc
                best_epoch = epoch + 1
                cnn_model.saver.save(sess,
                                     '%s/checkpoint' % FLAGS.train_dir,
                                     global_step=cnn_model.global_step)
            epoch_time = time.time() - start_time
            document(epoch, epoch_time, train_loss, train_acc, val_loss,
                     val_acc, doc)
            plot_doc(doc)
            write_info(best_epoch, best_acc)
    else:
        cnn_model = Model()
        if FLAGS.inference_version == -1:
            print("Please set the inference version!")
        else:
            model_path = '%s/checkpoint-%08d' % (FLAGS.train_dir,
                                                 FLAGS.inference_version)
            cnn_model.saver.restore(sess, model_path)
            data_1, data_2, data_3, gt = get_data()
            test_acc, left_acc, right_acc = test(cnn_model, sess, data_1,
                                                 data_2, data_3, gt)
            write_test(test_acc, left_acc, right_acc)
def test(self):
    # Test and save the result for different inference methods
    dev_mAP_conditional = self._compute_result(
        'best-conditional.pth', self.dev_loader, self.dev_target,
        self.dev_class_weight, self.inference_conditional,
        'dev_conditional_result.pkl', conditional=True)
    test_mAP_conditional = self._compute_result(
        'best-conditional.pth', self.test_loader, self.test_target,
        self.test_class_weight, self.inference_conditional,
        'test_conditional_result.pkl', conditional=True)
    dev_mAP_max = self._compute_result(
        'best-max.pth', self.dev_loader, self.dev_target,
        self.dev_class_weight, self.inference_max, 'dev_max_result.pkl')
    test_mAP_max = self._compute_result(
        'best-max.pth', self.test_loader, self.test_target,
        self.test_class_weight, self.inference_max, 'test_max_result.pkl')
    dev_mAP_sum_prob = self._compute_result(
        'best-sum_prob.pth', self.dev_loader, self.dev_target,
        self.dev_class_weight, self.inference_sum_prob,
        'dev_sum_prob_result.pkl')
    test_mAP_sum_prob = self._compute_result(
        'best-sum_prob.pth', self.test_loader, self.test_target,
        self.test_class_weight, self.inference_sum_prob,
        'test_sum_prob_result.pkl')
    dev_mAP_sum_out = self._compute_result(
        'best-sum_out.pth', self.dev_loader, self.dev_target,
        self.dev_class_weight, self.inference_sum_out,
        'dev_sum_out_result.pkl')
    test_mAP_sum_out = self._compute_result(
        'best-sum_out.pth', self.test_loader, self.test_target,
        self.test_class_weight, self.inference_sum_out,
        'test_sum_out_result.pkl')

    # Output the mean AP for the best model on the dev and test sets
    info = ('Dev conditional mAP: {}, max mAP: {}, sum prob mAP: {}, '
            'sum out mAP: {}\n'
            'Test conditional mAP: {}, max mAP: {}, sum prob mAP: {}, '
            'sum out mAP: {}').format(
                dev_mAP_conditional, dev_mAP_max, dev_mAP_sum_prob,
                dev_mAP_sum_out, test_mAP_conditional, test_mAP_max,
                test_mAP_sum_prob, test_mAP_sum_out)
    utils.write_info(os.path.join(self.save_path, 'result.txt'), info)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--video_dir', required=True,
                        help='source video files dir')
    parser.add_argument('--output', required=True,
                        help='dest folder for clip files')
    parser.add_argument('--verbose', help='show progress', default=True)
    args = parser.parse_args()

    avi_files = get_list_files(args.video_dir, f_ext='avi')
    # make_clips returns the lists of videos that were successfully
    # converted and those that failed
    i_success, c_success, i_fail, c_fail = make_clips(avi_files, args.verbose,
                                                      args.video_dir,
                                                      args.output)
    write_info(i_success, c_success, args.output + "/ff_ok.txt")
    write_info(i_fail, c_fail, args.output + "/ff_fail.txt")
def main(args):
    # Set seed
    utils.set_seed_everywhere(args.seed)

    # Initialize environments
    gym.logger.set_level(40)
    image_size = 84 if args.algorithm == 'sac' else 100
    env = make_env(domain_name=args.domain_name,
                   task_name=args.task_name,
                   seed=args.seed,
                   episode_length=args.episode_length,
                   action_repeat=args.action_repeat,
                   image_size=image_size,
                   mode='train')
    test_env = make_env(domain_name=args.domain_name,
                        task_name=args.task_name,
                        seed=args.seed + 42,
                        episode_length=args.episode_length,
                        action_repeat=args.action_repeat,
                        image_size=image_size,
                        mode=args.eval_mode)

    # Create working directory
    work_dir = os.path.join(args.log_dir,
                            args.domain_name + '_' + args.task_name,
                            args.algorithm, str(args.seed))
    print('Working directory:', work_dir)
    assert not os.path.exists(os.path.join(work_dir, 'train.log')), \
        'specified working directory already exists'
    utils.make_dir(work_dir)
    model_dir = utils.make_dir(os.path.join(work_dir, 'model'))
    video_dir = utils.make_dir(os.path.join(work_dir, 'video'))
    video = VideoRecorder(video_dir if args.save_video else None,
                          height=448, width=448)
    utils.write_info(args, os.path.join(work_dir, 'info.log'))

    # Prepare agent
    assert torch.cuda.is_available(), 'must have cuda enabled'
    replay_buffer = utils.ReplayBuffer(obs_shape=env.observation_space.shape,
                                       action_shape=env.action_space.shape,
                                       capacity=args.train_steps,
                                       batch_size=args.batch_size)
    cropped_obs_shape = (3 * args.frame_stack, 84, 84)
    agent = make_agent(obs_shape=cropped_obs_shape,
                       action_shape=env.action_space.shape,
                       args=args)

    start_step, episode, episode_reward, done = 0, 0, 0, True
    L = Logger(work_dir)
    start_time = time.time()
    for step in range(start_step, args.train_steps + 1):
        if done:
            if step > start_step:
                L.log('train/duration', time.time() - start_time, step)
                start_time = time.time()
                L.dump(step)

            # Evaluate agent periodically
            if step % args.eval_freq == 0:
                print('Evaluating:', work_dir)
                L.log('eval/episode', episode, step)
                evaluate(env, agent, video, args.eval_episodes, L, step)
                evaluate(test_env, agent, video, args.eval_episodes, L, step,
                         test_env=True)
                L.dump(step)

            # Save agent periodically
            if step > start_step and step % args.save_freq == 0:
                torch.save(agent, os.path.join(model_dir, f'{step}.pt'))

            L.log('train/episode_reward', episode_reward, step)

            obs = env.reset()
            done = False
            episode_reward = 0
            episode_step = 0
            episode += 1

            L.log('train/episode', episode, step)

        # Sample action for data collection
        if step < args.init_steps:
            action = env.action_space.sample()
        else:
            with utils.eval_mode(agent):
                action = agent.sample_action(obs)

        # Run training update
        if step >= args.init_steps:
            num_updates = args.init_steps if step == args.init_steps else 1
            for _ in range(num_updates):
                agent.update(replay_buffer, L, step)

        # Take step
        next_obs, reward, done, _ = env.step(action)
        done_bool = 0 if episode_step + 1 == env._max_episode_steps else float(done)
        replay_buffer.add(obs, action, reward, next_obs, done_bool)
        episode_reward += reward
        obs = next_obs
        episode_step += 1

    print('Completed training for', work_dir)
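# A hypothetical invocation, assuming the surrounding script exposes the
# attributes read from args (domain_name, task_name, algorithm, seed,
# log_dir, eval_mode, ...) as argparse flags of the same names; the flag
# values here are placeholders, not confirmed defaults:
#
#   python train.py --domain_name walker --task_name walk \
#       --algorithm sac --seed 0 --log_dir logs --eval_mode color_hard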