def get_modules(samples, path_py, path_cy, ignore_error=False):
    """Benchmark a pure-Python and/or a Cython-compiled module.

    Parameters
    ----------
    samples : str
        Comma-separated sample sizes, e.g. ``"10,100,1000"``; parsed to a
        sorted list of ints before timing.
    path_py : str
        Path to a ``.py`` module to time, or ``""`` to skip.
    path_cy : str
        Path to a ``.cp36-win_amd64.pyd`` module to time, or ``""`` to skip.
    ignore_error : bool
        Forwarded to the Timer call; presumably suppresses per-sample
        failures — TODO confirm against Timer's implementation.
    """
    sample_sizes = sorted(int(s) for s in samples.split(","))
    if path_py != "":
        # Strip the plain-Python extension to recover the module name.
        _load_and_time(path_py, ".py", sample_sizes, ignore_error)
    if path_cy != "":
        # Compiled-extension suffix is platform specific (CPython 3.6, win64).
        _load_and_time(path_cy, ".cp36-win_amd64.pyd", sample_sizes, ignore_error)


def _load_and_time(path, suffix, sample_sizes, ignore_error):
    """Import the module at *path* by file location and run Timer over it.

    Shared by the .py and .pyd branches of get_modules; behavior is
    identical to the previously duplicated inline code except the module
    directory is joined with a single "/" instead of "//" (the doubled
    separator produced malformed sys.path entries).
    """
    parts = path.split("/")
    module_name = parts[-1].replace(suffix, "")
    module_dir = "/".join(parts[:-1])
    spec = spec_from_file_location(module_name, path)
    module = module_from_spec(spec)
    # Make sibling modules importable before executing the target module.
    sys.path.insert(0, module_dir)
    spec.loader.exec_module(module)
    timer = Timer(module, spec)
    timer(sample_sizes, ignore_error)
def post(self):
    # Create (if needed) the Spot referenced by the request, then open a
    # Timer for the current user at that spot and return its id as JSON.
    #
    # NOTE(review): the model attribute is spelled `forsquare_id` while the
    # request key is `foursquare_id` — looks intentional (matches the Spot
    # model's column name) but worth confirming against the schema.
    checked_request = self.__check_request(request.form)
    if not Spot.find_by(forsquare_id=checked_request['foursquare_id']):
        # Spot is unknown: re-validate with the stricter check that also
        # requires location/lat/lng fields, then persist a new Spot.
        checked_request = self.__re_check_request(request.form)
        spot = Spot(forsquare_id=checked_request['foursquare_id'], name=checked_request['_location'], latlng='POINT(' + checked_request['lng'] + ' ' + checked_request['lat'] + ")")
        spot.insert()
    # Re-fetch so `spot` is bound (and has its DB id) on both branches.
    spot = Spot.find_by(forsquare_id=checked_request['foursquare_id'])
    with Timer.transaction():
        # Start a timer for the authenticated user at this spot.
        timer = Timer(user_id=g.user.id, spot_id=spot.id, start_at=checked_request["start_at"])
        timer.insert()
    return jsonify(status=200, message='ok', request=request.form, response={'timer_id': timer.id})
def _timer_initialize(self, complexity):
    """Build the game timer from the difficulty settings and start it.

    `complexity['timer']` supplies the timer configuration; the timer
    reports ticks to the view and fires the game-over callback on expiry.
    """
    duration = complexity['timer']
    timer = Timer(
        duration,
        self._nightmare_mode,
        self._view.timer_update,
        self._game_over,
    )
    self._timer = timer
    timer.start()
def train():
    # Main training loop: runs `max_epoch` epochs over the dataset,
    # logging losses/summaries every 50 global steps and checkpointing
    # every 10000 steps and at each epoch end. Relies on module-level
    # globals (cfg, data, sess, input_, get_boxes, get_classes, train_op,
    # global_step, total_loss, cls_loss, cnt_loss, reg_loss, lr,
    # summary_op, summary_writer, saver).
    total_timer = Timer()
    train_timer = Timer()  # NOTE(review): created but never used below
    load_timer = Timer()   # NOTE(review): tic() is called but toc() is commented out
    max_epoch = 30
    # Steps per epoch; integer division of dataset size by batch size.
    epoch_step = int(cfg.train_num//cfg.batch_size)
    t = 1
    for epoch in range(1, max_epoch + 1):
        print('-'*25, 'epoch', epoch,'/',str(max_epoch), '-'*25)
        # NOTE(review): these per-epoch accumulators are initialized but
        # never updated in the loop, so '>> mean loss' below always prints 0.
        t_loss = 0
        ll_loss = 0
        r_loss = 0
        c_loss = 0
        for step in range(1, epoch_step + 1):
            t = t + 1
            total_timer.tic()
            load_timer.tic()
            # Fetch one batch: images, box labels, image names/sizes.
            images, labels, imnm, num_boxes, imsize = data.get()
            # load_timer.toc()
            # Boxes are labels[..., 1:5], classes are labels[..., 0];
            # the [:, ::-1] reverses along the per-image box axis —
            # presumably to match an ordering the model expects; verify.
            feed_dict = {input_: images,
                         get_boxes: labels[..., 1:5][:, ::-1, :],
                         get_classes: labels[..., 0].reshape((cfg.batch_size, -1))[:, ::-1]
                         }
            if cfg.cnt_branch:
                # Centerness branch enabled: also fetch cnt_loss.
                _, g_step_, tt_loss, cl_loss, cn_loss, re_loss, lr_ = sess.run(
                    [train_op, global_step, total_loss, cls_loss, cnt_loss, reg_loss, lr],
                    feed_dict = feed_dict)
            else:
                _, g_step_, tt_loss, cl_loss, re_loss, lr_ = sess.run(
                    [train_op, global_step, total_loss, cls_loss, reg_loss, lr],
                    feed_dict = feed_dict)
            total_timer.toc()
            # Periodic console/TensorBoard logging.
            if g_step_%50 ==0:
                sys.stdout.write('\r>> ' + 'iters '+str(g_step_)+str('/')+str(epoch_step*max_epoch)+' loss '+str(tt_loss) + ' ')
                sys.stdout.flush()
                summary_str = sess.run(summary_op, feed_dict = feed_dict)
                # NOTE(review): cn_loss is only assigned when
                # cfg.cnt_branch is True; when it is False this summary
                # raises NameError (or reuses a stale value) — confirm
                # cnt_branch is always enabled for this config.
                train_total_summary = tf.Summary(value=[
                    tf.Summary.Value(tag="config/learning rate", simple_value=lr_),
                    tf.Summary.Value(tag="train/classification/focal_loss", simple_value=cfg.class_weight*cl_loss),
                    tf.Summary.Value(tag="train/classification/cnt_loss", simple_value=cfg.cnt_weight*cn_loss),
                    # tf.Summary.Value(tag="train/p_nm", simple_value=p_nm_),
                    tf.Summary.Value(tag="train/regress_loss", simple_value=cfg.regress_weight*re_loss),
                    # tf.Summary.Value(tag="train/clone_loss", simple_value=cfg.class_weight*cl_loss + cfg.regress_weight*re_loss + cfg.cnt_weight*cn_loss),
                    # tf.Summary.Value(tag="train/l2_loss", simple_value=l2_loss),
                    tf.Summary.Value(tag="train/total_loss", simple_value=tt_loss)
                    ])
                print('curent speed: ', total_timer.diff, 'remain time: ', total_timer.remain(g_step_, epoch_step*max_epoch))
                summary_writer.add_summary(summary_str, g_step_)
                summary_writer.add_summary(train_total_summary, g_step_)
            # Periodic checkpoint by global step.
            if g_step_%10000 == 0:
                print('saving checkpoint')
                saver.save(sess, cfg.ckecpoint_file + '/model.ckpt', g_step_)
            # NOTE(review): second toc() for the same tic() — likely
            # distorts the running average; confirm Timer semantics.
            total_timer.toc()
        # End-of-epoch reporting and checkpoint.
        sys.stdout.write('\n')
        print('>> mean loss', t_loss)
        print('curent speed: ', total_timer.average_time, 'remain time: ', total_timer.remain(g_step_, epoch_step*max_epoch))
        print('saving checkpoint')
        saver.save(sess, cfg.ckecpoint_file + '/model.ckpt', g_step_)
        print('epoch', epoch)

# ---- Module-level session/graph setup (runs on import) ----
g_list = tf.global_variables()
sess = tf.Session()
summary_writer = tf.summary.FileWriter(cfg.ckecpoint_file, sess.graph)
sess.run(tf.global_variables_initializer())
if restore_path is not None:
    # Warm-start all global variables from an existing checkpoint.
    print('Restoring weights from: ' + restore_path)
    restorer = tf.train.Saver(g_list)
    restorer.restore(sess, restore_path)

if __name__ == '__main__':
    # Validation pass over the test split.
    # NOTE(review): this block is truncated in the visible source (the
    # feed_dict literal below is left open); the remainder continues
    # beyond this chunk and is reproduced here unchanged.
    val_timer = Timer()
    data = pascal_voc('test', False, cfg.test_img_path, cfg.test_label_path, cfg.test_img_txt, False)
    val_pred = []
    # saved_pred = {}
    gt_dict = {}
    val_rloss = 0
    val_closs = 0
    val_cnt_loss = 0
    for val_step in range(1, cfg.test_num + 1):
        val_timer.tic()
        images, labels, imnm, num_boxes, imsize = data.get()
        feed_dict = {
            input_: images,
            get_boxes: labels[..., 1:5][:, ::-1, :],
            get_classes: labels[..., 0].reshape((cfg.batch_size, -1))[:, ::-1]