def backward():
    """Build the YOLOv4 training graph and run the training loop.

    Reads its configuration (paths, hyper-parameters such as ``batch_size``,
    ``class_num``, ``anchors``, learning-rate settings, ``total_epoch`` …)
    from module-level names, restores the latest checkpoint from
    ``model_path`` when one exists, trains for ``total_epoch`` epochs, and
    saves a checkpoint every ``save_per_epoch`` epochs plus a final one.

    Returns:
        int: 0 on normal completion.

    Raises:
        ValueError: when the loss explodes (> 1e3 after the first 1e3 steps),
            as a signal to lower the learning rate.
    """
    yolo = YOLO()
    data = Data(voc_root_dir, names_file, class_num, batch_size, anchors,
                is_tiny=False, size=size)

    # Placeholders: input images and the three ground-truth feature maps
    # (one per YOLO detection scale, 3 anchors each, 4 box coords + 1
    # objectness + class_num class scores per anchor).
    inputs = tf.compat.v1.placeholder(dtype=tf.float32,
                                      shape=[batch_size, None, None, 3])
    y1_true = tf.compat.v1.placeholder(
        dtype=tf.float32, shape=[batch_size, None, None, 3, 4 + 1 + class_num])
    y2_true = tf.compat.v1.placeholder(
        dtype=tf.float32, shape=[batch_size, None, None, 3, 4 + 1 + class_num])
    y3_true = tf.compat.v1.placeholder(
        dtype=tf.float32, shape=[batch_size, None, None, 3, 4 + 1 + class_num])

    feature_y1, feature_y2, feature_y3 = yolo.forward(
        inputs, class_num, weight_decay=weight_decay, isTrain=True)
    global_step = tf.Variable(0, trainable=False)

    # loss value of yolov4; anchors are passed largest-scale first to match
    # the feature-map order.
    loss = Loss().yolo_loss([feature_y1, feature_y2, feature_y3],
                            [y1_true, y2_true, y3_true],
                            [anchors[2], anchors[1], anchors[0]],
                            width, height, class_num,
                            cls_normalizer=cls_normalizer,
                            iou_normalizer=iou_normalizer,
                            iou_thresh=iou_thresh,
                            prob_thresh=prob_thresh,
                            score_thresh=score_thresh)
    l2_loss = tf.compat.v1.losses.get_regularization_loss()

    epoch = compute_curr_epoch(global_step, batch_size, len(data.imgs_path))
    lr = Lr.config_lr(lr_type, lr_init, lr_lower=lr_lower,
                      piecewise_boundaries=piecewise_boundaries,
                      piecewise_values=piecewise_values, epoch=epoch)
    optimizer = Optimizer.config_optimizer(optimizer_type, lr, momentum)

    # Run batch-norm (and other) update ops before each train step, and
    # clip gradients by norm at 100 to keep training stable.
    update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        gvs = optimizer.compute_gradients(loss + l2_loss)
        clip_grad_var = [
            gv if gv[0] is None else [tf.clip_by_norm(gv[0], 100.), gv[1]]
            for gv in gvs
        ]
        train_step = optimizer.apply_gradients(clip_grad_var,
                                               global_step=global_step)

    # initialize
    init = tf.compat.v1.global_variables_initializer()
    saver = tf.compat.v1.train.Saver()
    with tf.compat.v1.Session() as sess:
        sess.run(init)
        step = 0
        ckpt = tf.compat.v1.train.get_checkpoint_state(model_path)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            # The global step is the trailing "-NNN" suffix of the
            # checkpoint file name.
            step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
            # BUGFIX: was `eval(step)` — evaluating a path-derived string is
            # unsafe and unnecessary; the suffix is a plain integer.
            step = int(step)
            Log.add_log("message: load ckpt model, global_step=" + str(step))
        else:
            # BUGFIX: message read "message:can not fint ckpt model".
            Log.add_log("message: can not find ckpt model")

        curr_epoch = step // data.steps_per_epoch
        while curr_epoch < total_epoch:
            for _ in range(data.steps_per_epoch):
                start = time.perf_counter()
                batch_img, y1, y2, y3 = next(data)
                _, loss_, step, lr_ = sess.run(
                    [train_step, loss, global_step, lr],
                    feed_dict={
                        inputs: batch_img,
                        y1_true: y1,
                        y2_true: y2,
                        y3_true: y3
                    })
                end = time.perf_counter()

                if (loss_ > 1e3) and (step > 1e3):
                    Log.add_log("error:loss exception, loss_value = " +
                                str(loss_))
                    # break the process or lower learning rate
                    raise ValueError("error:loss exception, loss_value = " +
                                     str(loss_) +
                                     ", please lower your learning rate")
                    # lr = tf.math.maximum(tf.math.divide(lr, 10), config.lr_lower)

                # Log roughly every 5 steps (steps where step % 5 == 2).
                if step % 5 == 2:
                    print(
                        "step: %6d, epoch: %3d, loss: %.5g\t, wh: %3d, lr:%.5g\t, time: %5f s"
                        % (step, curr_epoch, loss_, width, lr_, end - start))
                    Log.add_loss(str(step) + "\t" + str(loss_))

            curr_epoch += 1
            if curr_epoch % save_per_epoch == 0:
                # save ckpt model
                Log.add_log("message: save ckpt model, step=" + str(step) +
                            ", lr=" + str(lr_))
                saver.save(sess, path.join(model_path, model_name),
                           global_step=step)

        Log.add_log("message: save final ckpt model, step=" + str(step))
        saver.save(sess, path.join(model_path, model_name), global_step=step)
    return 0
# NOTE(review): this fragment begins mid-statement — the line below closes a
# call (keyword arg `road_timings=`) whose opening is outside this chunk,
# presumably one of the thread submissions. Code left byte-identical.
road_timings=ordered_road_timings)
#
# Collect the (timing, route list) result pair from each of the four worker
# futures/threads. NOTE(review): thread1..thread4, `o`, `oh`, `util` and the
# config constants are defined outside this chunk — presumably
# concurrent.futures futures and helper objects; verify against the caller.
timer1, route_list_1 = thread1.result()
timer2, route_list_2 = thread2.result()
timer3, route_list_3 = thread3.result()
timer4, route_list_4 = thread4.result()
#
# Gather all four timings into one list, in worker order.
all_timings = []
all_timings.append(timer1)
all_timings.append(timer2)
all_timings.append(timer3)
all_timings.append(timer4)
#
# Gather the four candidate route lists in the same worker order, so
# all_timings[i] corresponds to all_routes[i].
all_routes = []
all_routes.append(route_list_1)
all_routes.append(route_list_2)
all_routes.append(route_list_3)
all_routes.append(route_list_4)
#
# Calling the optimizer method to decide which was the fastest route and writing contents of routes to rd file
routes, timing = o.get_optimizer_results(all_timings, all_routes)
for route in routes:
    oh.write_data(ROUTE_FILE_PATH, route)
print(timing)
#
# ret, route_file_list = util.route_finder(
#     inward=inward,
#     timer=timer,
#     patience_level=patience_level,
#     current_loc=current_loc)
# print(route_file_list, ret)