def load_model(model_name, weight_path, input_size, framework):
    assert model_name in ['yolov3_tiny', 'yolov3', 'yolov4']
    NUM_CLASS = len(utils.read_class_names(cfg.YOLO.CLASSES))

    if framework == 'tf':
        input_layer = tf.keras.layers.Input([input_size, input_size, 3])
        if model_name == 'yolov3_tiny':
            feature_maps = YOLOv3_tiny(input_layer, NUM_CLASS)
            bbox_tensors = []
            for i, fm in enumerate(feature_maps):
                bbox_tensor = ops.decode(fm, NUM_CLASS)
                bbox_tensors.append(bbox_tensor)
            model = tf.keras.Model(input_layer, bbox_tensors)
        elif model_name == 'yolov3':
            feature_maps = YOLOv3(input_layer, NUM_CLASS)
            bbox_tensors = []
            for i, fm in enumerate(feature_maps):
                bbox_tensor = ops.decode(fm, NUM_CLASS)
                bbox_tensors.append(bbox_tensor)
            model = tf.keras.Model(input_layer, bbox_tensors)
        elif model_name == 'yolov4':
            feature_maps = YOLOv4(input_layer, NUM_CLASS)
            bbox_tensors = []
            for i, fm in enumerate(feature_maps):
                bbox_tensor = ops.decode(fm, NUM_CLASS)
                bbox_tensors.append(bbox_tensor)
            model = tf.keras.Model(input_layer, bbox_tensors)
        else:
            model = None
            raise ValueError

        if weight_path.split(".")[-1] == "weights":
            if model_name == 'yolov3_tiny':
                utils.load_weights_tiny(model, weight_path)
                print('load yolo tiny 3')
            elif model_name == 'yolov3':
                utils.load_weights_v3(model, weight_path)
                print('load yolo 3')
            elif model_name == 'yolov4':
                utils.load_weights(model, weight_path)
                print('load yolo 4')
            else:
                raise ValueError
        else:
            model.load_weights(weight_path).expect_partial()
            print('Restoring weights from: %s ' % weight_path)

        return model
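
# --- Illustrative usage (not part of the original file) ---
# A minimal sketch of calling load_model(); the weight path and input size
# below are placeholder assumptions, and cfg.YOLO.CLASSES is assumed to be
# configured for the weights being loaded.
def _example_load_model():
    # A Darknet-format .weights file goes through utils.load_weights*;
    # any other extension is treated as a Keras checkpoint.
    model = load_model('yolov4', './data/yolov4.weights', 416, 'tf')
    model.summary()
    return model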
def transfer_tflite(model_name, weight_path, output, input_size):
    assert model_name in ['yolov3_tiny', 'yolov3', 'yolov4']
    NUM_CLASS = len(utils.read_class_names(cfg.YOLO.CLASSES))

    input_layer = tf.keras.layers.Input([input_size, input_size, 3])
    if model_name == 'yolov3_tiny':
        feature_maps = YOLOv3_tiny(input_layer, NUM_CLASS)
        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            bbox_tensor = ops.decode(fm, NUM_CLASS)
            bbox_tensors.append(bbox_tensor)
        model = tf.keras.Model(input_layer, bbox_tensors)
    elif model_name == 'yolov3':
        feature_maps = YOLOv3(input_layer, NUM_CLASS)
        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            bbox_tensor = ops.decode(fm, NUM_CLASS)
            bbox_tensors.append(bbox_tensor)
        model = tf.keras.Model(input_layer, bbox_tensors)
    elif model_name == 'yolov4':
        feature_maps = YOLOv4(input_layer, NUM_CLASS)
        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            bbox_tensor = ops.decode(fm, NUM_CLASS)
            bbox_tensors.append(bbox_tensor)
        model = tf.keras.Model(input_layer, bbox_tensors)
    else:
        model = None
        raise ValueError

    if weight_path.split(".")[-1] == "weights":
        if model_name == 'yolov3_tiny':
            utils.load_weights_tiny(model, weight_path)
        elif model_name == 'yolov3':
            utils.load_weights_v3(model, weight_path)
        elif model_name == 'yolov4':
            utils.load_weights(model, weight_path)
        else:
            raise ValueError
    else:
        model.load_weights(weight_path).expect_partial()
        print('Restoring weights from: %s ... ' % weight_path)

    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    # converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
    tflite_model = converter.convert()
    open(output, 'wb').write(tflite_model)
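
# --- Illustrative usage (not part of the original file) ---
# A minimal sketch of exporting a model to a .tflite flatbuffer with
# transfer_tflite(); the file names below are placeholder assumptions.
def _example_transfer_tflite():
    # Builds the yolov3_tiny graph, loads Darknet weights, and writes the
    # unquantized TFLite model to disk.
    transfer_tflite('yolov3_tiny', './data/yolov3-tiny.weights',
                    './data/yolov3-tiny.tflite', 416)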
def detect(model_name, weight_path, input_size, image_path, framework):
    assert model_name in ['yolov3_tiny', 'yolov3', 'yolov4']

    if model_name == 'yolov3_tiny':
        STRIDES = np.array(cfg.YOLO.STRIDES_TINY)
        ANCHORS = utils.get_anchors(cfg.YOLO.ANCHORS_TINY, True)
    elif model_name == 'yolov3':
        STRIDES = np.array(cfg.YOLO.STRIDES)
        ANCHORS = utils.get_anchors(cfg.YOLO.ANCHORS_V3, False)
    elif model_name == 'yolov4':
        STRIDES = np.array(cfg.YOLO.STRIDES)
        ANCHORS = utils.get_anchors(cfg.YOLO.ANCHORS, False)
    else:
        raise ValueError

    NUM_CLASS = len(utils.read_class_names(cfg.YOLO.CLASSES))
    XYSCALE = cfg.YOLO.XYSCALE

    original_image = cv2.imread(image_path)
    original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
    original_image_size = original_image.shape[:2]

    image_data = utils.image_preprocess(np.copy(original_image), [input_size, input_size])
    image_data = image_data[np.newaxis, ...].astype(np.float32)

    if framework == 'tf':
        input_layer = tf.keras.layers.Input([input_size, input_size, 3])
        if model_name == 'yolov3_tiny':
            feature_maps = YOLOv3_tiny(input_layer, NUM_CLASS)
            bbox_tensors = []
            for i, fm in enumerate(feature_maps):
                bbox_tensor = ops.decode(fm, NUM_CLASS)
                bbox_tensors.append(bbox_tensor)
            model = tf.keras.Model(input_layer, bbox_tensors)
        elif model_name == 'yolov3':
            feature_maps = YOLOv3(input_layer, NUM_CLASS)
            bbox_tensors = []
            for i, fm in enumerate(feature_maps):
                bbox_tensor = ops.decode(fm, NUM_CLASS)
                bbox_tensors.append(bbox_tensor)
            model = tf.keras.Model(input_layer, bbox_tensors)
        elif model_name == 'yolov4':
            feature_maps = YOLOv4(input_layer, NUM_CLASS)
            bbox_tensors = []
            for i, fm in enumerate(feature_maps):
                bbox_tensor = ops.decode(fm, NUM_CLASS)
                bbox_tensors.append(bbox_tensor)
            model = tf.keras.Model(input_layer, bbox_tensors)
        else:
            model = None
            raise ValueError

        if weight_path.split(".")[-1] == "weights":
            if model_name == 'yolov3_tiny':
                utils.load_weights_tiny(model, weight_path)
                # utils.extract_weights_tiny(model, weight_path)
                print('load yolo tiny 3')
            elif model_name == 'yolov3':
                utils.load_weights_v3(model, weight_path)
                print('load yolo 3')
            elif model_name == 'yolov4':
                utils.load_weights(model, weight_path)
                print('load yolo 4')
            else:
                raise ValueError
        elif weight_path.split(".")[-1] == "npy":
            if model_name == 'yolov3_tiny':
                # utils.load_weights_tiny_npy(model, weight_path)
                print('load yolo tiny 3 npy')
        else:
            model.load_weights(weight_path)
            print('Restoring weights from: %s ' % weight_path)

        # weight = np.load('D:\\coursera\\YoLoSerirs\\checkpoint\\yolo3_tiny.npy', allow_pickle=True)
        # model.set_weights(weight)
        # model.summary()

        start_time = time.time()
        pred_bbox = model.predict(image_data)
        print(time.time() - start_time)
    else:
        # Load TFLite model and allocate tensors.
        interpreter = tf.lite.Interpreter(model_path=weight_path)
        interpreter.allocate_tensors()
        # Get input and output tensors.
        input_details = interpreter.get_input_details()
        output_details = interpreter.get_output_details()
        print(input_details)
        print(output_details)

        interpreter.set_tensor(input_details[0]['index'], image_data)
        start_time = time.time()
        interpreter.invoke()
        pred_bbox = [interpreter.get_tensor(output_details[i]['index'])
                     for i in range(len(output_details))]
        print(time.time() - start_time)

    if model_name == 'yolov4':
        pred_bbox = utils.postprocess_bbbox(pred_bbox, ANCHORS, STRIDES, XYSCALE)
    else:
        pred_bbox = utils.postprocess_bbbox(pred_bbox, ANCHORS, STRIDES)

    bboxes = utils.postprocess_boxes(pred_bbox, original_image_size, input_size, 0.5)
    bboxes = utils.nms(bboxes, 0.3, method='nms')

    image = visualize.draw_bbox(original_image, bboxes)
    image = Image.fromarray(image)
    image.show()
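
# --- Illustrative usage (not part of the original file) ---
# A minimal sketch of single-image inference with detect(); the paths are
# placeholder assumptions. With framework='tf' the Keras model is built and
# run; any other value takes the TFLite branch, in which case weight_path
# should point to a .tflite file produced by transfer_tflite()/save_tflite().
def _example_detect():
    detect('yolov4', './data/yolov4.weights', 416, './data/kite.jpg', 'tf')
    # detect('yolov4', './data/yolov4-fp16.tflite', 416, './data/kite.jpg', 'tflite')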
def train(model_name, weight_path, save_path, logdir=None):
    assert model_name in ['yolov3_tiny', 'yolov3', 'yolov4']

    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    if len(physical_devices) > 0:
        tf.config.experimental.set_memory_growth(physical_devices[0], True)

    NUM_CLASS = len(utils.read_class_names(cfg.YOLO.CLASSES))
    STRIDES = np.array(cfg.YOLO.STRIDES)
    IOU_LOSS_THRESH = cfg.YOLO.IOU_LOSS_THRESH
    XYSCALE = cfg.YOLO.XYSCALE
    ANCHORS = utils.get_anchors(cfg.YOLO.ANCHORS)

    trainset = Dataset('train')
    testset = Dataset('test')
    isfreeze = False
    steps_per_epoch = len(trainset)
    first_stage_epochs = cfg.TRAIN.FISRT_STAGE_EPOCHS
    second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS
    global_steps = tf.Variable(1, trainable=False, dtype=tf.int64)
    warmup_steps = cfg.TRAIN.WARMUP_EPOCHS * steps_per_epoch
    total_steps = (first_stage_epochs + second_stage_epochs) * steps_per_epoch

    input_layer = tf.keras.layers.Input([cfg.TRAIN.INPUT_SIZE, cfg.TRAIN.INPUT_SIZE, 3])
    if model_name == 'yolov3_tiny':
        feature_maps = YOLOv3_tiny(input_layer, NUM_CLASS)
        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            bbox_tensor = ops.decode_train(fm, NUM_CLASS, STRIDES, ANCHORS, i)
            bbox_tensors.append(fm)
            bbox_tensors.append(bbox_tensor)
        model = tf.keras.Model(input_layer, bbox_tensors)
    elif model_name == 'yolov3':
        feature_maps = YOLOv3(input_layer, NUM_CLASS)
        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            bbox_tensor = ops.decode_train(fm, NUM_CLASS, STRIDES, ANCHORS, i)
            bbox_tensors.append(fm)
            bbox_tensors.append(bbox_tensor)
        model = tf.keras.Model(input_layer, bbox_tensors)
    elif model_name == 'yolov4':
        feature_maps = YOLOv4(input_layer, NUM_CLASS)
        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            bbox_tensor = ops.decode_train(fm, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE)
            bbox_tensors.append(fm)
            bbox_tensors.append(bbox_tensor)
        model = tf.keras.Model(input_layer, bbox_tensors)
    else:
        raise ValueError

    # for name in ['conv2d_93', 'conv2d_101', 'conv2d_109']:
    #     layer = model.get_layer(name)
    #     print(layer.name, layer.output_shape)

    if weight_path:
        if weight_path.split(".")[-1] == "weights":
            if model_name == 'yolov3_tiny':
                utils.load_weights_tiny(model, weight_path)
            elif model_name == 'yolov3':
                utils.load_weights_v3(model, weight_path)
            elif model_name == 'yolov4':
                utils.load_weights(model, weight_path)
            else:
                raise ValueError
        else:
            model.load_weights(weight_path)
        print('Restoring weights from: %s ... ' % weight_path)

    optimizer = tf.keras.optimizers.Adam()

    if logdir:
        if os.path.exists(logdir):
            shutil.rmtree(logdir)
        writer = tf.summary.create_file_writer(logdir)
    else:
        writer = None

    def train_step(image_data, target):
        with tf.GradientTape() as tape:
            pred_result = model(image_data, training=True)
            giou_loss = conf_loss = prob_loss = 0

            # optimizing process
            for i in range(3):
                conv, pred = pred_result[i * 2], pred_result[i * 2 + 1]
                loss_items = ops.compute_loss(pred, conv, target[i][0], target[i][1],
                                              STRIDES=STRIDES, NUM_CLASS=NUM_CLASS,
                                              IOU_LOSS_THRESH=IOU_LOSS_THRESH, i=i)
                giou_loss += loss_items[0]
                conf_loss += loss_items[1]
                prob_loss += loss_items[2]

            total_loss = giou_loss + conf_loss + prob_loss

            gradients = tape.gradient(total_loss, model.trainable_variables)
            optimizer.apply_gradients(zip(gradients, model.trainable_variables))
            tf.print("=> STEP %4d lr: %.6f giou_loss: %4.2f conf_loss: %4.2f "
                     "prob_loss: %4.2f total_loss: %4.2f" %
                     (global_steps, optimizer.lr.numpy(), giou_loss, conf_loss,
                      prob_loss, total_loss))

            # update learning rate
            global_steps.assign_add(1)
            if global_steps < warmup_steps:
                lr = global_steps / warmup_steps * cfg.TRAIN.LR_INIT
            else:
                lr = cfg.TRAIN.LR_END + \
                     0.5 * (cfg.TRAIN.LR_INIT - cfg.TRAIN.LR_END) * \
                     (1 + tf.cos((global_steps - warmup_steps) / (total_steps - warmup_steps) * np.pi))
            optimizer.lr.assign(lr.numpy())

            # if writer:
            #     # writing summary data
            #     with writer.as_default():
            #         tf.summary.scalar("lr", optimizer.lr, step=global_steps)
            #         tf.summary.scalar("loss/total_loss", total_loss, step=global_steps)
            #         tf.summary.scalar("loss/giou_loss", giou_loss, step=global_steps)
            #         tf.summary.scalar("loss/conf_loss", conf_loss, step=global_steps)
            #         tf.summary.scalar("loss/prob_loss", prob_loss, step=global_steps)
            #     writer.flush()

    def test_step(image_data, target):
        pred_result = model(image_data, training=True)
        giou_loss = conf_loss = prob_loss = 0

        # optimizing process
        for i in range(3):
            conv, pred = pred_result[i * 2], pred_result[i * 2 + 1]
            loss_items = ops.compute_loss(pred, conv, target[i][0], target[i][1],
                                          STRIDES=STRIDES, NUM_CLASS=NUM_CLASS,
                                          IOU_LOSS_THRESH=IOU_LOSS_THRESH, i=i)
            giou_loss += loss_items[0]
            conf_loss += loss_items[1]
            prob_loss += loss_items[2]

        total_loss = giou_loss + conf_loss + prob_loss
        tf.print("=> TEST STEP %4d giou_loss: %4.2f conf_loss: %4.2f "
                 "prob_loss: %4.2f total_loss: %4.2f" %
                 (global_steps, giou_loss, conf_loss, prob_loss, total_loss))

    for epoch in range(first_stage_epochs + second_stage_epochs):
        if epoch < first_stage_epochs:
            if not isfreeze:
                isfreeze = True
                for name in ['conv2d_93', 'conv2d_101', 'conv2d_109']:
                    freeze = model.get_layer(name)
                    ops.freeze_all(freeze)
        elif epoch >= first_stage_epochs:
            if isfreeze:
                isfreeze = False
                for name in ['conv2d_93', 'conv2d_101', 'conv2d_109']:
                    freeze = model.get_layer(name)
                    ops.unfreeze_all(freeze)

        for image_data, target in trainset:
            train_step(image_data, target)
        for image_data, target in testset:
            test_step(image_data, target)

        if save_path:
            model.save_weights(save_path)
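
# --- Illustrative sketch (not part of the original file) ---
# Standalone version of the learning-rate schedule applied inside train_step():
# linear warmup for warmup_steps, then cosine decay from lr_init down to lr_end.
# The default constants below are placeholder assumptions, not values read
# from cfg.TRAIN.
def _example_lr_schedule(step, warmup_steps=1000, total_steps=10000,
                         lr_init=1e-3, lr_end=1e-6):
    if step < warmup_steps:
        return step / warmup_steps * lr_init
    return lr_end + 0.5 * (lr_init - lr_end) * (
        1 + np.cos((step - warmup_steps) / (total_steps - warmup_steps) * np.pi))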
def evaluate(model_name, weight_path):
    assert model_name in ['yolov3_tiny', 'yolov3', 'yolov4']

    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    if len(physical_devices) > 0:
        tf.config.experimental.set_memory_growth(physical_devices[0], True)

    NUM_CLASS = len(utils.read_class_names(cfg.YOLO.CLASSES))
    STRIDES = np.array(cfg.YOLO.STRIDES)
    IOU_LOSS_THRESH = cfg.YOLO.IOU_LOSS_THRESH
    XYSCALE = cfg.YOLO.XYSCALE
    ANCHORS = utils.get_anchors(cfg.YOLO.ANCHORS)

    trainset = Dataset('train')
    isfreeze = False
    steps_per_epoch = len(trainset)
    first_stage_epochs = cfg.TRAIN.FISRT_STAGE_EPOCHS
    second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS
    global_steps = tf.Variable(1, trainable=False, dtype=tf.int64)
    warmup_steps = cfg.TRAIN.WARMUP_EPOCHS * steps_per_epoch
    total_steps = (first_stage_epochs + second_stage_epochs) * steps_per_epoch

    input_layer = tf.keras.layers.Input([cfg.TRAIN.INPUT_SIZE, cfg.TRAIN.INPUT_SIZE, 3])
    if model_name == 'yolov3_tiny':
        feature_maps = YOLOv3_tiny(input_layer, NUM_CLASS)
        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            bbox_tensor = ops.decode_train(fm, NUM_CLASS, STRIDES, ANCHORS, i)
            bbox_tensors.append(fm)
            bbox_tensors.append(bbox_tensor)
        model = tf.keras.Model(input_layer, bbox_tensors)
    elif model_name == 'yolov3':
        feature_maps = YOLOv3(input_layer, NUM_CLASS)
        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            bbox_tensor = ops.decode_train(fm, NUM_CLASS, STRIDES, ANCHORS, i)
            bbox_tensors.append(fm)
            bbox_tensors.append(bbox_tensor)
        model = tf.keras.Model(input_layer, bbox_tensors)
    elif model_name == 'yolov4':
        feature_maps = YOLOv4(input_layer, NUM_CLASS)
        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            bbox_tensor = ops.decode_train(fm, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE)
            bbox_tensors.append(fm)
            bbox_tensors.append(bbox_tensor)
        model = tf.keras.Model(input_layer, bbox_tensors)
    else:
        raise ValueError

    if weight_path:
        if weight_path.split(".")[-1] == "weights":
            if model_name == 'yolov3_tiny':
                utils.load_weights_tiny(model, weight_path)
            elif model_name == 'yolov3':
                utils.load_weights_v3(model, weight_path)
            elif model_name == 'yolov4':
                utils.load_weights(model, weight_path)
            else:
                raise ValueError
        else:
            model.load_weights(weight_path)
        print('Restoring weights from: %s ... ' % weight_path)

    trainset = Dataset('train')
    for image_data, target in trainset:
        pred_result = model(image_data, training=True)
        giou_loss = conf_loss = prob_loss = 0

        for i in range(3):
            conv, pred = pred_result[i * 2], pred_result[i * 2 + 1]
            loss_items = ops.compute_loss(pred, conv, target[i][0], target[i][1],
                                          STRIDES=STRIDES, NUM_CLASS=NUM_CLASS,
                                          IOU_LOSS_THRESH=IOU_LOSS_THRESH, i=i)
            giou_loss += loss_items[0]
            conf_loss += loss_items[1]
            prob_loss += loss_items[2]

        total_loss = giou_loss + conf_loss + prob_loss
        tf.print("=> STEP %4d giou_loss: %4.2f conf_loss: %4.2f "
                 "prob_loss: %4.2f total_loss: %4.2f" %
                 (global_steps, giou_loss, conf_loss, prob_loss, total_loss))
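
# --- Illustrative usage (not part of the original file) ---
# Sketch of calling evaluate(): it builds the training graph, loads weights
# (Darknet .weights or a Keras checkpoint), and prints per-batch losses over
# Dataset('train'). The paths below are placeholder assumptions.
def _example_evaluate():
    evaluate('yolov4', './data/yolov4.weights')        # Darknet-format weights
    # evaluate('yolov4', './checkpoints/yolov4_ckpt')  # or a Keras checkpoint prefix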
def prune_train(model_name, weight_path, logdir, save_path, epoches):
    assert model_name in ['yolov3_tiny', 'yolov3', 'yolov4']

    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    if len(physical_devices) > 0:
        tf.config.experimental.set_memory_growth(physical_devices[0], True)

    NUM_CLASS = len(utils.read_class_names(cfg.YOLO.CLASSES))
    STRIDES = np.array(cfg.YOLO.STRIDES)
    IOU_LOSS_THRESH = cfg.YOLO.IOU_LOSS_THRESH
    XYSCALE = cfg.YOLO.XYSCALE
    ANCHORS = utils.get_anchors(cfg.YOLO.ANCHORS)

    trainset = Dataset('train')
    isfreeze = False
    steps_per_epoch = len(trainset)
    first_stage_epochs = cfg.TRAIN.FISRT_STAGE_EPOCHS
    second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS
    global_steps = tf.Variable(1, trainable=False, dtype=tf.int64)
    warmup_steps = cfg.TRAIN.WARMUP_EPOCHS * steps_per_epoch
    total_steps = (first_stage_epochs + second_stage_epochs) * steps_per_epoch

    input_layer = tf.keras.layers.Input([cfg.TRAIN.INPUT_SIZE, cfg.TRAIN.INPUT_SIZE, 3])
    if model_name == 'yolov3_tiny':
        feature_maps = YOLOv3_tiny(input_layer, NUM_CLASS)
        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            bbox_tensor = ops.decode_train(fm, NUM_CLASS, STRIDES, ANCHORS, i)
            bbox_tensors.append(fm)
            bbox_tensors.append(bbox_tensor)
        model = tf.keras.Model(input_layer, bbox_tensors)
    elif model_name == 'yolov3':
        feature_maps = YOLOv3(input_layer, NUM_CLASS)
        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            bbox_tensor = ops.decode_train(fm, NUM_CLASS, STRIDES, ANCHORS, i)
            bbox_tensors.append(fm)
            bbox_tensors.append(bbox_tensor)
        model = tf.keras.Model(input_layer, bbox_tensors)
    elif model_name == 'yolov4':
        feature_maps = YOLOv4(input_layer, NUM_CLASS)
        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            bbox_tensor = ops.decode_train(fm, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE)
            bbox_tensors.append(fm)
            bbox_tensors.append(bbox_tensor)
        model = tf.keras.Model(input_layer, bbox_tensors)
    else:
        raise ValueError

    if weight_path:
        if weight_path.split(".")[-1] == "weights":
            if model_name == 'yolov3_tiny':
                utils.load_weights_tiny(model, weight_path)
            elif model_name == 'yolov3':
                utils.load_weights_v3(model, weight_path)
            elif model_name == 'yolov4':
                utils.load_weights(model, weight_path)
            else:
                raise ValueError
        else:
            model.load_weights(weight_path)
        print('Restoring weights from: %s ... ' % weight_path)

    optimizer = tf.keras.optimizers.Adam(learning_rate=0.0001)

    if os.path.exists(logdir):
        shutil.rmtree(logdir)

    # for layer in model.layers:
    #     print(layer.name, isinstance(layer, tf.keras.layers.Conv2D))

    def apply_pruning_to_dense(layer):
        if isinstance(layer, tf.keras.layers.Conv2D):
            return tfmot.sparsity.keras.prune_low_magnitude(layer)
        return layer

    # Use `tf.keras.models.clone_model` to apply `apply_pruning_to_dense`
    # to the layers of the model.
    model_for_pruning = tf.keras.models.clone_model(
        model,
        clone_function=apply_pruning_to_dense,
    )
    # model_for_pruning.summary()

    unused_arg = -1
    model_for_pruning.optimizer = optimizer

    step_callback = tfmot.sparsity.keras.UpdatePruningStep()
    step_callback.set_model(model_for_pruning)
    log_callback = tfmot.sparsity.keras.PruningSummaries(log_dir=logdir)  # Log sparsity and other metrics in Tensorboard.
    log_callback.set_model(model_for_pruning)

    step_callback.on_train_begin()  # run pruning callback
    for epoch in range(epoches):
        log_callback.on_epoch_begin(epoch=unused_arg)  # run pruning callback
        for image_data, target in trainset:
            step_callback.on_train_batch_begin(batch=unused_arg)  # run pruning callback
            with tf.GradientTape() as tape:
                pred_result = model_for_pruning(image_data, training=True)
                giou_loss = conf_loss = prob_loss = 0

                # optimizing process
                for i in range(3):
                    conv, pred = pred_result[i * 2], pred_result[i * 2 + 1]
                    loss_items = ops.compute_loss(pred, conv, target[i][0], target[i][1],
                                                  STRIDES=STRIDES, NUM_CLASS=NUM_CLASS,
                                                  IOU_LOSS_THRESH=IOU_LOSS_THRESH, i=i)
                    giou_loss += loss_items[0]
                    conf_loss += loss_items[1]
                    prob_loss += loss_items[2]

                total_loss = giou_loss + conf_loss + prob_loss

                gradients = tape.gradient(total_loss, model_for_pruning.trainable_variables)
                optimizer.apply_gradients(zip(gradients, model_for_pruning.trainable_variables))
                tf.print("=> STEP %4d lr: %.6f giou_loss: %4.2f conf_loss: %4.2f "
                         "prob_loss: %4.2f total_loss: %4.2f" %
                         (global_steps, optimizer.lr.numpy(), giou_loss, conf_loss,
                          prob_loss, total_loss))

        step_callback.on_epoch_end(batch=unused_arg)  # run pruning callback

    model_for_export = tfmot.sparsity.keras.strip_pruning(model_for_pruning)
    return model_for_export
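
# --- Illustrative usage (not part of the original file) ---
# Sketch of magnitude-pruning a trained model with prune_train(); the paths
# and epoch count are placeholder assumptions. The function returns the model
# after tfmot.sparsity.keras.strip_pruning(), so the caller saves it here.
def _example_prune_train():
    pruned = prune_train('yolov4', './checkpoints/yolov4_trained',
                         './logs/pruning', './checkpoints/yolov4_pruned', 5)
    pruned.save_weights('./checkpoints/yolov4_pruned')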
def save_tflite(model_name, weight_path, quantize_mode, output, input_size):
    assert model_name in ['yolov3_tiny', 'yolov3', 'yolov4']
    assert quantize_mode in ['int8', 'float16', 'full_int8']

    NUM_CLASS = len(utils.read_class_names(cfg.YOLO.CLASSES))

    input_layer = tf.keras.layers.Input([input_size, input_size, 3])
    if model_name == 'yolov3_tiny':
        feature_maps = YOLOv3_tiny(input_layer, NUM_CLASS)
        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            bbox_tensor = ops.decode(fm, NUM_CLASS)
            bbox_tensors.append(bbox_tensor)
        model = tf.keras.Model(input_layer, bbox_tensors)
    elif model_name == 'yolov3':
        feature_maps = YOLOv3(input_layer, NUM_CLASS)
        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            bbox_tensor = ops.decode(fm, NUM_CLASS)
            bbox_tensors.append(bbox_tensor)
        model = tf.keras.Model(input_layer, bbox_tensors)
    elif model_name == 'yolov4':
        feature_maps = YOLOv4(input_layer, NUM_CLASS)
        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            bbox_tensor = ops.decode(fm, NUM_CLASS)
            bbox_tensors.append(bbox_tensor)
        model = tf.keras.Model(input_layer, bbox_tensors)
    else:
        model = None
        raise ValueError

    if weight_path.split(".")[-1] == "weights":
        if model_name == 'yolov3_tiny':
            utils.load_weights_tiny(model, weight_path)
        elif model_name == 'yolov3':
            utils.load_weights_v3(model, weight_path)
        elif model_name == 'yolov4':
            utils.load_weights(model, weight_path)
        else:
            raise ValueError
    else:
        model.load_weights(weight_path).expect_partial()
        print('Restoring weights from: %s ... ' % weight_path)

    # model.summary()
    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    if tf.__version__ >= '2.2.0':
        converter.experimental_new_converter = False

    if quantize_mode == 'int8':
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
    elif quantize_mode == 'float16':
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.target_spec.supported_types = [tf.compat.v1.lite.constants.FLOAT16]
    elif quantize_mode == 'full_int8':
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS,
                                               tf.lite.OpsSet.SELECT_TF_OPS]
        converter.allow_custom_ops = True
        converter.representative_dataset = representative_data_gen
    else:
        raise ValueError

    tflite_model = converter.convert()
    open(output, 'wb').write(tflite_model)
    logging.info("model saved to: {}".format(output))
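
# --- Illustrative usage (not part of the original file) ---
# Sketch of exporting a float16-quantized TFLite model with save_tflite() and
# sanity-checking the resulting flatbuffer with the TFLite Interpreter; the
# file names below are placeholder assumptions.
def _example_save_tflite():
    save_tflite('yolov4', './data/yolov4.weights', 'float16',
                './data/yolov4-fp16.tflite', 416)
    interpreter = tf.lite.Interpreter(model_path='./data/yolov4-fp16.tflite')
    interpreter.allocate_tensors()
    print(interpreter.get_input_details())
    print(interpreter.get_output_details())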