Code Example #1
File: train.py Project: delldu/StyleGANm
def main(args):
    cfg = load_cfg(args.cfg)
    distiller = Distiller(cfg)
    if args.ckpt is not None:
        ckpt = model_zoo(args.ckpt)
        load_weights(distiller, ckpt["state_dict"])
    logger = build_logger(cfg.logger)
    checkpoint_callback = pl.callbacks.ModelCheckpoint(
        filepath=os.getcwd() if args.checkpoint_dir is None else args.checkpoint_dir,
        save_top_k=True,
        save_last=True,
        verbose=True,
        monitor=cfg.trainer.monitor,
        mode=cfg.trainer.monitor_mode,
        prefix=''
    )
    trainer = pl.Trainer(
        gpus=args.gpus,
        max_epochs=cfg.trainer.max_epochs,
        accumulate_grad_batches=args.grad_batches,
        distributed_backend=args.distributed_backend,
        checkpoint_callback=checkpoint_callback,
        val_check_interval=args.val_check_interval,
        logger=logger
    )
    if args.to_onnx is None:
        trainer.fit(distiller)
    else:
        distiller.to_onnx(args.to_onnx)
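Note: the main() above only reads a handful of attributes from args. A minimal argparse setup that would satisfy it could look like the sketch below; the flag names mirror what the code reads, while the defaults are assumptions rather than values from the original project.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--cfg", required=True)                  # config file consumed by load_cfg
parser.add_argument("--ckpt", default=None)                  # optional checkpoint handled by model_zoo
parser.add_argument("--gpus", type=int, default=1)
parser.add_argument("--grad_batches", type=int, default=1)   # passed to accumulate_grad_batches
parser.add_argument("--distributed_backend", default=None)
parser.add_argument("--val_check_interval", type=float, default=1.0)
parser.add_argument("--checkpoint_dir", default=None)        # falls back to os.getcwd() in main()
parser.add_argument("--to_onnx", default=None)               # if set, export to ONNX instead of training
main(parser.parse_args())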
Code Example #2
def test_image(image_path, model_path):
    input_size = 416
    original_image      = cv2.imread(image_path)
    original_image      = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
    original_image_size = original_image.shape[:2]

    image_data = utils.image_preporcess(np.copy(original_image), [input_size, input_size])
    image_data = image_data[np.newaxis, ...].astype(np.float32)

    model = yolov3.build_for_test()
    # To load a TF model: model.load_weights(model_path); to load a darknet model: utils.load_weights(model, model_path)
    utils.load_weights(model, model_path)
    model.summary()
    start_time = time.time()
    pred_bbox = model.predict(image_data)
    print('pred_bbox>>>>>>>>>>>>>>>>>', pred_bbox)
    end_time = time.time()
    print("time: %.2f ms" %(1000*(end_time-start_time)))

    pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
    pred_bbox = tf.concat(pred_bbox, axis=0)
    # Convert bbox coordinates from the 416x416 input scale back to the original image and drop invalid boxes
    bboxes = utils.postprocess_boxes(pred_bbox, original_image_size, input_size, 0.3)
    bboxes = utils.nms(bboxes, 0.45, method='nms')
    # Draw the bounding boxes on the original image
    image = utils.draw_bbox(original_image, bboxes)
    image = Image.fromarray(image)
    image.show()
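The comment in the example above distinguishes two loading paths: a TF checkpoint is restored with model.load_weights(model_path), while darknet weights go through utils.load_weights(model, model_path). A small sketch of dispatching on the file name, assuming darknet weights use the .weights extension (the helper name is hypothetical):

def load_any_weights(model, model_path):
    # Assumption: darknet binary weights end in .weights; anything else
    # is treated as a TF/Keras checkpoint.
    if model_path.endswith(".weights"):
        utils.load_weights(model, model_path)
    else:
        model.load_weights(model_path)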
Code Example #3
def save_tf():
  NUM_CLASS = len(utils.read_class_names(cfg.YOLO.CLASSES))
  input_layer = tf.keras.layers.Input([FLAGS.input_size, FLAGS.input_size, 3])
  if FLAGS.tiny:
    feature_maps = YOLOv3_tiny(input_layer, NUM_CLASS)
    bbox_tensors = []
    for i, fm in enumerate(feature_maps):
      bbox_tensor = decode(fm, NUM_CLASS, i)
      bbox_tensors.append(bbox_tensor)
    model = tf.keras.Model(input_layer, bbox_tensors)
    utils.load_weights_tiny(model, FLAGS.weights)
  else:
    if FLAGS.model == 'yolov3':
      feature_maps = YOLOv3(input_layer, NUM_CLASS)
      bbox_tensors = []
      for i, fm in enumerate(feature_maps):
        bbox_tensor = decode(fm, NUM_CLASS, i)
        bbox_tensors.append(bbox_tensor)
      model = tf.keras.Model(input_layer, bbox_tensors)
      utils.load_weights_v3(model, FLAGS.weights)
    elif FLAGS.model == 'yolov4':
      feature_maps = YOLOv4(input_layer, NUM_CLASS)
      bbox_tensors = []
      for i, fm in enumerate(feature_maps):
        bbox_tensor = decode(fm, NUM_CLASS, i)
        bbox_tensors.append(bbox_tensor)
      model = tf.keras.Model(input_layer, bbox_tensors)
      utils.load_weights(model, FLAGS.weights)
    else:
      print("model option can be only 'yolov3' or 'yolov4'.")
      return

  model.summary()

  model.save(FLAGS.output)
Code Example #4
File: save_model.py Project: hsji0/JHS
def save_tf():
    STRIDES, ANCHORS, NUM_CLASS, XYSCALE = utils.load_config(FLAGS)
    print("load_config XYSCALE:{}".format(XYSCALE))
    input_layer = tf.keras.layers.Input(
        [FLAGS.input_size, FLAGS.input_size, 3])
    feature_maps = YOLO(input_layer, NUM_CLASS, FLAGS.model,
                        FLAGS.num_detection_layer)
    bbox_tensors = []
    prob_tensors = []
    if FLAGS.num_detection_layer == 1:  # yolo-custom
        output_tensors = decode(feature_maps[0], FLAGS.input_size // 32,
                                NUM_CLASS, STRIDES, ANCHORS, 0, XYSCALE,
                                FLAGS.framework)
        bbox_tensors.append(output_tensors[0])
        prob_tensors.append(output_tensors[1])
    elif FLAGS.num_detection_layer == 2:  # yolo-tiny
        for i, fm in enumerate(feature_maps):
            if i == 0:
                output_tensors = decode(fm, FLAGS.input_size // 16, NUM_CLASS,
                                        STRIDES, ANCHORS, i, XYSCALE,
                                        FLAGS.framework)
            else:
                output_tensors = decode(fm, FLAGS.input_size // 32, NUM_CLASS,
                                        STRIDES, ANCHORS, i, XYSCALE,
                                        FLAGS.framework)
            bbox_tensors.append(output_tensors[0])
            prob_tensors.append(output_tensors[1])
    elif FLAGS.num_detection_layer == 3:  # yolo
        for i, fm in enumerate(feature_maps):
            print("i:{}".format(i))
            if i == 0:
                output_tensors = decode(fm, FLAGS.input_size // 8, NUM_CLASS,
                                        STRIDES, ANCHORS, i, XYSCALE,
                                        FLAGS.framework)
            elif i == 1:
                output_tensors = decode(fm, FLAGS.input_size // 16, NUM_CLASS,
                                        STRIDES, ANCHORS, i, XYSCALE,
                                        FLAGS.framework)
            else:
                output_tensors = decode(fm, FLAGS.input_size // 32, NUM_CLASS,
                                        STRIDES, ANCHORS, i, XYSCALE,
                                        FLAGS.framework)
            bbox_tensors.append(output_tensors[0])
            prob_tensors.append(output_tensors[1])
    pred_bbox = tf.concat(bbox_tensors, axis=1)
    pred_prob = tf.concat(prob_tensors, axis=1)
    if FLAGS.framework == 'tflite':
        pred = (pred_bbox, pred_prob)
    else:
        boxes, pred_conf = filter_boxes(
            pred_bbox,
            pred_prob,
            score_threshold=FLAGS.score_thres,
            input_shape=tf.constant([FLAGS.input_size, FLAGS.input_size]))
        pred = tf.concat([boxes, pred_conf], axis=-1)
    model = tf.keras.Model(input_layer, pred)
    utils.load_weights(model, FLAGS.weights, FLAGS.model,
                       FLAGS.num_detection_layer)
    model.summary()
    model.save(FLAGS.output)
Code Example #5
def save_tf():
    STRIDES, ANCHORS, NUM_CLASS, XYSCALE = utils.load_config(FLAGS)

    input_layer = tf.keras.layers.Input(
        [FLAGS.input_size, FLAGS.input_size, 3])
    feature_maps = YOLOv4(input_layer, NUM_CLASS)
    bbox_tensors = []
    prob_tensors = []

    for i, fm in enumerate(feature_maps):
        if i == 0:
            output_tensors = decode(fm, FLAGS.input_size // 8, NUM_CLASS,
                                    STRIDES, ANCHORS, i, XYSCALE)
        elif i == 1:
            output_tensors = decode(fm, FLAGS.input_size // 16, NUM_CLASS,
                                    STRIDES, ANCHORS, i, XYSCALE)
        else:
            output_tensors = decode(fm, FLAGS.input_size // 32, NUM_CLASS,
                                    STRIDES, ANCHORS, i, XYSCALE)
        bbox_tensors.append(output_tensors[0])
        prob_tensors.append(output_tensors[1])

    pred_bbox = tf.concat(bbox_tensors, axis=1)
    pred_prob = tf.concat(prob_tensors, axis=1)

    boxes, pred_conf = filter_boxes(pred_bbox,
                                    pred_prob,
                                    score_threshold=FLAGS.score_thres,
                                    input_shape=tf.constant(
                                        [FLAGS.input_size, FLAGS.input_size]))
    pred = tf.concat([boxes, pred_conf], axis=-1)
    model = tf.keras.Model(input_layer, pred)
    utils.load_weights(model, FLAGS.weights, FLAGS.model)
    model.summary()
    model.save(FLAGS.output)
Code Example #6
File: save_model.py Project: NKKFu/mask-detection
def main(argv):

    weights = FLAGS.weights 
    input_size = FLAGS.size

    NUM_CLASS = 2

    print(f'[DEBUG][save_model] Path to weights : weights/{FLAGS.weights}')
    print(f'[DEBUG][save_model] Size : {FLAGS.size}')

    input_layer = tf.keras.layers.Input([input_size, input_size, 3])
    print(f'[INFO][save_model] Created input_layer of size {input_size}')
    print(f'[DEBUG][save_model] input_layer : {input_layer}')

    feature_maps = YOLOv4(input_layer, NUM_CLASS)
    print(f'[DEBUG][save_model] feature_maps : {feature_maps}')
    bbox_tensors = []
    for i, fm in enumerate(feature_maps):
        bbox_tensors.append(decode(fm, NUM_CLASS, i))

    model = tf.keras.Model(input_layer, bbox_tensors)
    utils.load_weights(model, 'weights/' + FLAGS.weights)

    print(f'[INFO][save_model] Saving model... ')

    model.save(f'models/{weights.split(".")[0]}-size-{input_size}.h5')
    
    print(f'[INFO][save_model] Model saved to models/{weights.split(".")[0]}-size-{input_size}.h5')
Code Example #7
def start_model(device):

    tf.executing_eagerly()

    #TODO will have to change when working with several gpus

    # strategy = tf.distribute.MirroredStrategy()

    with tf.device(device):  # place the model on the requested device
        #generate model
        input_layer = tf.keras.Input([INPUT_SIZE, INPUT_SIZE, 3])

        feature_maps = YOLOv4(input_layer, NUM_CLASS)
        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            bbox_tensor = decode(fm, NUM_CLASS, i)
            bbox_tensors.append(bbox_tensor)
        model = tf.keras.Model(input_layer, bbox_tensors)
        print('Model built')

        #force to run eagerly
        model.run_eagerly = True

        #load existing weights into model
        utils.load_weights(model, WEIGHTS)

    return model
Code Example #8
def main(_argv):
    NUM_CLASS = len(utils.read_class_names(cfg.YOLO.CLASSES))
    input_layer = tf.keras.layers.Input([FLAGS.input_size, FLAGS.input_size, 3])
    if FLAGS.tiny:
        feature_maps = YOLOv3_tiny(input_layer, NUM_CLASS)
        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            bbox_tensor = decode(fm, NUM_CLASS, i)
            bbox_tensors.append(bbox_tensor)
        model = tf.keras.Model(input_layer, bbox_tensors)
        utils.load_weights_tiny(model, FLAGS.weights)
    else:
        if FLAGS.model == 'yolov3':
            feature_maps = YOLOv3(input_layer, NUM_CLASS)
            bbox_tensors = []
            for i, fm in enumerate(feature_maps):
                bbox_tensor = decode(fm, NUM_CLASS, i)
                bbox_tensors.append(bbox_tensor)
            model = tf.keras.Model(input_layer, bbox_tensors)
            utils.load_weights_v3(model, FLAGS.weights)
        elif FLAGS.model == 'yolov4':
            feature_maps = YOLOv4(input_layer, NUM_CLASS)
            bbox_tensors = []
            for i, fm in enumerate(feature_maps):
                bbox_tensor = decode(fm, NUM_CLASS, i)
                bbox_tensors.append(bbox_tensor)
            model = tf.keras.Model(input_layer, bbox_tensors)
            utils.load_weights(model, FLAGS.weights)
    
    save_model(model)
    save_tflite(model)
    demo()
Code Example #9
def save_tflite():
  NUM_CLASS = len(utils.read_class_names(cfg.YOLO.CLASSES))
  input_layer = tf.keras.layers.Input([FLAGS.input_size, FLAGS.input_size, 3])
  if FLAGS.tiny:
    feature_maps = YOLOv3_tiny(input_layer, NUM_CLASS)
    bbox_tensors = []
    for i, fm in enumerate(feature_maps):
      bbox_tensor = decode(fm, NUM_CLASS, i)
      bbox_tensors.append(bbox_tensor)
    model = tf.keras.Model(input_layer, bbox_tensors)
    utils.load_weights_tiny(model, FLAGS.weights)
  else:
    if FLAGS.model == 'yolov3':
      feature_maps = YOLOv3(input_layer, NUM_CLASS)
      bbox_tensors = []
      for i, fm in enumerate(feature_maps):
        bbox_tensor = decode(fm, NUM_CLASS, i)
        bbox_tensors.append(bbox_tensor)
      model = tf.keras.Model(input_layer, bbox_tensors)
      utils.load_weights_v3(model, FLAGS.weights)
    elif FLAGS.model == 'yolov4':
      feature_maps = YOLOv4(input_layer, NUM_CLASS)
      bbox_tensors = []
      for i, fm in enumerate(feature_maps):
        bbox_tensor = decode(fm, NUM_CLASS, i)
        bbox_tensors.append(bbox_tensor)
      model = tf.keras.Model(input_layer, bbox_tensors)
      utils.load_weights(model, FLAGS.weights)

  model.summary()

  # Save Model, including Frozen graph
  model.save('./model')
Code Example #10
def save_tflite():
    NUM_CLASS = len(utils.read_class_names(cfg.YOLO.CLASSES))
    input_layer = tf.keras.layers.Input(
        [FLAGS.input_size, FLAGS.input_size, 3])
    if FLAGS.tiny:
        if FLAGS.model == 'yolov3':
            feature_maps = YOLOv3_tiny(input_layer, NUM_CLASS)
        else:
            feature_maps = YOLOv4_tiny(input_layer, NUM_CLASS)
        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            bbox_tensor = decode(fm, NUM_CLASS, i)
            bbox_tensors.append(bbox_tensor)
        model = tf.keras.Model(input_layer, bbox_tensors)
        utils.load_weights_tiny(model, FLAGS.weights)
    else:
        if FLAGS.model == 'yolov3':
            feature_maps = YOLOv3(input_layer, NUM_CLASS)
            bbox_tensors = []
            for i, fm in enumerate(feature_maps):
                bbox_tensor = decode(fm, NUM_CLASS, i)
                bbox_tensors.append(bbox_tensor)
            model = tf.keras.Model(input_layer, bbox_tensors)
            utils.load_weights_v3(model, FLAGS.weights)
        elif FLAGS.model == 'yolov4':
            feature_maps = YOLOv4(input_layer, NUM_CLASS)
            bbox_tensors = []
            for i, fm in enumerate(feature_maps):
                bbox_tensor = decode(fm, NUM_CLASS, i)
                bbox_tensors.append(bbox_tensor)
            model = tf.keras.Model(input_layer, bbox_tensors)
            utils.load_weights(model, FLAGS.weights)
    model.summary()

    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    if tf.__version__ >= '2.2.0':
        converter.experimental_new_converter = False

    if FLAGS.quantize_mode == 'int8':
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
    elif FLAGS.quantize_mode == 'float16':
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.target_spec.supported_types = [
            tf.compat.v1.lite.constants.FLOAT16
        ]
    elif FLAGS.quantize_mode == 'full_int8':
        converter.target_spec.supported_ops = [
            tf.lite.OpsSet.TFLITE_BUILTINS_INT8
        ]
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.target_spec.supported_ops = [
            tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS
        ]
        converter.allow_custom_ops = True
        converter.representative_dataset = representative_data_gen

    tflite_model = converter.convert()
    open(FLAGS.output, 'wb').write(tflite_model)
    logging.info("model saved to: {}".format(FLAGS.output))
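Once the .tflite file has been written, it can be sanity-checked with the standard TF Lite interpreter. A minimal sketch, assuming tensorflow as tf and numpy as np are imported as in the examples above and that the converted model takes a float32 batch of shape [1, input_size, input_size, 3]:

interpreter = tf.lite.Interpreter(model_path=FLAGS.output)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

dummy = np.random.rand(1, FLAGS.input_size, FLAGS.input_size, 3).astype(np.float32)
interpreter.set_tensor(input_details[0]['index'], dummy)
interpreter.invoke()
pred = [interpreter.get_tensor(d['index']) for d in output_details]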
Code Example #11
def main(_argv):
    NUM_CLASS = len(utils.read_class_names(cfg.YOLO.CLASSES))
    input_size = FLAGS.size
    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    if len(physical_devices) > 0:
        tf.config.experimental.set_memory_growth(physical_devices[0], True)
    if FLAGS.framework == 'tf':
        input_layer = tf.keras.layers.Input([input_size, input_size, 3])
        if FLAGS.tiny:
            feature_maps = YOLOv3_tiny(input_layer, NUM_CLASS)
            bbox_tensors = []
            for i, fm in enumerate(feature_maps):
                bbox_tensor = decode(fm, NUM_CLASS, i)
                bbox_tensors.append(bbox_tensor)
            model = tf.keras.Model(input_layer, bbox_tensors)
            utils.load_weights_tiny(model, FLAGS.weights)
        else:
            if FLAGS.model == 'yolov3':
                feature_maps = YOLOv3(input_layer, NUM_CLASS)
                bbox_tensors = []
                for i, fm in enumerate(feature_maps):
                    bbox_tensor = decode(fm, NUM_CLASS, i)
                    bbox_tensors.append(bbox_tensor)
                model = tf.keras.Model(input_layer, bbox_tensors)
                utils.load_weights_v3(model, FLAGS.weights)
            elif FLAGS.model == 'yolov4':
                feature_maps = YOLOv4(input_layer, NUM_CLASS)
                bbox_tensors = []
                for i, fm in enumerate(feature_maps):
                    bbox_tensor = decode(fm, NUM_CLASS, i)
                    bbox_tensors.append(bbox_tensor)
                model = tf.keras.Model(input_layer, bbox_tensors)
                utils.load_weights(model, FLAGS.weights)
    logging.info('weights loaded')

    # Test the TensorFlow Lite model on random input data.
    sum = 0
    for i in range(1000):
        img_raw = tf.image.decode_image(open(FLAGS.image, 'rb').read(),
                                        channels=3)

        original_image = cv2.imread(FLAGS.image)
        original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
        original_image_size = original_image.shape[:2]
        image_data = utils.image_preporcess(np.copy(original_image),
                                            [FLAGS.size, FLAGS.size])
        image_data = image_data[np.newaxis, ...].astype(np.float32)

        prev_time = time.time()
        pred_bbox = model.predict(image_data)
        # pred_bbox = pred_bbox.numpy()
        curr_time = time.time()
        exec_time = curr_time - prev_time
        if i == 0: continue
        sum += (1000 / (1000 * exec_time))
        info = "average FPS:" + str(round(sum / i, 2)) + ", FPS: " + str(
            round((1000 / (1000 * exec_time)), 1))
        print(info)
Code Example #12
def save_tf():
    if FLAGS.license:
        cfg.YOLO.CLASSES = "./data/classes/custom.names"
    else:
        cfg.YOLO.CLASSES = "./data/classes/char.names"
    STRIDES, ANCHORS, NUM_CLASS, XYSCALE = utils.load_config(FLAGS)
    #print(read_class_names(cfg.YOLO.CLASSES))

    input_layer = tf.keras.layers.Input(
        [FLAGS.input_size, FLAGS.input_size, 3])
    feature_maps = YOLO(input_layer, NUM_CLASS, FLAGS.model, FLAGS.tiny)
    bbox_tensors = []
    prob_tensors = []
    if FLAGS.tiny:
        for i, fm in enumerate(feature_maps):
            if i == 0:
                output_tensors = decode(fm, FLAGS.input_size // 16, NUM_CLASS,
                                        STRIDES, ANCHORS, i, XYSCALE,
                                        FLAGS.framework)
            else:
                output_tensors = decode(fm, FLAGS.input_size // 32, NUM_CLASS,
                                        STRIDES, ANCHORS, i, XYSCALE,
                                        FLAGS.framework)
            bbox_tensors.append(output_tensors[0])
            prob_tensors.append(output_tensors[1])
    else:
        for i, fm in enumerate(feature_maps):
            if i == 0:
                output_tensors = decode(fm, FLAGS.input_size // 8, NUM_CLASS,
                                        STRIDES, ANCHORS, i, XYSCALE,
                                        FLAGS.framework)
            elif i == 1:
                output_tensors = decode(fm, FLAGS.input_size // 16, NUM_CLASS,
                                        STRIDES, ANCHORS, i, XYSCALE,
                                        FLAGS.framework)
            else:
                output_tensors = decode(fm, FLAGS.input_size // 32, NUM_CLASS,
                                        STRIDES, ANCHORS, i, XYSCALE,
                                        FLAGS.framework)
            bbox_tensors.append(output_tensors[0])
            prob_tensors.append(output_tensors[1])
    pred_bbox = tf.concat(bbox_tensors, axis=1)
    pred_prob = tf.concat(prob_tensors, axis=1)
    if FLAGS.framework == 'tflite':
        pred = (pred_bbox, pred_prob)
    else:
        boxes, pred_conf = filter_boxes(
            pred_bbox,
            pred_prob,
            score_threshold=FLAGS.score_thres,
            input_shape=tf.constant([FLAGS.input_size, FLAGS.input_size]))
        pred = tf.concat([boxes, pred_conf], axis=-1)
    model = tf.keras.Model(input_layer, pred)
    utils.load_weights(model, FLAGS.weights, FLAGS.model, FLAGS.tiny)
    model.summary()
    model.save(FLAGS.output)
Code Example #13
    def create_model(self, weight=None):
        nb_gpu = len(GPU_DEVICES)
        if nb_gpu > 1:
            with tf.device('/cpu:0'):
                model = self._model_func()
                model = load_weights(model, weight)
            self.model = multi_gpu_model(model, gpus=nb_gpu)
        else:
            model = self._model_func()
            self.model = load_weights(model, weight)
        return self.model
Code Example #14
def save_tf():
    STRIDES, ANCHORS, NUM_CLASS, XYSCALE = utils.load_config(FLAGS)

    input_layer = tf.keras.layers.Input(
        [FLAGS.input_size, FLAGS.input_size, 3])
    feature_maps = YOLO(input_layer, NUM_CLASS, FLAGS.model, FLAGS.tiny)
    bbox_tensors = []
    prob_tensors = []
    if FLAGS.tiny:
        for i, fm in enumerate(feature_maps):
            if i == 0:
                output_tensors = decode(fm, FLAGS.input_size // 16, NUM_CLASS,
                                        STRIDES, ANCHORS, i, XYSCALE,
                                        FLAGS.framework)
            else:
                output_tensors = decode(fm, FLAGS.input_size // 32, NUM_CLASS,
                                        STRIDES, ANCHORS, i, XYSCALE,
                                        FLAGS.framework)
            bbox_tensors.append(output_tensors[0])
            prob_tensors.append(output_tensors[1])
    else:
        for i, fm in enumerate(feature_maps):
            with tf.name_scope("featuremap-" + str(i)) as scope:
                if i == 0:
                    output_tensors = decode(fm, FLAGS.input_size // 8,
                                            NUM_CLASS, STRIDES, ANCHORS, i,
                                            XYSCALE, FLAGS.framework)
                elif i == 1:
                    output_tensors = decode(fm, FLAGS.input_size // 16,
                                            NUM_CLASS, STRIDES, ANCHORS, i,
                                            XYSCALE, FLAGS.framework)
                else:
                    output_tensors = decode(fm, FLAGS.input_size // 32,
                                            NUM_CLASS, STRIDES, ANCHORS, i,
                                            XYSCALE, FLAGS.framework)
            bbox_tensors.append(output_tensors[0])
            prob_tensors.append(output_tensors[1])
    pred_bbox = tf.concat(bbox_tensors, axis=1)
    pred_prob = tf.concat(prob_tensors, axis=1)
    if FLAGS.framework == 'tflite':
        pred = (pred_bbox, pred_prob)
    else:
        boxes, pred_conf = filter_boxes(
            pred_bbox,
            pred_prob,
            score_threshold=FLAGS.score_thres,
            input_shape=tf.constant([FLAGS.input_size, FLAGS.input_size]))
        pred = tf.concat([boxes, pred_conf], axis=-1)
    #model = tf.keras.Model(input_layer, feature_maps)
    model = tf.keras.Model(input_layer, pred)
    utils.load_weights(model, FLAGS.weights, FLAGS.model, FLAGS.tiny)
    model.summary()
    model.save(FLAGS.output, save_format="h5")
Code Example #15
def get_object(video_url, threshold=0.45):
    input_layer = tf.keras.layers.Input([input_size, input_size, 3])
    feature_maps = YOLOv3(input_layer)

    bbox_tensors = []
    for i, fm in enumerate(feature_maps):
        bbox_tensor = decode(fm, i)
        bbox_tensors.append(bbox_tensor)

    model = tf.keras.Model(input_layer, bbox_tensors)
    utils.load_weights(model, "yolov3_union_10000.weights")
    model.summary()

    vid = cv2.VideoCapture(video_url)
    while True:
        return_value, frame = vid.read()
        if return_value:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        else:
            return "No image"
        frame_size = frame.shape[:2]
        image_data = utils.image_preporcess(np.copy(frame),
                                            [input_size, input_size])
        image_data = image_data[np.newaxis, ...].astype(np.float32)

        prev_time = time.time()
        pred_bbox = model.predict_on_batch(image_data)
        curr_time = time.time()
        exec_time = curr_time - prev_time

        pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
        pred_bbox = tf.concat(pred_bbox, axis=0)
        bboxes = utils.postprocess_boxes(pred_bbox, frame_size, input_size,
                                         0.3)
        bboxes = utils.nms(bboxes, threshold, method='nms')
        image = utils.draw_bbox(frame, bboxes)

        result = np.asarray(image)
        info = "time: %.2f ms" % (1000 * exec_time)
        cv2.putText(result,
                    text=info,
                    org=(50, 70),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=1,
                    color=(255, 0, 0),
                    thickness=2)

        result = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

        # encode the BGR copy so colors come out correctly in the JPEG stream
        retval, buffer = cv2.imencode(".jpeg", result)
        yield ((b'--frame\r\n'
                b'Content-Type: image/jpeg\r\n\r\n' + buffer.tobytes() +
                b'\r\n'))
Code Example #16
    def build_model(self):
        input_layer = tf.keras.layers.Input([self.W, self.H, self.D])
        feature_maps = YOLOv3(input_layer)

        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            bbox_tensor = decode(fm, i)
            bbox_tensors.append(bbox_tensor)

        model = tf.keras.Model(input_layer, bbox_tensors)
        utils.load_weights(model, "yoloweights/yolov3.weights")
        #print(model.summary())
        return model
Code Example #17
def load_Yolo_model():
    """Load a yolo model and its weights for inference."""
    input_layer = tf.keras.layers.Input([None, None, 3])
    feature_maps = YOLOv3(input_layer)

    bbox_tensors = []
    for i, fm in enumerate(feature_maps):
        bbox_tensor = decode(fm, i)
        bbox_tensors.append(bbox_tensor)

    model = tf.keras.Model(input_layer, bbox_tensors)
    utils.load_weights(model, "./yolov3.weights")
    return model
Code Example #18
def save_tf(parameters):
    """Transform a darknet model of YOLO to a TensorFlow model

    Args:
        parameters (dictionary): input parameters
        - weights: path to the darknet weights
        - input_size: input size of the model
        - model: model to transform
        - weights_tf: path to save the tf weights
    Returns:
        [void]:
    """
    STRIDES, ANCHORS, NUM_CLASS, XYSCALE = utils.load_config(
        tiny=False, model=parameters['model'])

    input_layer = tf.keras.layers.Input(
        [parameters['input_size'], parameters['input_size'], 3])
    feature_maps = YOLO(input_layer, NUM_CLASS, parameters['model'], False)
    bbox_tensors = []
    prob_tensors = []
    for i, fm in enumerate(feature_maps):
        if i == 0:
            output_tensors = decode(fm, parameters['input_size'] // 8,
                                    NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE,
                                    'tf')
        elif i == 1:
            output_tensors = decode(fm, parameters['input_size'] // 16,
                                    NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE,
                                    'tf')
        else:
            output_tensors = decode(fm, parameters['input_size'] // 32,
                                    NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE,
                                    'tf')
        bbox_tensors.append(output_tensors[0])
        prob_tensors.append(output_tensors[1])
    pred_bbox = tf.concat(bbox_tensors, axis=1)
    pred_prob = tf.concat(prob_tensors, axis=1)

    boxes, pred_conf = filter_boxes(pred_bbox,
                                    pred_prob,
                                    score_threshold=parameters['score_thres'],
                                    input_shape=tf.constant([
                                        parameters['input_size'],
                                        parameters['input_size']
                                    ]))
    pred = tf.concat([boxes, pred_conf], axis=-1)
    model = tf.keras.Model(input_layer, pred)
    utils.load_weights(model, parameters['weights'], parameters['model'],
                       False)
    model.summary()
    model.save(parameters['weights_tf'])
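A usage sketch for save_tf above, with placeholder paths; the keys mirror the docstring plus score_thres, which the body also reads:

parameters = {
    "weights": "./data/yolov4.weights",         # darknet weights (placeholder path)
    "input_size": 416,
    "model": "yolov4",
    "score_thres": 0.2,
    "weights_tf": "./checkpoints/yolov4-416",   # output path for the saved TF model
}
save_tf(parameters)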
Code Example #19
File: demo.py Project: delldu/StyleGANm
def main(args):
    cfg = load_cfg(args.cfg)
    distiller = Distiller(cfg)
    if args.ckpt is not None:
        ckpt = model_zoo(args.ckpt)
        load_weights(distiller, ckpt["state_dict"])

    while True:
        var = torch.randn(1, distiller.mapping_net.style_dim)
        img_s = distiller(var, truncated=args.truncated)
        cv2.imshow("demo", tensor_to_img(img_s[0].cpu()))
        key = chr(cv2.waitKey() & 255)
        if key == 'q':
            break
Code Example #20
def recv_Img(data,img):
    global flag,model,input_size
    start_time = time.time()
    frame_id = data['frame']
    frame_proc = data['proc']
    original_image= pickle.loads(img)

    class_names = {}
    with open(cfg.YOLO.CLASSES, 'r') as data:
        for ID, name in enumerate(data):
            class_names[ID] = name.strip('\n')

    # Setup tensorflow, keras and YOLOv3

    original_image      = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
    original_image_size = original_image.shape[:2]

    image_data = utils.image_preporcess(np.copy(original_image), [input_size, input_size])
    image_data = image_data[np.newaxis, ...].astype(np.float32)

    if flag:    
        input_layer  = tf.keras.layers.Input([input_size, input_size, 3])
        feature_maps = YOLOv3(input_layer)
        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            bbox_tensor = decode(fm, i)
            bbox_tensors.append(bbox_tensor)
        model = tf.keras.Model(input_layer, bbox_tensors)
        utils.load_weights(model, "./yolov3.weights")
        flag=False
    pred_bbox = model.predict(image_data)
    pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
    pred_bbox = tf.concat(pred_bbox, axis=0)

    bboxes = utils.postprocess_boxes(pred_bbox, original_image_size, input_size, 0.3)
    bboxes = utils.nms(bboxes, 0.45, method='nms')

    # We have our objects detected and boxed, lets move the class name into a list
    objects_detected = []
    for x0,y0,x1,y1,prob,class_id in bboxes:
        objects_detected.append(class_names[class_id])
    #put classes and its frequency on a dictionary 
    final_dict={x:objects_detected.count(x) for x in set(objects_detected)}

    elapsed_time = time.time() - start_time
    
    message={"frame": frame_id, 'proc': frame_proc,'classes':final_dict,'timestamp':elapsed_time}
    endpoint="http://" + srvr + ':'  + prt+"/result"
    requests.post(endpoint,json=message)
    return message
Code Example #21
def load_model_yolov4(input_size=608,
                      weights='./data/yolov4_original_last.weights'):

    NUM_CLASS = len(utils.read_class_names(cfg.YOLO.CLASSES))
    input_layer = tf.keras.layers.Input([input_size, input_size, 3])
    feature_maps = YOLOv4(input_layer, NUM_CLASS)
    bbox_tensors = []
    for i, fm in enumerate(feature_maps):
        bbox_tensor = decode(fm, NUM_CLASS, i)
        bbox_tensors.append(bbox_tensor)
    model = tf.keras.Model(input_layer, bbox_tensors)
    utils.load_weights(model, weights)

    model.summary()
    return model
Code Example #22
def main(args):
    cfg = load_cfg(args.cfg)
    distiller = Distiller(cfg)
    if args.ckpt is not None:
        ckpt = model_zoo(args.ckpt)
        load_weights(distiller, ckpt["state_dict"])

    for i in range(args.n_output):
        var = torch.randn(1, distiller.mapping_net.style_dim)
        img_s = distiller(var, truncated=args.truncated)
        path = args.output_path + "pic" + str(i) + ".jpg"
        cv2.imwrite(path, tensor_to_img(img_s[0].cpu()))
        key = chr(cv2.waitKey() & 255)
        if key == 'q':
            break
Code Example #23
def create_coco_model():

    input_size = 416
    input_layer = tf.keras.layers.Input([input_size, input_size, 3])
    feature_maps = YOLOv3(input_layer)

    bbox_tensors = []
    for i, fm in enumerate(feature_maps):
        bbox_tensor = decode(fm, i)
        bbox_tensors.append(bbox_tensor)

    model = tf.keras.Model(input_layer, bbox_tensors)
    model.summary()
    utils.load_weights(model, "./yolov3.weights")
    return model
Code Example #24
File: generate.py Project: delldu/StyleGANm
def main(args):
    cfg = load_cfg(args.cfg)
    distiller = Distiller(cfg)
    if args.ckpt is not None:
        ckpt = model_zoo(args.ckpt)
        load_weights(distiller, ckpt["state_dict"])

    distiller = distiller.to(args.device)
    for i in tqdm(range(args.n_batches)):
        var = torch.randn(args.batch_size,
                          distiller.mapping_net.style_dim).to(args.device)
        img_s = distiller(var, truncated=args.truncated)
        for j in range(img_s.size(0)):
            cv2.imwrite(
                os.path.join(args.output_path, f"{i*args.batch_size + j}.png"),
                tensor_to_img(img_s[j].cpu()))
Code Example #25
def main(argv):

    flags = parser(
        description="freeze yolov3 graph from checkpoint file").parse_args()
    classes = utils.read_coco_names("./data/coco.names")
    num_classes = len(classes)
    SIZE = flags.image_size
    print("=> the input image size is [%d, %d]" % (SIZE, SIZE))
    model = yolov3.yolov3(num_classes)

    with tf.Graph().as_default() as graph:
        sess = tf.Session(graph=graph)
        inputs = tf.placeholder(
            tf.float32, [1, SIZE, SIZE, 3])  # placeholder for detector inputs

        with tf.variable_scope('yolov3'):
            feature_map = model.forward(inputs, is_training=False)

        boxes, confs, probs = model.predict(feature_map)
        scores = confs * probs
        print("=>", boxes, scores)
        boxes, scores, labels = utils.gpu_nms(
            boxes,
            scores,
            num_classes,
            score_thresh=flags.score_threshold,
            iou_thresh=flags.iou_threshold)
        print("=>", boxes, scores, labels)
        feature_map_1, feature_map_2, feature_map_3 = feature_map
        print("=>", feature_map_1, feature_map_2, feature_map_3)
        saver = tf.train.Saver(var_list=tf.global_variables(scope='yolov3'))

        if flags.convert:
            if not os.path.exists(flags.weights_path):
                url = 'https://github.com/YunYang1994/tensorflow-yolov3/releases/download/v1.0/yolov3.weights'
                for i in range(3):
                    time.sleep(1)
                    print("=> %s does not exists ! " % flags.weights_path)
                print("=> It will take a while to download it from %s" % url)
                print('=> Downloading yolov3 weights ... ')
                wget.download(url, flags.weights_path)

            load_ops = utils.load_weights(tf.global_variables(scope='yolov3'),
                                          flags.weights_path)
            sess.run(load_ops)
            save_path = saver.save(sess, save_path=flags.ckpt_file)
            print('=> model saved in path: {}'.format(save_path))

        if flags.freeze:
            saver.restore(sess, flags.ckpt_file)
            print('=> checkpoint file restored from ', flags.ckpt_file)
            utils.freeze_graph(sess, './checkpoint/yolov3_cpu_nms.pb',
                               ["concat_9", "mul_6"])
            utils.freeze_graph(sess, './checkpoint/yolov3_gpu_nms.pb',
                               ["concat_10", "concat_11", "concat_12"])
            utils.freeze_graph(sess, './checkpoint/yolov3_feature.pb', [
                "yolov3/yolo-v3/feature_map_1",
                "yolov3/yolo-v3/feature_map_2",
                "yolov3/yolo-v3/feature_map_3",
            ])
Code Example #26
def main(argv):

    flags = parser(
        description="freeze yolov3 graph from checkpoint file").parse_args()
    print("=> the input image size is [%d, %d]" %
          (flags.image_h, flags.image_w))
    anchors = utils.get_anchors(flags.anchors_path, flags.image_h,
                                flags.image_w)
    model = yolov3.yolov3(flags.num_classes, anchors)

    with tf.Graph().as_default() as graph:
        sess = tf.Session(graph=graph)
        inputs = tf.placeholder(tf.float32,
                                [1, flags.image_h, flags.image_w, 3
                                 ])  # placeholder for detector inputs
        print("=>", inputs)

        with tf.variable_scope('yolov3'):
            feature_map = model.forward(inputs, is_training=False)

        boxes, confs, probs = model.predict(feature_map)
        scores = confs * probs
        print("=>", boxes.name[:-2], scores.name[:-2])
        cpu_out_node_names = [boxes.name[:-2], scores.name[:-2]]
        boxes, scores, labels = utils.gpu_nms(
            boxes,
            scores,
            flags.num_classes,
            score_thresh=flags.score_threshold,
            iou_thresh=flags.iou_threshold)
        print("=>", boxes.name[:-2], scores.name[:-2], labels.name[:-2])
        gpu_out_node_names = [
            boxes.name[:-2], scores.name[:-2], labels.name[:-2]
        ]
        feature_map_1, feature_map_2, feature_map_3 = feature_map
        saver = tf.train.Saver(var_list=tf.global_variables(scope='yolov3'))

        if flags.convert:
            if not os.path.exists(flags.weights_path):
                url = 'https://github.com/YunYang1994/tensorflow-yolov3/releases/download/v1.0/yolov3.weights'
                for i in range(3):
                    time.sleep(1)
                    print("=> %s does not exists ! " % flags.weights_path)
                print("=> It will take a while to download it from %s" % url)
                print('=> Downloading yolov3 weights ... ')
                wget.download(url, flags.weights_path)

            load_ops = utils.load_weights(tf.global_variables(scope='yolov3'),
                                          flags.weights_path)
            sess.run(load_ops)
            save_path = saver.save(sess, save_path=flags.ckpt_file)
            print('=> model saved in path: {}'.format(save_path))

        if flags.freeze:
            saver.restore(sess, flags.ckpt_file)
            print('=> checkpoint file restored from ', flags.ckpt_file)
            utils.freeze_graph(sess, './checkpoint/yolov3_cpu_nms.pb',
                               cpu_out_node_names)
            utils.freeze_graph(sess, './checkpoint/yolov3_gpu_nms.pb',
                               gpu_out_node_names)
Code Example #27
    def create_model(self, weight=None):
        import tensorflow as tf
        from keras.utils import multi_gpu_model
        from core.utils import load_weights
        from core.config.resnet_base import GPU_DEVICES

        nb_gpu = len(GPU_DEVICES)
        if nb_gpu > 1:
            with tf.device('/cpu:0'):
                model = self._model_func()
                model = load_weights(model, weight)
            self.model = multi_gpu_model(model, gpus=nb_gpu)
        else:
            model = self._model_func()
            self.model = load_weights(model, weight)
        return self.model
Code Example #28
File: convert_weight.py Project: wkvanderveen/hwr
    def convert_weights(self):
        print(f"=> the input image size is [{self.img_h}, {self.img_w}]")
        anchors = utils.get_anchors(self.anchors_path, self.img_h, self.img_w)
        model = yolov3.yolov3(self.num_classes, anchors)

        with tf.Graph().as_default() as graph:
            sess = tf.Session(graph=graph)
            inputs = tf.placeholder(tf.float32,
                                    [1, self.img_h, self.img_w, 1
                                     ])  # placeholder for detector inputs
            print("=>", inputs)

            with tf.variable_scope('yolov3'):
                feature_map = model.forward(inputs,
                                            n_filters_dn=self.n_filters_dn,
                                            n_strides_dn=self.n_strides_dn,
                                            n_ksizes_dn=self.n_ksizes_dn,
                                            is_training=False)

            boxes, confs, probs = model.predict(feature_map)
            scores = confs * probs
            print("=>", boxes.name[:-2], scores.name[:-2])
            cpu_out_node_names = [boxes.name[:-2], scores.name[:-2]]
            boxes, scores, labels = utils.gpu_nms(boxes, scores,
                                                  self.num_classes)
            print("=>", boxes.name[:-2], scores.name[:-2], labels.name[:-2])
            gpu_out_node_names = [
                boxes.name[:-2], scores.name[:-2], labels.name[:-2]
            ]

            saver = tf.train.Saver(var_list=tf.global_variables(
                scope='yolov3'))

            if self.convert:
                load_ops = utils.load_weights(
                    tf.global_variables(scope='yolov3'), self.weights_dir)
                sess.run(load_ops)
                save_path = saver.save(sess, save_path=self.checkpoint_dir)
                print(f'=> model saved in path: {save_path}')

            if self.freeze:
                ckpt_idx = self.checkpoint_dir + '-' + str(
                    self.checkpoint_step)
                try:
                    saver.restore(sess, ckpt_idx)
                except:
                    print(
                        f"Error: you tried to restore a checkpoint ({self.checkpoint_dir}) that doesn't exist."
                    )
                    print(
                        "Please clear the network and retrain, or load a different checkpoint by changing the steps parameter."
                    )
                print('=> checkpoint file restored from ', ckpt_idx)
                utils.freeze_graph(sess,
                                   '../../data/checkpoint/yolov3_cpu_nms.pb',
                                   cpu_out_node_names)
                utils.freeze_graph(sess,
                                   '../../data/checkpoint/yolov3_gpu_nms.pb',
                                   gpu_out_node_names)
Code Example #29
File: web_demo.py Project: FredBebop/YOLOV3
def detect():
    global vid, outputFrame, lock
    num_classes = 80
    input_size = 416

    input_layer = tf.keras.layers.Input([input_size, input_size, 3])
    feature_maps = YOLOv3(input_layer)

    bbox_tensors = []
    for i, fm in enumerate(feature_maps):
        bbox_tensor = decode(fm, i)
        bbox_tensors.append(bbox_tensor)

    model = tf.keras.Model(input_layer, bbox_tensors)
    utils.load_weights(model, "./yolov3.weights")
    while True:
        detect_yolov3(vid,model,input_size)
Code Example #30
def create_ppe_detector(input_size):
    if FLAGS.framework == 'tf':
        input_layer = tf.keras.layers.Input([input_size, input_size, 3])
        feature_maps = YOLOv4(input_layer, NUM_CLASSES_PPE)
        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            bbox_tensor = decode(fm, NUM_CLASSES_PPE, i)
            bbox_tensors.append(bbox_tensor)
        model = tf.keras.Model(input_layer, bbox_tensors)
        utils.load_weights(model, FLAGS.ppe_weights)
        model.summary()
        return model
    elif FLAGS.framework == 'trt':
        saved_model_loaded = tf.saved_model.load(FLAGS.ppe_weights,
                                                 tags=[tag_constants.SERVING])
        infer = saved_model_loaded.signatures['serving_default']
        return infer
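In the 'trt' branch above, create_ppe_detector returns the serving signature of a SavedModel rather than a Keras model, so inference calls it directly. A minimal sketch, assuming image_data is a preprocessed float32 batch of shape [1, input_size, input_size, 3]:

infer = create_ppe_detector(input_size)
batch = tf.constant(image_data)
pred = infer(batch)   # dict of output tensors keyed by output name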