Example #1
def save_tf():
  NUM_CLASS = len(utils.read_class_names(cfg.YOLO.CLASSES))
  input_layer = tf.keras.layers.Input([FLAGS.input_size, FLAGS.input_size, 3])
  if FLAGS.tiny:
    feature_maps = YOLOv3_tiny(input_layer, NUM_CLASS)
    bbox_tensors = []
    for i, fm in enumerate(feature_maps):
      bbox_tensor = decode(fm, NUM_CLASS, i)
      bbox_tensors.append(bbox_tensor)
    model = tf.keras.Model(input_layer, bbox_tensors)
    utils.load_weights_tiny(model, FLAGS.weights)
  else:
    if FLAGS.model == 'yolov3':
      feature_maps = YOLOv3(input_layer, NUM_CLASS)
      bbox_tensors = []
      for i, fm in enumerate(feature_maps):
        bbox_tensor = decode(fm, NUM_CLASS, i)
        bbox_tensors.append(bbox_tensor)
      model = tf.keras.Model(input_layer, bbox_tensors)
      utils.load_weights_v3(model, FLAGS.weights)
    elif FLAGS.model == 'yolov4':
      feature_maps = YOLOv4(input_layer, NUM_CLASS)
      bbox_tensors = []
      for i, fm in enumerate(feature_maps):
        bbox_tensor = decode(fm, NUM_CLASS, i)
        bbox_tensors.append(bbox_tensor)
      model = tf.keras.Model(input_layer, bbox_tensors)
      utils.load_weights(model, FLAGS.weights)

    model.summary()

  model.save(FLAGS.output)
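
A quick way to sanity-check the exported SavedModel is to reload it and run a dummy forward pass; this is a minimal sketch (not part of the original script, and it assumes numpy is imported as np alongside the FLAGS used above):

reloaded = tf.keras.models.load_model(FLAGS.output)
dummy = np.zeros((1, FLAGS.input_size, FLAGS.input_size, 3), dtype=np.float32)
outputs = reloaded(dummy)  # one decoded bbox tensor per detection scale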
Example #2
def create_model(anchor_per_scale=3, num_classes=1, max_bbox_per_scale=150):
    input_tensor = tf.keras.layers.Input([416, 416, 3])
    conv_tensors = YOLOv3(input_tensor)

    output_tensors = []
    for i, conv_tensor in enumerate(conv_tensors):
        pred_tensor = decode(conv_tensor, i)
        output_tensors.append(conv_tensor)
        output_tensors.append(pred_tensor)

    model_body = tf.keras.Model(input_tensor, output_tensors)

    train_output_sizes = 416 // np.array(cfg.YOLO.STRIDES)
    batch_label_sbbox = tf.keras.layers.Input((train_output_sizes[0], train_output_sizes[0],
                                               anchor_per_scale, 5 + num_classes), dtype=np.float32)
    batch_label_mbbox = tf.keras.layers.Input((train_output_sizes[1], train_output_sizes[1],
                                               anchor_per_scale, 5 + num_classes), dtype=np.float32)
    batch_label_lbbox = tf.keras.layers.Input((train_output_sizes[2], train_output_sizes[2],
                                               anchor_per_scale, 5 + num_classes), dtype=np.float32)

    batch_sbboxes = tf.keras.layers.Input((max_bbox_per_scale, 4), dtype=np.float32)
    batch_mbboxes = tf.keras.layers.Input((max_bbox_per_scale, 4), dtype=np.float32)
    batch_lbboxes = tf.keras.layers.Input((max_bbox_per_scale, 4), dtype=np.float32)

    target = [batch_label_sbbox, batch_sbboxes, batch_label_mbbox, batch_mbboxes,
              batch_label_lbbox, batch_lbboxes]

    model_loss = tf.keras.layers.Lambda(yolo_loss, output_shape=(1,), name='yolo_loss')([*model_body.output, target])

    new_model = tf.keras.Model([model_body.input, *target], model_loss)


    return new_model
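
Because the Lambda layer computes the loss inside the graph, the model returned by create_model can be trained with a pass-through loss; this is a hedged sketch of that common Keras pattern, not code from the original project (the data pipeline that feeds [image, *targets] is assumed and not shown):

train_model = create_model()
train_model.compile(optimizer=tf.keras.optimizers.Adam(1e-4),
                    loss=lambda y_true, y_pred: y_pred)  # the model output already is the loss
# train_model.fit([images, *targets], np.zeros(len(images)), batch_size=8)  # dummy labels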
Example #3
    def training(self):

        self.__getDataset()

        trainset = Dataset('train')
        logdir = "./data/log"
        steps_per_epoch = len(trainset)
        global_steps = tf.Variable(1, trainable=False, dtype=tf.int64)
        warmup_steps = cfg.TRAIN.WARMUP_EPOCHS * steps_per_epoch
        total_steps = cfg.TRAIN.EPOCHS * steps_per_epoch

        input_tensor = tf.keras.layers.Input([416, 416, 3])
        conv_tensors = YOLOv3(input_tensor)

        output_tensors = []
        for i, conv_tensor in enumerate(conv_tensors):
            pred_tensor = decode(conv_tensor, i)
            output_tensors.append(conv_tensor)
            output_tensors.append(pred_tensor)

        model = tf.keras.Model(input_tensor, output_tensors)
        optimizer = tf.keras.optimizers.Adam()
        if os.path.exists(logdir): shutil.rmtree(logdir)
        writer = tf.summary.create_file_writer(logdir)

        self._tb.start()
        for epoch in range(cfg.TRAIN.EPOCHS):
            print(epoch)
            for image_data, target in trainset:
                self.__train_step(image_data, target, model, global_steps,
                                  writer, optimizer, warmup_steps, total_steps)
            model.save_weights(self._args.ckpt_path)
        self._tb.stop()
        model.save("./models")

        zipFolder("check.zip", "checkpoint")
        zipFolder("log.zip", "data/log")
        zipFolder("model.zip", "models")

        self._run.upload_file(name='check.zip', path_or_stream="check.zip")
        print(
            f"Uploaded the checkpoints to experiment {self._run.experiment.name}"
        )
        self._run.upload_file(name='log.zip', path_or_stream="log.zip")
        print(f"Uploaded the tfruns to experiment {self._run.experiment.name}")
        self._run.upload_file(name='model.zip', path_or_stream="model.zip")
        print(f"Uploaded the model to experiment {self._run.experiment.name}")

        print("Following files are uploaded")
        print(self._run.get_file_names())

        self._run.add_properties({
            "release_id": self._args.release_id,
            "run_type": "train"
        })
        print(f"added properties: {self._run.properties}")

        self._run.complete()
Example #4
def yolo_loss(target, output):
    """计算损失,for循环计算三个采样率下的损失,注意:此处取三种采样率下的总损失而不是平均损失"""
    giou_loss=conf_loss=prob_loss=0
    for i in range(3):
        # pred.shape (8, 52, 52, 3, 25) -----  output[i].shape  (8, 52, 52, 75)
        pred = yolov3.decode(output[i], i)
        loss_items = yolov3.compute_loss(pred, output[i], *target[i], i)
        giou_loss += loss_items[0]
        conf_loss += loss_items[1]
        prob_loss += loss_items[2]
    return [giou_loss, conf_loss, prob_loss]
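
The loss above can be plugged into a custom training step; a minimal sketch, assuming `model` outputs the three raw conv tensors in scale order, `target` holds the per-scale labels and boxes, and `optimizer` is a tf.keras optimizer (none of these come from this snippet):

with tf.GradientTape() as tape:
    output = model(image_data, training=True)
    giou_loss, conf_loss, prob_loss = yolo_loss(target, output)
    total_loss = giou_loss + conf_loss + prob_loss
gradients = tape.gradient(total_loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))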
Example #5
def get_object(video_url, threshold=0.45):
    input_layer = tf.keras.layers.Input([input_size, input_size, 3])
    feature_maps = YOLOv3(input_layer)

    bbox_tensors = []
    for i, fm in enumerate(feature_maps):
        bbox_tensor = decode(fm, i)
        bbox_tensors.append(bbox_tensor)

    model = tf.keras.Model(input_layer, bbox_tensors)
    utils.load_weights(model, "yolov3_union_10000.weights")
    model.summary()

    vid = cv2.VideoCapture(video_url)
    while True:
        return_value, frame = vid.read()
        if return_value:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        else:
            return "No image"
        frame_size = frame.shape[:2]
        image_data = utils.image_preporcess(np.copy(frame),
                                            [input_size, input_size])
        image_data = image_data[np.newaxis, ...].astype(np.float32)

        prev_time = time.time()
        pred_bbox = model.predict_on_batch(image_data)
        curr_time = time.time()
        exec_time = curr_time - prev_time

        pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
        pred_bbox = tf.concat(pred_bbox, axis=0)
        bboxes = utils.postprocess_boxes(pred_bbox, frame_size, input_size,
                                         0.3)
        bboxes = utils.nms(bboxes, threshold, method='nms')
        image = utils.draw_bbox(frame, bboxes)

        result = np.asarray(image)
        info = "time: %.2f ms" % (1000 * exec_time)
        cv2.putText(result,
                    text=info,
                    org=(50, 70),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=1,
                    color=(255, 0, 0),
                    thickness=2)

        result = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

        retval, buffer = cv2.imencode(".jpeg", result)  # encode the BGR frame that carries the overlay text
        yield ((b'--frame\r\n'
                b'Content-Type: image/jpeg\r\n\r\n' + buffer.tobytes() +
                b'\r\n'))
Example #6
def load_Yolo_model():
    """Load a yolo model and its weights for inference."""
    input_layer = tf.keras.layers.Input([None, None, 3])
    feature_maps = YOLOv3(input_layer)

    bbox_tensors = []
    for i, fm in enumerate(feature_maps):
        bbox_tensor = decode(fm, i)
        bbox_tensors.append(bbox_tensor)

    model = tf.keras.Model(input_layer, bbox_tensors)
    utils.load_weights(model, "./yolov3.weights")
    return model
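
One possible way to run the loaded model on a single image, reusing the helper functions that appear in the other examples on this page ("test.jpg" and the 416 input size are illustrative, not from the original snippet):

model = load_Yolo_model()
image = cv2.cvtColor(cv2.imread("test.jpg"), cv2.COLOR_BGR2RGB)
image_data = utils.image_preporcess(np.copy(image), [416, 416])
image_data = image_data[np.newaxis, ...].astype(np.float32)
pred_bbox = model.predict(image_data)
pred_bbox = tf.concat([tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox], axis=0)
bboxes = utils.postprocess_boxes(pred_bbox, image.shape[:2], 416, 0.3)
bboxes = utils.nms(bboxes, 0.45, method='nms')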
Example #7
def recv_Img(data, img):
    global flag, model, input_size
    start_time = time.time()
    frame_id = data['frame']
    frame_proc = data['proc']
    original_image= pickle.loads(img)

    class_names = {}
    with open(cfg.YOLO.CLASSES, 'r') as class_file:  # avoid shadowing the `data` argument
        for ID, name in enumerate(class_file):
            class_names[ID] = name.strip('\n')

    # Setup tensorflow, keras and YOLOv3

    original_image      = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
    original_image_size = original_image.shape[:2]

    image_data = utils.image_preporcess(np.copy(original_image), [input_size, input_size])
    image_data = image_data[np.newaxis, ...].astype(np.float32)

    if flag:    
        input_layer  = tf.keras.layers.Input([input_size, input_size, 3])
        feature_maps = YOLOv3(input_layer)
        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            bbox_tensor = decode(fm, i)
            bbox_tensors.append(bbox_tensor)
        model = tf.keras.Model(input_layer, bbox_tensors)
        utils.load_weights(model, "./yolov3.weights")
        flag=False
    pred_bbox = model.predict(image_data)
    pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
    pred_bbox = tf.concat(pred_bbox, axis=0)

    bboxes = utils.postprocess_boxes(pred_bbox, original_image_size, input_size, 0.3)
    bboxes = utils.nms(bboxes, 0.45, method='nms')

    # We have our objects detected and boxed, lets move the class name into a list
    objects_detected = []
    for x0,y0,x1,y1,prob,class_id in bboxes:
        objects_detected.append(class_names[class_id])
    #put classes and its frequency on a dictionary 
    final_dict={x:objects_detected.count(x) for x in set(objects_detected)}

    elapsed_time = time.time() - start_time
    
    message = {"frame": frame_id, 'proc': frame_proc, 'classes': final_dict, 'timestamp': elapsed_time}
    endpoint = "http://" + srvr + ':' + prt + "/result"
    requests.post(endpoint, json=message)
    return message
Example #8
def detect(image_path):
    #original_image=Image.open(BytesIO(original_image)).convert("RGBA")
    original_image = cv2.imread(
        image_path
    )  #you can and should replace this line to receive the image directly (not from a file)
    #original_image = base64.b64decode(dec_image)
    # Read class names
    class_names = {}
    with open(cfg.YOLO.CLASSES, 'r') as data:
        for ID, name in enumerate(data):
            class_names[ID] = name.strip('\n')

    # Setup tensorflow, keras and YOLOv3
    input_size = 416
    input_layer = tf.keras.layers.Input([input_size, input_size, 3])
    feature_maps = YOLOv3(input_layer)

    original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
    original_image_size = original_image.shape[:2]

    image_data = utils.image_preporcess(np.copy(original_image),
                                        [input_size, input_size])
    image_data = image_data[np.newaxis, ...].astype(np.float32)

    bbox_tensors = []
    for i, fm in enumerate(feature_maps):
        bbox_tensor = decode(fm, i)
        bbox_tensors.append(bbox_tensor)

    model = tf.keras.Model(input_layer, bbox_tensors)
    load_weights(model, "./yolov3.weights")

    pred_bbox = model.predict(image_data)
    pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
    pred_bbox = tf.concat(pred_bbox, axis=0)

    bboxes = utils.postprocess_boxes(pred_bbox, original_image_size,
                                     input_size, 0.3)
    bboxes = utils.nms(bboxes, 0.45, method='nms')

    # We have our objects detected and boxed, lets move the class name into a list
    objects_detected = []
    for x0, y0, x1, y1, prob, class_id in bboxes:
        objects_detected.append(class_names[class_id])

    # Lets show the user a nice picture - should be erased in production
    #image = utils.draw_bbox(original_image, bboxes)
    #image = Image.fromarray(image)
    #image.show()
    return objects_detected
Example #9
    def __loadModel(self):
        input_layer = tf.keras.layers.Input(
            [self._INPUT_SIZE, self._INPUT_SIZE, 3])
        feature_maps = YOLOv3(input_layer)

        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            bbox_tensor = decode(fm, i)
            bbox_tensors.append(bbox_tensor)

        model = tf.keras.Model(input_layer, bbox_tensors)
        model.load_weights(self._args.ckpt_path)

        return model
Example #10
def create_coco_model():

    input_size = 416
    input_layer = tf.keras.layers.Input([input_size, input_size, 3])
    feature_maps = YOLOv3(input_layer)

    bbox_tensors = []
    for i, fm in enumerate(feature_maps):
        bbox_tensor = decode(fm, i)
        bbox_tensors.append(bbox_tensor)

    model = tf.keras.Model(input_layer, bbox_tensors)
    model.summary()
    utils.load_weights(model, "./yolov3.weights")
    return model
Example #11
def detect():
    global vid, outputFrame, lock
    num_classes = 80
    input_size = 416

    input_layer = tf.keras.layers.Input([input_size, input_size, 3])
    feature_maps = YOLOv3(input_layer)

    bbox_tensors = []
    for i, fm in enumerate(feature_maps):
        bbox_tensor = decode(fm, i)
        bbox_tensors.append(bbox_tensor)

    model = tf.keras.Model(input_layer, bbox_tensors)
    utils.load_weights(model, "./yolov3.weights")
    while True:
        detect_yolov3(vid, model, input_size)
Example #12
    def __init__(self, address, port):
        self.server_address = address
        self.server_port = port
        self.app = Flask(__name__)
        self.worker_port = 0
        self.image_details = []
        self.bbox_tensors = []
        # Setup tensorflow, keras and YOLOv3
        self.input_size = 416
        self.input_layer = tf.keras.layers.Input(
            [self.input_size, self.input_size, 3])
        self.feature_maps = YOLOv3(self.input_layer)
        for i, fm in enumerate(self.feature_maps):
            bbox_tensor = decode(fm, i)
            self.bbox_tensors.append(bbox_tensor)
        self.model = tf.keras.Model(self.input_layer, self.bbox_tensors)
        utils.load_weights(self.model, "./yolov3.weights")
        # app routing
        self.app.route('/receiveFrame', methods=['POST',
                                                 'GET'])(self.receiveFrame)
        self.main()
Example #13
    def run(self):
        bbox_tensors = []
        for i, fm in enumerate(self._feature_maps):
            bbox_tensor = decode(fm, i)
            bbox_tensors.append(bbox_tensor)

        model = tf.keras.Model(self._input_layer, bbox_tensors)
        utils.load_weights(model, "./checkpoint/yolov3.ckpt")
        model.summary()
        
        while True:
            return_value, frame = self._vid.read()
            if return_value:
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            else:
                raise ValueError("No image!")
            frame_size = frame.shape[:2]
            image_data = utils.image_preporcess(np.copy(frame), [self._input_size, self._input_size])
            image_data = image_data[np.newaxis, ...].astype(np.float32)

            prev_time = time.time()
            pred_bbox = model.predict(image_data)
            curr_time = time.time()
            exec_time = curr_time - prev_time

            pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
            pred_bbox = tf.concat(pred_bbox, axis=0)
            bboxes = utils.postprocess_boxes(pred_bbox, frame_size, self._input_size, 0.3)
            bboxes = utils.nms(bboxes, 0.45, method='nms')
            image = utils.draw_bbox(frame, bboxes)

            result = np.asarray(image)
            info = "time: %.2f ms" %(1000*exec_time)
            cv2.putText(result, text=info, org=(50, 70), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale=1, color=(255, 0, 0), thickness=2)
            cv2.namedWindow("result", cv2.WINDOW_AUTOSIZE)
            result = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            cv2.imshow("result", result)
            if cv2.waitKey(1) & 0xFF == ord('q'): break
Example #14
num_class       = 80
input_size      = 416

image_path      = "./docs/kite.jpg"
original_image = cv2.imread(image_path)
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
original_image_size = original_image.shape[:2]
image_data = utils.image_preporcess(np.copy(original_image), [input_size, input_size])
image_data = image_data[np.newaxis, ...].astype(np.float32)

input_layer = tf.keras.layers.Input([input_size, input_size, 3])
feature_maps = YOLOv3(input_layer)

bbox_tensors = []
for i, fm in enumerate(feature_maps):
    bbox_tensor = decode(fm, i)
    bbox_tensors.append(tf.reshape(bbox_tensor, (-1, 5+num_class)))

bbox_tensors = tf.concat(bbox_tensors, axis=0)
model = tf.keras.Model(input_layer, bbox_tensors)
utils.load_weights(model, "./yolov3.weights")

pred_bbox = model(image_data)
bboxes = utils.postprocess_boxes(pred_bbox, original_image_size, input_size, 0.3)
bboxes = utils.nms(bboxes, 0.45, method='nms')
image = utils.draw_bbox(original_image, bboxes)
image = Image.fromarray(image)
image.show()
# image.save("./docs/kite_result.jpg")

Example #15
File: train.py  Project: msorrives/YOLOv3
from core.yolov3 import YOLOv3, decode, compute_loss
from core.config import cfg

trainset = Dataset('train')
logdir = "./data/log"
steps_per_epoch = len(trainset)
global_steps = tf.Variable(1, trainable=False, dtype=tf.int64)
warmup_steps = cfg.TRAIN.WARMUP_EPOCHS * steps_per_epoch
total_steps = cfg.TRAIN.EPOCHS * steps_per_epoch

input_tensor = tf.keras.layers.Input([416, 416, 3])
conv_tensors = YOLOv3(input_tensor)

output_tensors = []
for i, conv_tensor in enumerate(conv_tensors):
    pred_tensor = decode(conv_tensor, i)
    output_tensors.append(conv_tensor)
    output_tensors.append(pred_tensor)

model = tf.keras.Model(input_tensor, output_tensors)
optimizer = tf.keras.optimizers.Adam()
if os.path.exists(logdir): shutil.rmtree(logdir)
writer = tf.summary.create_file_writer(logdir)

def train_step(image_data, target):
    with tf.GradientTape() as tape:
        pred_result = model(image_data, training=True)
        giou_loss=conf_loss=prob_loss=0

        # optimizing process
        for i in range(3):
Example #16
predicted_dir_path = '../mAP/predicted'  # assumed: defined earlier in the original evaluate script
ground_truth_dir_path = '../mAP/ground_truth'
if os.path.exists(predicted_dir_path): shutil.rmtree(predicted_dir_path)
if os.path.exists(ground_truth_dir_path): shutil.rmtree(ground_truth_dir_path)
if os.path.exists(cfg.TEST.DETECTED_IMAGE_PATH): shutil.rmtree(cfg.TEST.DETECTED_IMAGE_PATH)
os.mkdir(predicted_dir_path)
os.mkdir(ground_truth_dir_path)
os.mkdir(cfg.TEST.DETECTED_IMAGE_PATH)


# build model

input_layer = tf.keras.Input(shape = [INPUT_SIZE, INPUT_SIZE, 3])
feature_maps = YOLOv3(input_layer)
output_layer = []
for i, fm in enumerate(feature_maps):
    output_tensor = decode(fm, i)
    output_layer.append(output_tensor)
    
model = tf.keras.Model(inputs=input_layer, outputs=output_layer)
model.load_weights('./yolov3')


# Open the test annotation file and evaluate line by line, writing results as we go
with open(cfg.TEST.ANNOT_PATH, 'r') as annotation_file:
    # annotation = annotation_file.readlines()
    for num, line in enumerate(annotation_file):
        annotation = line.strip().split()
        image_path = annotation[0]
        image_name = image_path.split('/')[-1]
        image = cv2.imread(image_path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
Example #17
def yolov3(file_path, file, output_path):
    input_size = 416
    # image_path   = "./docs/2.jpg"
    # image_path   = "./test.jpg"
    input_path = os.path.join(file_path, file)

    input_layer = tf.keras.layers.Input([input_size, input_size, 3])
    feature_maps = YOLOv3(input_layer)

    original_image = cv2.imread(input_path)
    original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
    original_image_size = original_image.shape[:2]

    image_data = utils.image_preporcess(np.copy(original_image),
                                        [input_size, input_size])
    image_data = image_data[np.newaxis, ...].astype(np.float32)

    bbox_tensors = []
    for i, fm in enumerate(feature_maps):
        bbox_tensor = decode(fm, i)
        bbox_tensors.append(bbox_tensor)

    model = tf.keras.Model(input_layer, bbox_tensors)
    utils.load_weights(model, "./yolov3.weights")
    model.summary()

    pred_bbox = model.predict(image_data)
    pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
    pred_bbox = tf.concat(pred_bbox, axis=0)
    bboxes = utils.postprocess_boxes(pred_bbox, original_image_size,
                                     input_size, 0.3)
    bboxes = utils.nms(bboxes, 0.45, method='nms')
    """
            bboxes: [x_min, y_min, x_max, y_max, probability, cls_id] format coordinates.
    """
    box_matrix = np.matrix(bboxes)

    box_size = np.zeros((len(box_matrix), 5))
    for i in range(0, len(box_matrix)):
        box_size[i, 0] = box_matrix[i, 0]
        box_size[i, 1] = box_matrix[i, 1]
        box_size[i, 2] = box_matrix[i, 2]
        box_size[i, 3] = box_matrix[i, 3]
        box_size[i, 4] = box_matrix[i, 5]
    name = ['x_min', 'y_min', 'x_max', 'y_max', 'cls_id']

    # make dictionary
    id_name = {}

    with open('./name.csv', 'r', encoding='utf-8') as F:
        reader = csv.reader(F)
        data = [a for a in reader]
        for i in range(len(data)):
            id_name[i] = data[i][0]

    class_id = list()
    # map id and names
    for i in range(0, len(box_size)):
        class_id.append(id_name[int(box_size[i, 4])])

    box_size_data = pd.DataFrame(columns=name, data=box_size)
    box_size_data['class_id'] = class_id
    print(box_size_data)

    box_size_data = box_size_data.drop(['cls_id'], axis=1)
    print(box_size_data)

    csv_path = r'./yolo_coordinates'
    # print(box_size_data)
    box_size_data.to_csv(os.path.join(csv_path,
                                      os.path.splitext(file)[0] + '.csv'),
                         encoding='gbk',
                         index=False)
    # print(bboxes[1])
    image = utils.draw_bbox(original_image, bboxes)

    image = Image.fromarray(image)
    # image.show()
    image.save(os.path.join(output_path, file))
Example #18
# gpus = tf.config.experimental.list_physical_devices('GPU')    # return all GPUs
# tf.config.experimental.set_memory_growth(device=gpus[0], enable=True)    # dynamically use GPU memory
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"  # use CPU

input_size = 416
image_path = "./demo/000000009590.jpg"
"""build model and load weights"""
input_layer = tf.keras.Input(
    shape=(input_size, input_size,
           3))  # instantiate an input layer with tensor
feature_maps = YOLOv3(
    input_layer
)  # chain input layer and hidden layers, get output [[b,52,52,3*(5+c)], [b,26,26,3*(5+c)], [b,13,13,3*(5+c)]]
bbox_tensors = []
for i, fm in enumerate(feature_maps):
    bbox_tensor = decode(fm,
                         i)  # decode into [x,y,w,h,c] corresponding to 416*416
    bbox_tensors.append(
        bbox_tensor
    )  # bbox_tensors [[b,52,52,3,5+c], [b,26,26,3,5+c], [b,13,13,3,5+c]]
model = tf.keras.Model(
    input_layer, bbox_tensors
)  # generate a model object based on input layer and output layer
utils.load_weights(model, "./yolov3.weights")
model.summary()  # print model information
"""image proprecess"""
original_image = cv2.imread(image_path)  # read test image
original_image = cv2.cvtColor(original_image,
                              cv2.COLOR_BGR2RGB)  # convert BGR to RGB mode
original_image_size = original_image.shape[:2]

image_data = utils.image_preporcess(
Example #19
import cv2
import numpy as np
import tensorflow as tf
from PIL import Image

from core.yolov3 import YOLOv3, decode
import core.utils as utils
"""
1、定义模型
"""
input_size = 416
image_path = "D:/Anacoda/YOLO_v3_s/docs/kite.jpg"
input_layer = tf.keras.layers.Input(shape=[input_size, input_size, 3])
conv_bboxes = YOLOv3(input_layer)
output_layers = []
for i, conv_bbox in enumerate(conv_bboxes):
    pred_bbox = decode(conv_bbox, i)
    output_layers.append(pred_bbox)

model = tf.keras.Model(inputs=input_layer, outputs=output_layers)
# Load the weights
utils.load_weights(model, r"D:\Anacoda\YOLO_v3_s\docs\yolov3.weights")
model.summary()
"""
2、读取测试图片
"""
original_image = cv2.imread(image_path)
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
original_size = original_image.shape[:2]

image_data = utils.image_preprocess(np.copy(original_image), input_size)
image_data = image_data[np.newaxis, ...].astype(np.float32)
Example #20
def work_frame(filename, count):
    global flag,model,server_addr,input_size

    start_time = datetime.datetime.now()

    # GET frame from server by server /static reference
    video_id = "video_" + str(filename) + "_frame_" + str(count) + ".jpg"
    ref_file = "static/" + video_id
    response = requests.get(server_addr + ref_file)

    # Image transformation to accepted format
    arr = np.asarray(bytearray(response.content), dtype=np.uint8)
    original_image = cv2.imdecode(arr, -1)
    
    ###### OBJECT DETECTION CODE #######

    # Read class names
    class_names = {}
    with open(cfg.YOLO.CLASSES, 'r') as data:
        for ID, name in enumerate(data):
            class_names[ID] = name.strip('\n')

    # Setup tensorflow, keras and YOLOv3

    original_image      = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
    original_image_size = original_image.shape[:2]

    image_data = utils.image_preporcess(np.copy(original_image), [input_size, input_size])
    image_data = image_data[np.newaxis, ...].astype(np.float32)
  
    if flag:
        input_layer  = tf.keras.layers.Input([input_size, input_size, 3])
        feature_maps = YOLOv3(input_layer)
        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            bbox_tensor = decode(fm, i)
            bbox_tensors.append(bbox_tensor)
        model = tf.keras.Model(input_layer, bbox_tensors)
        utils.load_weights(model, "./yolov3.weights")
        flag = False
    
    pred_bbox = model.predict(image_data)
    pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
    pred_bbox = tf.concat(pred_bbox, axis=0)

    bboxes = utils.postprocess_boxes(pred_bbox, original_image_size, input_size, 0.3)
    bboxes = utils.nms(bboxes, 0.45, method='nms')

    # We have our objects detected and boxed, lets move the class name into a list
    objects_detected = []
    for x0,y0,x1,y1,prob,class_id in bboxes:
        objects_detected.append(class_names[class_id])

    ### END OF OBJECT DETECTION CODE ###

    print(objects_detected)
    
    final_time = datetime.datetime.now() - start_time
    
    # Elaborate json with frame info and post to server in /return route
    final_dict={}
    people_count=0
    
    for obj in objects_detected:
        if str(obj) == "person":
            people_count+=1
        if str(obj) in final_dict:
            final_dict[str(obj)] += 1
        else:
            final_dict[str(obj)] = 1
    
    final_json = {
        "video_id":filename, 
        "frame_no":count, 
        "processing_time":str(final_time),
        "people_detected":people_count,
        "objects_detected":json.dumps(final_dict)
    }
    
    requests.post(server_addr + "return", json=final_json)
    return "\nDONE frame n. " + str(count) + "of video " + filename + "!\n"