Example No. 1
def save_tf():
  NUM_CLASS = len(utils.read_class_names(cfg.YOLO.CLASSES))
  input_layer = tf.keras.layers.Input([FLAGS.input_size, FLAGS.input_size, 3])
  if FLAGS.tiny:
    feature_maps = YOLOv3_tiny(input_layer, NUM_CLASS)
    bbox_tensors = []
    for i, fm in enumerate(feature_maps):
      bbox_tensor = decode(fm, NUM_CLASS, i)
      bbox_tensors.append(bbox_tensor)
    model = tf.keras.Model(input_layer, bbox_tensors)
    utils.load_weights_tiny(model, FLAGS.weights)
  else:
    if FLAGS.model == 'yolov3':
      feature_maps = YOLOv3(input_layer, NUM_CLASS)
      bbox_tensors = []
      for i, fm in enumerate(feature_maps):
        bbox_tensor = decode(fm, NUM_CLASS, i)
        bbox_tensors.append(bbox_tensor)
      model = tf.keras.Model(input_layer, bbox_tensors)
      utils.load_weights_v3(model, FLAGS.weights)
    elif FLAGS.model == 'yolov4':
      feature_maps = YOLOv4(input_layer, NUM_CLASS)
      bbox_tensors = []
      for i, fm in enumerate(feature_maps):
        bbox_tensor = decode(fm, NUM_CLASS, i)
        bbox_tensors.append(bbox_tensor)
      model = tf.keras.Model(input_layer, bbox_tensors)
      utils.load_weights(model, FLAGS.weights)

    model.summary()

  model.save(FLAGS.output)
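For reference, the SavedModel written to FLAGS.output can be loaded back for inference. A minimal sketch, assuming TF2, a 416x416 export size, and a placeholder path standing in for FLAGS.output:

import numpy as np
import tensorflow as tf

# Placeholder for whatever FLAGS.output was set to when save_tf() ran.
saved_model_dir = "./checkpoints/yolo-416"

model = tf.keras.models.load_model(saved_model_dir, compile=False)

# Dummy forward pass just to confirm the exported graph and its output shapes.
dummy = np.zeros((1, 416, 416, 3), dtype=np.float32)
for out in model.predict(dummy):
    print(out.shape)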
Example No. 2
def create_model(anchor_per_scale=3, num_classes=1, max_bbox_per_scale=150):
    input_tensor = tf.keras.layers.Input([416, 416, 3])
    conv_tensors = YOLOv3(input_tensor)

    output_tensors = []
    for i, conv_tensor in enumerate(conv_tensors):
        pred_tensor = decode(conv_tensor, i)
        output_tensors.append(conv_tensor)
        output_tensors.append(pred_tensor)

    model_body = tf.keras.Model(input_tensor, output_tensors)

    train_output_sizes = 416 // np.array(cfg.YOLO.STRIDES)
    batch_label_sbbox = tf.keras.layers.Input((train_output_sizes[0], train_output_sizes[0],
                                               anchor_per_scale, 5 + num_classes), dtype=np.float32)
    batch_label_mbbox = tf.keras.layers.Input((train_output_sizes[1], train_output_sizes[1],
                                               anchor_per_scale, 5 + num_classes), dtype=np.float32)
    batch_label_lbbox = tf.keras.layers.Input((train_output_sizes[2], train_output_sizes[2],
                                               anchor_per_scale, 5 + num_classes), dtype=np.float32)

    batch_sbboxes = tf.keras.layers.Input((max_bbox_per_scale, 4), dtype=np.float32)
    batch_mbboxes = tf.keras.layers.Input((max_bbox_per_scale, 4), dtype=np.float32)
    batch_lbboxes = tf.keras.layers.Input((max_bbox_per_scale, 4), dtype=np.float32)

    target = [batch_label_sbbox, batch_sbboxes, batch_label_mbbox, batch_mbboxes,
              batch_label_lbbox, batch_lbboxes]

    model_loss = tf.keras.layers.Lambda(yolo_loss, output_shape=(1,), name='yolo_loss')([*model_body.output, target])

    new_model = tf.keras.Model([model_body.input, *target], model_loss)


    return new_model
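Because the model returned by create_model() emits the loss itself through the yolo_loss Lambda, it is typically trained with an identity loss and dummy targets. A minimal sketch under that assumption; the generator and its batch layout are hypothetical and not part of the snippet above:

model = create_model()

# The network output already is the loss value, so the Keras "loss" just
# passes the prediction through.
model.compile(optimizer=tf.keras.optimizers.Adam(1e-4),
              loss=lambda y_true, y_pred: y_pred)

# Hypothetical generator yielding ([image, label_sbbox, sbboxes, label_mbbox,
# mbboxes, label_lbbox, lbboxes], zeros) batches matching the model's inputs:
# model.fit(train_generator, steps_per_epoch=steps, epochs=epochs)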
Example No. 3
    def training(self):

        self.__getDataset()

        trainset = Dataset('train')
        logdir = "./data/log"
        steps_per_epoch = len(trainset)
        global_steps = tf.Variable(1, trainable=False, dtype=tf.int64)
        warmup_steps = cfg.TRAIN.WARMUP_EPOCHS * steps_per_epoch
        total_steps = cfg.TRAIN.EPOCHS * steps_per_epoch

        input_tensor = tf.keras.layers.Input([416, 416, 3])
        conv_tensors = YOLOv3(input_tensor)

        output_tensors = []
        for i, conv_tensor in enumerate(conv_tensors):
            pred_tensor = decode(conv_tensor, i)
            output_tensors.append(conv_tensor)
            output_tensors.append(pred_tensor)

        model = tf.keras.Model(input_tensor, output_tensors)
        optimizer = tf.keras.optimizers.Adam()
        if os.path.exists(logdir): shutil.rmtree(logdir)
        writer = tf.summary.create_file_writer(logdir)

        self._tb.start()
        for epoch in range(cfg.TRAIN.EPOCHS):
            print(epoch)
            for image_data, target in trainset:
                self.__train_step(image_data, target, model, global_steps,
                                  writer, optimizer, warmup_steps, total_steps)
            model.save_weights(self._args.ckpt_path)
        self._tb.stop()
        model.save("./models")

        zipFolder("check.zip", "checkpoint")
        zipFolder("log.zip", "data/log")
        zipFolder("model.zip", "models")

        self._run.upload_file(name='check.zip', path_or_stream="check.zip")
        print(
            f"Uploaded the checkpoints to experiment {self._run.experiment.name}"
        )
        self._run.upload_file(name='log.zip', path_or_stream="log.zip")
        print(f"Uploaded the tfruns to experiment {self._run.experiment.name}")
        self._run.upload_file(name='model.zip', path_or_stream="model.zip")
        print(f"Uploaded the model to experiment {self._run.experiment.name}")

        print("Following files are uploaded")
        print(self._run.get_file_names())

        self._run.add_properties({
            "release_id": self._args.release_id,
            "run_type": "train"
        })
        print(f"added properties: {self._run.properties}")

        self._run.complete()
Example No. 4
def get_object(video_url, threshold=0.45):
    input_layer = tf.keras.layers.Input([input_size, input_size, 3])
    feature_maps = YOLOv3(input_layer)

    bbox_tensors = []
    for i, fm in enumerate(feature_maps):
        bbox_tensor = decode(fm, i)
        bbox_tensors.append(bbox_tensor)

    model = tf.keras.Model(input_layer, bbox_tensors)
    utils.load_weights(model, "yolov3_union_10000.weights")
    model.summary()

    vid = cv2.VideoCapture(video_url)
    while True:
        return_value, frame = vid.read()
        if return_value:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        else:
            return "No image"
        frame_size = frame.shape[:2]
        image_data = utils.image_preporcess(np.copy(frame),
                                            [input_size, input_size])
        image_data = image_data[np.newaxis, ...].astype(np.float32)

        prev_time = time.time()
        pred_bbox = model.predict_on_batch(image_data)
        curr_time = time.time()
        exec_time = curr_time - prev_time

        pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
        pred_bbox = tf.concat(pred_bbox, axis=0)
        bboxes = utils.postprocess_boxes(pred_bbox, frame_size, input_size,
                                         0.3)
        bboxes = utils.nms(bboxes, threshold, method='nms')
        image = utils.draw_bbox(frame, bboxes)

        result = np.asarray(image)
        info = "time: %.2f ms" % (1000 * exec_time)
        cv2.putText(result,
                    text=info,
                    org=(50, 70),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=1,
                    color=(255, 0, 0),
                    thickness=2)

        result = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

        retval, buffer = cv2.imencode(".jpeg", result)
        yield ((b'--frame\r\n'
                b'Content-Type: image/jpeg\r\n\r\n' + buffer.tobytes() +
                b'\r\n'))
Example No. 5
def load_Yolo_model():
    """Load a yolo model and its weights for inference."""
    input_layer = tf.keras.layers.Input([None, None, 3])
    feature_maps = YOLOv3(input_layer)

    bbox_tensors = []
    for i, fm in enumerate(feature_maps):
        bbox_tensor = decode(fm, i)
        bbox_tensors.append(bbox_tensor)

    model = tf.keras.Model(input_layer, bbox_tensors)
    utils.load_weights(model, "./yolov3.weights")
    return model
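A quick usage sketch for the model returned above; the 416x416 dummy input is an assumption (the input layer accepts other sizes compatible with the network strides):

import numpy as np

model = load_Yolo_model()

dummy = np.zeros((1, 416, 416, 3), dtype=np.float32)  # batch of one blank image
pred_bbox = model.predict(dummy)                       # one decoded tensor per scale
print([p.shape for p in pred_bbox])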
Example No. 6
    def __loadModel(self):
        input_layer = tf.keras.layers.Input(
            [self._INPUT_SIZE, self._INPUT_SIZE, 3])
        feature_maps = YOLOv3(input_layer)

        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            bbox_tensor = decode(fm, i)
            bbox_tensors.append(bbox_tensor)

        model = tf.keras.Model(input_layer, bbox_tensors)
        model.load_weights(self._args.ckpt_path)

        return model
Example No. 7
def recv_Img(data, img):
    global flag, model, input_size
    start_time = time.time()
    frame_id = data['frame']
    frame_proc = data['proc']
    original_image = pickle.loads(img)

    class_names = {}
    with open(cfg.YOLO.CLASSES, 'r') as class_file:  # avoid shadowing the 'data' argument
        for ID, name in enumerate(class_file):
            class_names[ID] = name.strip('\n')

    # Setup tensorflow, keras and YOLOv3

    original_image      = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
    original_image_size = original_image.shape[:2]

    image_data = utils.image_preporcess(np.copy(original_image), [input_size, input_size])
    image_data = image_data[np.newaxis, ...].astype(np.float32)

    if flag:
        input_layer = tf.keras.layers.Input([input_size, input_size, 3])
        feature_maps = YOLOv3(input_layer)
        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            bbox_tensor = decode(fm, i)
            bbox_tensors.append(bbox_tensor)
        model = tf.keras.Model(input_layer, bbox_tensors)
        utils.load_weights(model, "./yolov3.weights")
        flag = False
    pred_bbox = model.predict(image_data)
    pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
    pred_bbox = tf.concat(pred_bbox, axis=0)

    bboxes = utils.postprocess_boxes(pred_bbox, original_image_size, input_size, 0.3)
    bboxes = utils.nms(bboxes, 0.45, method='nms')

    # The objects are detected and boxed; collect their class names into a list
    objects_detected = []
    for x0, y0, x1, y1, prob, class_id in bboxes:
        objects_detected.append(class_names[class_id])
    # Put each class and its frequency into a dictionary
    final_dict = {x: objects_detected.count(x) for x in set(objects_detected)}

    elapsed_time = time.time() - start_time

    message = {"frame": frame_id, 'proc': frame_proc, 'classes': final_dict, 'timestamp': elapsed_time}
    endpoint = "http://" + srvr + ':' + prt + "/result"
    requests.post(endpoint, json=message)
    return message
Example No. 8
def detect(image_path):
    #original_image=Image.open(BytesIO(original_image)).convert("RGBA")
    original_image = cv2.imread(
        image_path
    )  #you can and should replace this line to receive the image directly (not from a file)
    #original_image = base64.b64decode(dec_image)
    # Read class names
    class_names = {}
    with open(cfg.YOLO.CLASSES, 'r') as data:
        for ID, name in enumerate(data):
            class_names[ID] = name.strip('\n')

    # Setup tensorflow, keras and YOLOv3
    input_size = 416
    input_layer = tf.keras.layers.Input([input_size, input_size, 3])
    feature_maps = YOLOv3(input_layer)

    original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
    original_image_size = original_image.shape[:2]

    image_data = utils.image_preporcess(np.copy(original_image),
                                        [input_size, input_size])
    image_data = image_data[np.newaxis, ...].astype(np.float32)

    bbox_tensors = []
    for i, fm in enumerate(feature_maps):
        bbox_tensor = decode(fm, i)
        bbox_tensors.append(bbox_tensor)

    model = tf.keras.Model(input_layer, bbox_tensors)
    load_weights(model, "./yolov3.weights")

    pred_bbox = model.predict(image_data)
    pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
    pred_bbox = tf.concat(pred_bbox, axis=0)

    bboxes = utils.postprocess_boxes(pred_bbox, original_image_size,
                                     input_size, 0.3)
    bboxes = utils.nms(bboxes, 0.45, method='nms')

    # The objects are detected and boxed; collect their class names into a list
    objects_detected = []
    for x0, y0, x1, y1, prob, class_id in bboxes:
        objects_detected.append(class_names[class_id])

    # Let's show the user a nice picture - should be removed in production
    #image = utils.draw_bbox(original_image, bboxes)
    #image = Image.fromarray(image)
    #image.show()
    return objects_detected
Example No. 9
def create_coco_model():

    input_size = 416
    input_layer = tf.keras.layers.Input([input_size, input_size, 3])
    feature_maps = YOLOv3(input_layer)

    bbox_tensors = []
    for i, fm in enumerate(feature_maps):
        bbox_tensor = decode(fm, i)
        bbox_tensors.append(bbox_tensor)

    model = tf.keras.Model(input_layer, bbox_tensors)
    model.summary()
    utils.load_weights(model, "./yolov3.weights")
    return model
Example No. 10
def detect():
    global vid, outputFrame, lock
    num_classes = 80
    input_size = 416

    input_layer = tf.keras.layers.Input([input_size, input_size, 3])
    feature_maps = YOLOv3(input_layer)

    bbox_tensors = []
    for i, fm in enumerate(feature_maps):
        bbox_tensor = decode(fm, i)
        bbox_tensors.append(bbox_tensor)

    model = tf.keras.Model(input_layer, bbox_tensors)
    utils.load_weights(model, "./yolov3.weights")
    while True:
        detect_yolov3(vid, model, input_size)
Example No. 11
    def __init__(self, address, port):
        self.server_address = address
        self.server_port = port
        self.app = Flask(__name__)
        self.worker_port = 0
        self.image_details = []
        self.bbox_tensors = []
        # Setup tensorflow, keras and YOLOv3
        self.input_size = 416
        self.input_layer = tf.keras.layers.Input(
            [self.input_size, self.input_size, 3])
        self.feature_maps = YOLOv3(self.input_layer)
        for i, fm in enumerate(self.feature_maps):
            bbox_tensor = decode(fm, i)
            self.bbox_tensors.append(bbox_tensor)
        self.model = tf.keras.Model(self.input_layer, self.bbox_tensors)
        utils.load_weights(self.model, "./yolov3.weights")
        # app routing
        self.app.route('/receiveFrame', methods=['POST',
                                                 'GET'])(self.receiveFrame)
        self.main()
Example No. 12
import cv2
import numpy as np
import tensorflow as tf

import core.utils as utils
from core.yolov3 import YOLOv3, decode
from PIL import Image
from core.config import cfg

num_class       = 80
input_size      = 416

image_path      = "./docs/kite.jpg"
original_image = cv2.imread(image_path)
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
original_image_size = original_image.shape[:2]
image_data = utils.image_preporcess(np.copy(original_image), [input_size, input_size])
image_data = image_data[np.newaxis, ...].astype(np.float32)

input_layer = tf.keras.layers.Input([input_size, input_size, 3])
feature_maps = YOLOv3(input_layer)

bbox_tensors = []
for i, fm in enumerate(feature_maps):
    bbox_tensor = decode(fm, i)
    bbox_tensors.append(tf.reshape(bbox_tensor, (-1, 5+num_class)))

bbox_tensors = tf.concat(bbox_tensors, axis=0)
model = tf.keras.Model(input_layer, bbox_tensors)
utils.load_weights(model, "./yolov3.weights")

pred_bbox = model(image_data)
bboxes = utils.postprocess_boxes(pred_bbox, original_image_size, input_size, 0.3)
bboxes = utils.nms(bboxes, 0.45, method='nms')
image = utils.draw_bbox(original_image, bboxes)
image = Image.fromarray(image)
Example No. 13
    def __init__(self):

        self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
        self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
        self.num_classes = len(self.classes)
        self.learn_rate_init = cfg.TRAIN.LEARN_RATE_INIT
        self.learn_rate_end = cfg.TRAIN.LEARN_RATE_END
        self.first_stage_epochs = cfg.TRAIN.FIRST_STAGE_EPOCHS
        self.second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS
        self.warmup_periods = cfg.TRAIN.WARMUP_EPOCHS
        self.initial_weight = cfg.TRAIN.INITIAL_WEIGHT
        self.time = time.strftime('%Y-%m-%d-%H-%M-%S',
                                  time.localtime(time.time()))
        self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
        self.max_bbox_per_scale = 150
        self.train_logdir = "./data/log/train"
        self.trainset = Create_Dataset('train')
        self.testset = Create_Dataset('test')
        self.steps_per_period = len(self.trainset)
        self.sess = tf.Session(config=tf.ConfigProto(
            allow_soft_placement=True))

        with tf.name_scope('Define_Input'):
            self.input_data = tf.placeholder(dtype=tf.float32,
                                             name='input_data')
            self.label_sbbox = tf.placeholder(dtype=tf.float32,
                                              name='label_sbbox')
            self.label_mbbox = tf.placeholder(dtype=tf.float32,
                                              name='label_mbbox')
            self.label_lbbox = tf.placeholder(dtype=tf.float32,
                                              name='label_lbbox')
            self.true_sbbox = tf.placeholder(dtype=tf.float32,
                                             name='true_sbbox')
            self.true_mbbox = tf.placeholder(dtype=tf.float32,
                                             name='true_mbbox')
            self.true_lbbox = tf.placeholder(dtype=tf.float32,
                                             name='true_lbbox')
            self.trainable = tf.placeholder(dtype=tf.bool, name='training')

        with tf.name_scope('Define_Loss'):
            self.model = YOLOv3(self.input_data, self.trainable)
            self.net_var = tf.global_variables()
            self.giou_loss, self.conf_loss, self.prob_loss = self.model.compute_loss(
                self.label_sbbox, self.label_mbbox, self.label_lbbox,
                self.true_sbbox, self.true_mbbox, self.true_lbbox)
            self.loss = self.giou_loss + self.conf_loss + self.prob_loss

        with tf.name_scope('Learning_Rate'):
            self.global_step = tf.Variable(1.0,
                                           dtype=tf.float64,
                                           trainable=False,
                                           name='global_step')

            warmup_steps = tf.constant(self.warmup_periods *
                                       self.steps_per_period,
                                       dtype=tf.float64,
                                       name='warm_up_steps')

            train_steps = tf.constant(
                (self.first_stage_epochs + self.second_stage_epochs) *
                self.steps_per_period,
                dtype=tf.float64,
                name='Training_Steps')

            self.learn_rate = tf.cond(
                pred=self.global_step < warmup_steps,
                true_fn=lambda: self.global_step / warmup_steps *
                self.learn_rate_init,
                false_fn=lambda: self.learn_rate_end + 0.5 *
                (self.learn_rate_init - self.learn_rate_end) * (1 + tf.cos(
                    (self.global_step - warmup_steps) /
                    (train_steps - warmup_steps) * np.pi)))
            global_step_update = tf.assign_add(self.global_step, 1.0)

        with tf.name_scope('weight_decay'):
            moving_avg = tf.train.ExponentialMovingAverage(
                self.moving_ave_decay).apply(tf.trainable_variables())

        with tf.name_scope("define_first_stage_train"):
            self.first_stage_trainable_var_list = []
            for var in tf.trainable_variables():
                var_name = var.op.name
                var_name_mess = str(var_name).split('/')
                if var_name_mess[0] in [
                        'conv_sbbox', 'conv_mbbox', 'conv_lbbox'
                ]:
                    self.first_stage_trainable_var_list.append(var)

            first_stage_optimizer = tf.train.AdamOptimizer(
                self.learn_rate).minimize(
                    self.loss, var_list=self.first_stage_trainable_var_list)
            with tf.control_dependencies(
                    tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies(
                    [first_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_avg]):
                        self.train_op_with_frozen_variables = tf.no_op()

        with tf.name_scope("define_second_stage_train"):
            second_stage_trainable_var_list = tf.trainable_variables()
            second_stage_optimizer = tf.train.AdamOptimizer(
                self.learn_rate).minimize(
                    self.loss, var_list=second_stage_trainable_var_list)

            with tf.control_dependencies(
                    tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies(
                    [second_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_avg]):
                        self.train_op_with_all_variables = tf.no_op()

        with tf.name_scope('loader_and_saver'):
            self.loader = tf.train.Saver(self.net_var)
            self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=10)

        with tf.name_scope('summary'):
            tf.summary.scalar("learn_rate", self.learn_rate)
            tf.summary.scalar("giou_loss", self.giou_loss)
            tf.summary.scalar("conf_loss", self.conf_loss)
            tf.summary.scalar("prob_loss", self.prob_loss)
            tf.summary.scalar("total_loss", self.loss)

            logdir = "./data/log/"
            if os.path.exists(logdir): shutil.rmtree(logdir)
            os.mkdir(logdir)
            self.write_op = tf.summary.merge_all()
            self.summary_writer = tf.summary.FileWriter(logdir,
                                                        graph=self.sess.graph)
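The __init__ above only builds the TF1 graph. Below is a sketch of how a two-stage training loop could drive it, using the ops and placeholders defined above; the batch layout yielded by self.trainset and the checkpoint path are assumptions, not part of the original snippet.

    def train(self):
        self.sess.run(tf.global_variables_initializer())

        for epoch in range(1, 1 + self.first_stage_epochs + self.second_stage_epochs):
            # Stage 1 updates only the detection heads; stage 2 fine-tunes everything.
            train_op = (self.train_op_with_frozen_variables
                        if epoch <= self.first_stage_epochs
                        else self.train_op_with_all_variables)

            for train_data in self.trainset:
                _, summary, loss_val, step = self.sess.run(
                    [train_op, self.write_op, self.loss, self.global_step],
                    feed_dict={
                        self.input_data: train_data[0],
                        self.label_sbbox: train_data[1],
                        self.label_mbbox: train_data[2],
                        self.label_lbbox: train_data[3],
                        self.true_sbbox: train_data[4],
                        self.true_mbbox: train_data[5],
                        self.true_lbbox: train_data[6],
                        self.trainable: True,
                    })
                self.summary_writer.add_summary(summary, step)

            # Checkpoint path is a placeholder.
            self.saver.save(self.sess, "./checkpoint/yolov3.ckpt", global_step=epoch)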
Example No. 14
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"


"""get training images and labels"""
trainset = Dataset('train')    # create a Dataset object which will be used for generating batch_image, (batch_smaller_label, batch_medium_label, batch_larger_label)
logdir = "C:/PycharmProjects/YOLOV3_Github/data/log"    # file for saving logs
steps_per_epoch = len(trainset)    # 250
global_steps = tf.Variable(1, trainable=False, dtype=tf.int64)
warmup_steps = cfg.TRAIN.WARMUP_EPOCHS * steps_per_epoch    # 2 epochs
total_steps = cfg.TRAIN.EPOCHS * steps_per_epoch    # 30 train epochs
tf.print('steps_per_epoch:%d' % steps_per_epoch)


"""build model and load weights"""
input_tensor = tf.keras.layers.Input([416, 416, 3])    # create an input object with tensor shape (416,416,3), no batch is needed
conv_tensors = YOLOv3(input_tensor)    # chain input layer and hidden layers, get output [[b,52,52,3*(5+c)], [b,26,26,3*(5+c)], [b,13,13,3*(5+c)]]
output_tensors = []
for i, conv_tensor in enumerate(conv_tensors):
    pred_tensor = decode(conv_tensor, i)
    output_tensors.append(conv_tensor)    # output layer [batch, grid, grid, 3*(5+c)]
    output_tensors.append(pred_tensor)    # decoded output layer [batch, grid, grid, 3*(5+c)]
model = tf.keras.Model(input_tensor, output_tensors)    # generate a model object based on input layer and output layer
utils.load_weights(model, "./yolov3.weights")    # load weights
model.summary()


# """set up optimizer, tensorboard"""
# optimizer = tf.keras.optimizers.Adam(learning_rate=1e-4)
# if os.path.exists(logdir): shutil.rmtree(logdir)    # remove logdir folder and its files recursively
# writer = tf.summary.create_file_writer(logdir)    # create a summary file which can be visualised by TensorBoard
#
Example No. 15
import os
import shutil

import tensorflow as tf
import core.utils as utils
from tqdm import tqdm
from core.dataset import Dataset
from core.yolov3 import YOLOv3, decode, compute_loss
from core.config import cfg

trainset = Dataset('train')
logdir = "./data/log"
steps_per_epoch = len(trainset)
global_steps = tf.Variable(1, trainable=False, dtype=tf.int64)
warmup_steps = cfg.TRAIN.WARMUP_EPOCHS * steps_per_epoch
total_steps = cfg.TRAIN.EPOCHS * steps_per_epoch

input_tensor = tf.keras.layers.Input([416, 416, 3])
conv_tensors = YOLOv3(input_tensor)

output_tensors = []
for i, conv_tensor in enumerate(conv_tensors):
    pred_tensor = decode(conv_tensor, i)
    output_tensors.append(conv_tensor)
    output_tensors.append(pred_tensor)

model = tf.keras.Model(input_tensor, output_tensors)
optimizer = tf.keras.optimizers.Adam()
if os.path.exists(logdir): shutil.rmtree(logdir)
writer = tf.summary.create_file_writer(logdir)

def train_step(image_data, target):
    with tf.GradientTape() as tape:
        pred_result = model(image_data, training=True)
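        # The example is cut off here. A continuation sketch (an assumption based
        # on the compute_loss import above, not part of the original snippet):
        # sum the GIoU/confidence/class losses over the three scales, then apply
        # one optimizer update.
        giou_loss = conf_loss = prob_loss = 0
        for i in range(3):
            conv, pred = pred_result[i * 2], pred_result[i * 2 + 1]
            loss_items = compute_loss(pred, conv, *target[i], i)
            giou_loss += loss_items[0]
            conf_loss += loss_items[1]
            prob_loss += loss_items[2]
        total_loss = giou_loss + conf_loss + prob_loss

    gradients = tape.gradient(total_loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    global_steps.assign_add(1)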
Example No. 16
def yolov3(file_path, file, output_path):
    input_size = 416
    # image_path   = "./docs/2.jpg"
    # image_path   = "./test.jpg"
    input_path = os.path.join(file_path, file)

    input_layer = tf.keras.layers.Input([input_size, input_size, 3])
    feature_maps = YOLOv3(input_layer)

    original_image = cv2.imread(input_path)
    original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
    original_image_size = original_image.shape[:2]

    image_data = utils.image_preporcess(np.copy(original_image),
                                        [input_size, input_size])
    image_data = image_data[np.newaxis, ...].astype(np.float32)

    bbox_tensors = []
    for i, fm in enumerate(feature_maps):
        bbox_tensor = decode(fm, i)
        bbox_tensors.append(bbox_tensor)

    model = tf.keras.Model(input_layer, bbox_tensors)
    utils.load_weights(model, "./yolov3.weights")
    model.summary()

    pred_bbox = model.predict(image_data)
    pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
    pred_bbox = tf.concat(pred_bbox, axis=0)
    bboxes = utils.postprocess_boxes(pred_bbox, original_image_size,
                                     input_size, 0.3)
    bboxes = utils.nms(bboxes, 0.45, method='nms')
    """
            bboxes: [x_min, y_min, x_max, y_max, probability, cls_id] format coordinates.
    """
    box_matrix = np.matrix(bboxes)

    box_size = np.zeros((len(box_matrix), 5))
    for i in range(0, len(box_matrix)):
        box_size[i, 0] = box_matrix[i, 0]
        box_size[i, 1] = box_matrix[i, 1]
        box_size[i, 2] = box_matrix[i, 2]
        box_size[i, 3] = box_matrix[i, 3]
        box_size[i, 4] = box_matrix[i, 5]
    name = ['x_min', 'y_min', 'x_max', 'y_max', 'cls_id']

    # make dictionary
    id_name = {}

    with open('./name.csv', 'r', encoding='utf-8') as F:
        reader = csv.reader(F)
        data = [a for a in reader]
        for i in range(len(data)):
            id_name[i] = data[i][0]

    class_id = list()
    # map id and names
    for i in range(0, len(box_size)):
        class_id.append(id_name[int(box_size[i, 4])])

    box_size_data = pd.DataFrame(columns=name, data=box_size)
    box_size_data['class_id'] = class_id
    print(box_size_data)

    box_size_data = box_size_data.drop(['cls_id'], axis=1)
    print(box_size_data)

    csv_path = r'./yolo_coordinates'
    # print(box_size_data)
    box_size_data.to_csv(os.path.join(csv_path,
                                      os.path.splitext(file)[0] + '.csv'),
                         encoding='gbk',
                         index=False)
    # print(bboxes[1])
    image = utils.draw_bbox(original_image, bboxes)

    image = Image.fromarray(image)
    # image.show()
    image.save(os.path.join(output_path, file))
Example No. 17
    def __enter__(self):
        self._vid = cv2.VideoCapture("")
        self._input_layer = tf.keras.layers.Input([self._input_size, self._input_size, 3])
        self._feature_maps = YOLOv3(self._input_layer)
        return self
Example No. 18
import os

import cv2
import tensorflow as tf

import core.utils as utils
from core.yolov3 import YOLOv3, decode
from PIL import Image

# gpus = tf.config.experimental.list_physical_devices('GPU')    # return all GPUs
# tf.config.experimental.set_memory_growth(device=gpus[0], enable=True)    # dynamically use GPU memory
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"  # use CPU

input_size = 416
image_path = "./demo/000000009590.jpg"
"""build model and load weights"""
input_layer = tf.keras.Input(
    shape=(input_size, input_size,
           3))  # instantiate an input layer with tensor
feature_maps = YOLOv3(
    input_layer
)  # chain input layer and hidden layers, get output [[b,52,52,3*(5+c)], [b,26,26,3*(5+c)], [b,13,13,3*(5+c)]]
bbox_tensors = []
for i, fm in enumerate(feature_maps):
    bbox_tensor = decode(fm,
                         i)  # decode into [x,y,w,h,c] corresponding to 416*416
    bbox_tensors.append(
        bbox_tensor
    )  # bbox_tensors [[b,52,52,3,5+c], [b,26,26,3,5+c], [b,13,13,3,5+c]]
model = tf.keras.Model(
    input_layer, bbox_tensors
)  # generate a model object based on input layer and output layer
utils.load_weights(model, "./yolov3.weights")
model.summary()  # print model information
"""image proprecess"""
original_image = cv2.imread(image_path)  # read test image
Example No. 19
def work_frame(filename, count):
    global flag, model, server_addr, input_size

    start_time = datetime.datetime.now()

    # GET frame from server by server /static reference
    video_id = "video_" + str(filename) + "_frame_" + str(count) + ".jpg"
    ref_file = "static/" + video_id
    response = requests.get(server_addr + ref_file)

    # Image transformation to accepted format
    arr = np.asarray(bytearray(response.content), dtype=np.uint8)
    original_image = cv2.imdecode(arr, -1)
    
    ###### OBJECT DETECTION CODE #######

    # Read class names
    class_names = {}
    with open(cfg.YOLO.CLASSES, 'r') as data:
        for ID, name in enumerate(data):
            class_names[ID] = name.strip('\n')

    # Setup tensorflow, keras and YOLOv3

    original_image      = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
    original_image_size = original_image.shape[:2]

    image_data = utils.image_preporcess(np.copy(original_image), [input_size, input_size])
    image_data = image_data[np.newaxis, ...].astype(np.float32)
  
    if flag:
        input_layer  = tf.keras.layers.Input([input_size, input_size, 3])
        feature_maps = YOLOv3(input_layer)
        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            bbox_tensor = decode(fm, i)
            bbox_tensors.append(bbox_tensor)
        model = tf.keras.Model(input_layer, bbox_tensors)
        utils.load_weights(model, "./yolov3.weights")
        flag = False
    
    pred_bbox = model.predict(image_data)
    pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
    pred_bbox = tf.concat(pred_bbox, axis=0)

    bboxes = utils.postprocess_boxes(pred_bbox, original_image_size, input_size, 0.3)
    bboxes = utils.nms(bboxes, 0.45, method='nms')

    # The objects are detected and boxed; collect their class names into a list
    objects_detected = []
    for x0, y0, x1, y1, prob, class_id in bboxes:
        objects_detected.append(class_names[class_id])

    ### END OF OBJECT DETECTION CODE ###

    print(objects_detected)
    
    final_time = datetime.datetime.now() - start_time
    
    # Elaborate json with frame info and post to server in /return route
    final_dict = {}
    people_count = 0

    for obj in objects_detected:
        if str(obj) == "person":
            people_count += 1
        if str(obj) in final_dict:
            final_dict[str(obj)] += 1
        else:
            final_dict[str(obj)] = 1
    
    final_json = {
        "video_id": filename,
        "frame_no": count,
        "processing_time": str(final_time),
        "people_detected": people_count,
        "objects_detected": json.dumps(final_dict)
    }
    
    requests.post(server_addr + "return", json=final_json)
    return "\nDONE frame n. " + str(count) + "of video " + filename + "!\n"
Example No. 20
import tensorflow as tf
import cv2
import numpy as np
from PIL import Image

from core.yolov3 import YOLOv3, decode
import core.utils as utils
"""
1、定义模型
"""
input_size = 416
image_path = "D:/Anacoda/YOLO_v3_s/docs/kite.jpg"
input_layer = tf.keras.layers.Input(shape=[input_size, input_size, 3])
conv_bboxes = YOLOv3(input_layer)
output_layers = []
for i, conv_bbox in enumerate(conv_bboxes):
    pred_bbox = decode(conv_bbox, i)
    output_layers.append(pred_bbox)

model = tf.keras.Model(inputs=input_layer, outputs=output_layers)
# Load the weights
utils.load_weights(model, r"D:\Anacoda\YOLO_v3_s\docs\yolov3.weights")
model.summary()
"""
2、读取测试图片
"""
original_image = cv2.imread(image_path)
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
original_size = original_image.shape[:2]

image_data = utils.image_preprocess(np.copy(original_image), input_size)
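# The snippet ends here; the usual continuation, mirroring the earlier examples
# (a sketch with the same 0.3 score and 0.45 NMS thresholds used above):
image_data = image_data[np.newaxis, ...].astype(np.float32)  # add batch dimension

pred_bbox = model.predict(image_data)
pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
pred_bbox = tf.concat(pred_bbox, axis=0)

bboxes = utils.postprocess_boxes(pred_bbox, original_size, input_size, 0.3)
bboxes = utils.nms(bboxes, 0.45, method='nms')

image = Image.fromarray(utils.draw_bbox(original_image, bboxes))
image.show()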