def main():
    webapi = WebAPI(IP_ADDR, PORT, ACCOUNT, PASSWORD)
    cameras = webapi.list_cameras()
    camera_ids = [camera['id'] for camera in cameras]
    # the last camera added in Surveillance Station
    camera_id = camera_ids[-1]
    rtsp = webapi.get_liveview_rtsp(camera_id)
    fall_label = webapi.create_recording_label('fall_event')

    # Initialize fall detection model
    yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE)
    load_yolo_weights(yolo, YOLO_V3_WEIGHTS)  # use Darknet weights
    classes = read_class_names(YOLO_COCO_CLASSES)

    # Producer/consumer: one thread reads RTSP frames, the other processes them
    q = queue.Queue()
    p1 = threading.Thread(target=read_frame, args=(q, rtsp))
    p2 = threading.Thread(
        target=process_frame,
        args=(q, webapi, camera_id, yolo, classes, fall_label))
    p1.start()
    p2.start()
    p1.join()
    p2.join()

    webapi.logout()
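# Hypothetical sketch of the read_frame producer referenced above (the real
# implementation lives elsewhere in the project). It keeps only the most
# recent frame in the queue so process_frame never falls behind the stream;
# the None sentinel used to stop the consumer is an assumption.
import queue

import cv2


def read_frame(q, rtsp_url):
    cap = cv2.VideoCapture(rtsp_url)
    while cap.isOpened():
        ok, frame = cap.read()
        if not ok:
            break                  # stream ended or connection dropped
        if not q.empty():
            try:
                q.get_nowait()     # discard the stale frame
            except queue.Empty:
                pass
        q.put(frame)
    cap.release()
    q.put(None)                    # tell the consumer to stop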
def process_recordings(q, webapi):
    # Initialize the clothes category classifier
    cfg = Config.fromfile(CONFIG_FILE)
    landmark_tensor = torch.zeros(8)
    model = build_predictor(cfg.model)
    load_checkpoint(model, CHECKPOINT_FILE, map_location='cpu')
    print('model loaded from {}'.format(CHECKPOINT_FILE))
    if USE_CUDA:
        model.cuda()
        landmark_tensor = landmark_tensor.cuda()
    model.eval()
    cate_predictor = CatePredictor(cfg.data.test, tops_type=[1])

    # Initialize tracker model
    yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE)
    load_yolo_weights(yolo, Darknet_weights)  # use Darknet weights

    while True:
        if q.empty():
            time.sleep(1)
            continue
        recording_id, recording_filename = q.get()
        if recording_id == -1:  # sentinel value: stop processing
            break
        recording_filepath = os.path.join(RECORDINGS_DIRECTORY,
                                          recording_filename)
        Object_tracking(yolo,
                        webapi,
                        recording_id,
                        recording_filepath,
                        model,
                        cate_predictor,
                        landmark_tensor,
                        iou_threshold=0.1,
                        rectangle_colors=(255, 0, 0),
                        Track_only=["person"])
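# Hypothetical feeder showing how process_recordings could be driven; webapi
# and camera_id are assumed from the surrounding module, and list_recordings
# is an illustrative WebAPI helper, not a confirmed method name.
import queue
import threading

recordings_queue = queue.Queue()
worker = threading.Thread(target=process_recordings,
                          args=(recordings_queue, webapi))
worker.start()

# Each finished recording is enqueued as (recording_id, filename);
# the (-1, None) sentinel makes the worker loop above exit.
for rec in webapi.list_recordings(camera_id):
    recordings_queue.put((rec['id'], rec['filename']))
recordings_queue.put((-1, None))
worker.join()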
# GitHub      : https://github.com/pythonlessons/TensorFlow-2.x-YOLOv3
# Description : object detection image and video example
#
#================================================================
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # force CPU-only execution
import cv2
import numpy as np
import tensorflow as tf
from yolov3.yolov3 import Create_Yolov3
from yolov3.utils import load_yolo_weights, detect_image, detect_video
from yolov3.configs import *

input_size = YOLO_INPUT_SIZE
Darknet_weights = YOLO_DARKNET_WEIGHTS

image_path = "./IMAGES/street.jpg"
video_path = "./IMAGES/city_drive.mp4"

yolo = Create_Yolov3(input_size=input_size)
load_yolo_weights(yolo, Darknet_weights)  # use Darknet weights

detect_image(yolo, image_path, "", input_size=input_size, show=True,
             rectangle_colors=(255, 0, 0))
#detect_video(yolo, video_path, '', input_size=input_size, show=True, rectangle_colors=(255, 0, 0))
#detect_realtime(yolo, input_size=input_size, rectangle_colors=(255, 0, 0))
def main():
    global TRAIN_FROM_CHECKPOINT

    gpus = tf.config.experimental.list_physical_devices('GPU')
    if len(gpus) > 0:
        try:
            tf.config.experimental.set_memory_growth(gpus[0], True)
        except RuntimeError:
            pass

    if os.path.exists(TRAIN_LOGDIR):
        shutil.rmtree(TRAIN_LOGDIR)
    writer = tf.summary.create_file_writer(TRAIN_LOGDIR)

    trainset = Dataset('train')
    testset = Dataset('test')

    steps_per_epoch = len(trainset)
    global_steps = tf.Variable(1, trainable=False, dtype=tf.int64)
    warmup_steps = TRAIN_WARMUP_EPOCHS * steps_per_epoch
    total_steps = TRAIN_EPOCHS * steps_per_epoch

    if TRAIN_TRANSFER:
        Darknet = Create_Yolov3(input_size=YOLO_INPUT_SIZE)
        load_yolo_weights(Darknet, Darknet_weights)  # use Darknet weights
        #load_tiny_yolo_weights(Darknet, Darknet_weights)  # use Darknet weights

    yolo = Create_Yolov3(input_size=YOLO_INPUT_SIZE, training=True,
                         CLASSES=TRAIN_CLASSES)

    if TRAIN_FROM_CHECKPOINT:
        try:
            checkpoint_file = TRAIN_CHECKPOINTS_FOLDER + '/' + TRAIN_MODEL_NAME + '.h5'
            yolo.load_weights(checkpoint_file)
        except ValueError:
            print("Shapes are incompatible, transferring Darknet weights")
            TRAIN_FROM_CHECKPOINT = False

    if TRAIN_TRANSFER and not TRAIN_FROM_CHECKPOINT:
        for i, l in enumerate(Darknet.layers):
            layer_weights = l.get_weights()
            if layer_weights != []:
                try:
                    yolo.layers[i].set_weights(layer_weights)
                except ValueError:
                    print("skipping", yolo.layers[i].name)

    optimizer = tf.keras.optimizers.Adam()

    def train_step(image_data, target):
        with tf.GradientTape() as tape:
            pred_result = yolo(image_data, training=True)
            giou_loss = conf_loss = prob_loss = 0

            # sum the losses over all detection scales
            grid = 3 if not TRAIN_YOLO_TINY else 2
            for i in range(grid):
                conv, pred = pred_result[i * 2], pred_result[i * 2 + 1]
                loss_items = compute_loss(pred, conv, *target[i], i,
                                          CLASSES=TRAIN_CLASSES)
                giou_loss += loss_items[0]
                conf_loss += loss_items[1]
                prob_loss += loss_items[2]

            total_loss = giou_loss + conf_loss + prob_loss

        gradients = tape.gradient(total_loss, yolo.trainable_variables)
        optimizer.apply_gradients(zip(gradients, yolo.trainable_variables))

        # update learning rate
        # about warmup: https://arxiv.org/pdf/1812.01187.pdf&usg=ALkJrhglKOPDjNt6SHGbphTHyMcT0cuMJg
        global_steps.assign_add(1)
        if global_steps < warmup_steps:  # and not TRAIN_TRANSFER:
            lr = global_steps / warmup_steps * TRAIN_LR_INIT
        else:
            lr = TRAIN_LR_END + 0.5 * (TRAIN_LR_INIT - TRAIN_LR_END) * (
                (1 + tf.cos((global_steps - warmup_steps) /
                            (total_steps - warmup_steps) * np.pi)))
        optimizer.lr.assign(lr.numpy())

        # writing summary data
        with writer.as_default():
            tf.summary.scalar("lr", optimizer.lr, step=global_steps)
            tf.summary.scalar("loss/total_loss", total_loss, step=global_steps)
            tf.summary.scalar("loss/giou_loss", giou_loss, step=global_steps)
            tf.summary.scalar("loss/conf_loss", conf_loss, step=global_steps)
            tf.summary.scalar("loss/prob_loss", prob_loss, step=global_steps)
        writer.flush()

        return (global_steps.numpy(), optimizer.lr.numpy(), giou_loss.numpy(),
                conf_loss.numpy(), prob_loss.numpy(), total_loss.numpy())

    validate_writer = tf.summary.create_file_writer(TRAIN_LOGDIR)

    def validate_step(image_data, target):
        pred_result = yolo(image_data, training=False)
        giou_loss = conf_loss = prob_loss = 0

        grid = 3 if not TRAIN_YOLO_TINY else 2
        for i in range(grid):
            conv, pred = pred_result[i * 2], pred_result[i * 2 + 1]
            loss_items = compute_loss(pred, conv, *target[i], i,
                                      CLASSES=TRAIN_CLASSES)
            giou_loss += loss_items[0]
            conf_loss += loss_items[1]
            prob_loss += loss_items[2]

        total_loss = giou_loss + conf_loss + prob_loss

        return (giou_loss.numpy(), conf_loss.numpy(), prob_loss.numpy(),
                total_loss.numpy())

    best_val_loss = 1000  # should be large at start
    for epoch in range(TRAIN_EPOCHS):
        for image_data, target in trainset:
            results = train_step(image_data, target)
            cur_step = results[0] % steps_per_epoch
            print(
                "epoch:{:2.0f} step:{:5.0f}/{}, lr:{:.6f}, giou_loss:{:7.2f}, conf_loss:{:7.2f}, prob_loss:{:7.2f}, total_loss:{:7.2f}"
                .format(epoch, cur_step, steps_per_epoch, results[1],
                        results[2], results[3], results[4], results[5]))

        if len(testset) == 0:
            print("configure TEST options to validate model")
            yolo.save_weights(
                os.path.join(TRAIN_CHECKPOINTS_FOLDER, TRAIN_MODEL_NAME))
            continue

        count, giou_val, conf_val, prob_val, total_val = 0., 0, 0, 0, 0
        for image_data, target in testset:
            results = validate_step(image_data, target)
            count += 1
            giou_val += results[0]
            conf_val += results[1]
            prob_val += results[2]
            total_val += results[3]

        # writing validate summary data
        with validate_writer.as_default():
            tf.summary.scalar("validate_loss/total_val", total_val / count, step=epoch)
            tf.summary.scalar("validate_loss/giou_val", giou_val / count, step=epoch)
            tf.summary.scalar("validate_loss/conf_val", conf_val / count, step=epoch)
            tf.summary.scalar("validate_loss/prob_val", prob_val / count, step=epoch)
        validate_writer.flush()

        print(
            "\n\ngiou_val_loss:{:7.2f}, conf_val_loss:{:7.2f}, prob_val_loss:{:7.2f}, total_val_loss:{:7.2f}\n\n"
            .format(giou_val / count, conf_val / count, prob_val / count,
                    total_val / count))

        if TRAIN_SAVE_CHECKPOINT and not TRAIN_SAVE_BEST_ONLY:
            yolo.save_weights(
                os.path.join(
                    TRAIN_CHECKPOINTS_FOLDER,
                    TRAIN_MODEL_NAME + "_val_loss_{:7.2f}".format(total_val / count)))
        if TRAIN_SAVE_BEST_ONLY and best_val_loss > total_val / count:
            yolo.save_weights(
                os.path.join(TRAIN_CHECKPOINTS_FOLDER, TRAIN_MODEL_NAME))
            best_val_loss = total_val / count
        if not TRAIN_SAVE_BEST_ONLY and not TRAIN_SAVE_CHECKPOINT:
            yolo.save_weights(
                os.path.join(TRAIN_CHECKPOINTS_FOLDER, TRAIN_MODEL_NAME))
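# The trainer above is typically launched via the standard entry-point guard:
if __name__ == '__main__':
    main()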
print('GPU available'
      if tf.config.experimental.list_physical_devices("GPU") else 'not available')
print(f'cv version {cv2.__version__}')
# tf.debugging.set_log_device_placement(True)

# %%
input_size = YOLO_INPUT_SIZE
Darknet_weights = YOLO_DARKNET_WEIGHTS
if TRAIN_YOLO_TINY:
    Darknet_weights = YOLO_DARKNET_TINY_WEIGHTS
# video_path = "./IMAGES/street_drive.mp4"

yolo = Create_Yolov3(input_size=input_size, CLASSES='../' + YOLO_COCO_CLASSES)
load_yolo_weights(yolo, '../' + Darknet_weights)  # use Darknet weights
print(f'weight data load ok {Darknet_weights}')

# %%
image_path = "../IMAGES/kite.jpg"

# Load and preprocess the image
original_image = cv2.imread(image_path)
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
# Scale pixel values from 0~255 to floats between 0 and 1 and
# letterbox the image to fit the 416x416 input size.
image_data = image_preprocess(np.copy(original_image), [input_size, input_size])
# plt.figure()
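# The notebook-style cell above stops after preprocessing. A minimal
# continuation sketch, assuming the postprocess_boxes, nms and draw_bbox
# helpers from yolov3.utils; the thresholds below are illustrative.
from yolov3.utils import postprocess_boxes, nms, draw_bbox

image_data = image_data[np.newaxis, ...].astype(np.float32)  # add a batch dimension
pred_bbox = yolo.predict(image_data)                         # one tensor per detection scale
pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
pred_bbox = tf.concat(pred_bbox, axis=0)

bboxes = postprocess_boxes(pred_bbox, original_image, input_size, 0.3)  # score threshold
bboxes = nms(bboxes, 0.45, method='nms')                                # IoU threshold
result_image = draw_bbox(original_image, bboxes, CLASSES='../' + YOLO_COCO_CLASSES)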
from deep_sort.detection import Detection
from deep_sort.tracker import Tracker
from deep_sort import generate_detections as gdet

video_path = "./IMAGES/test.mp4"

if YOLO_FRAMEWORK == "tf":  # TensorFlow detection
    if YOLO_TYPE == "yolov4":
        Darknet_weights = YOLO_V4_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V4_WEIGHTS
    if YOLO_TYPE == "yolov3":
        Darknet_weights = YOLO_V3_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V3_WEIGHTS

    yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE)
    if YOLO_CUSTOM_WEIGHTS != False:
        yolo.load_weights(YOLO_CUSTOM_WEIGHTS)  # use custom weights
    else:
        load_yolo_weights(yolo, Darknet_weights)  # use MS COCO weights
elif YOLO_FRAMEWORK == "trt":  # TensorRT detection
    saved_model_loaded = tf.saved_model.load(YOLO_CUSTOM_WEIGHTS,
                                             tags=[tag_constants.SERVING])
    signature_keys = list(saved_model_loaded.signatures.keys())
    yolo = saved_model_loaded.signatures['serving_default']


def Object_tracking(Yolo, video_path, output_path, input_size=416, show=False,
                    CLASSES=YOLO_COCO_CLASSES, score_threshold=0.3,
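# Hypothetical usage of Object_tracking as defined above: track only people in
# the test video and write an annotated copy to "detection.mp4". The keyword
# arguments past score_threshold (iou_threshold, rectangle_colors, Track_only)
# are assumed from the truncated signature and the call in process_recordings.
Object_tracking(yolo, video_path, "detection.mp4",
                input_size=YOLO_INPUT_SIZE, show=True,
                iou_threshold=0.1, rectangle_colors=(255, 0, 0),
                Track_only=["person"])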
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import cv2
import numpy as np
import tensorflow as tf
from yolov3.yolov3 import Create_Yolov3
from yolov3.yolov4 import Create_Yolo
from yolov3.utils import load_yolo_weights, detect_realtime
from yolov3.configs import *

if YOLO_TYPE == "yolov4":
    Darknet_weights = YOLO_V4_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V4_WEIGHTS
if YOLO_TYPE == "yolov3":
    Darknet_weights = YOLO_V3_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V3_WEIGHTS

yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE)
load_yolo_weights(yolo, Darknet_weights)

detect_realtime(yolo, '', input_size=YOLO_INPUT_SIZE, show=True,
                rectangle_colors=(255, 0, 0))
save_best_only = True  # save only the best model according to validation loss
save_checkpoints = False  # save every best-validated checkpoint during training (may require a lot of disk space)

if os.path.exists(logdir):
    shutil.rmtree(logdir)
writer = tf.summary.create_file_writer(logdir)

trainset = Dataset('train')
testset = Dataset('test')

steps_per_epoch = len(trainset)
global_steps = tf.Variable(1, trainable=False, dtype=tf.int64)
warmup_steps = TRAIN_WARMUP_EPOCHS * steps_per_epoch
total_steps = TRAIN_EPOCHS * steps_per_epoch

if TRAIN_TRANSFER:
    Darknet = Create_Yolov3(input_size=input_size)
    load_yolo_weights(Darknet, Darknet_weights)  # use Darknet weights

yolo = Create_Yolov3(input_size=input_size, training=True, CLASSES=TRAIN_CLASSES)

if TRAIN_TRANSFER:
    for i, l in enumerate(Darknet.layers):
        layer_weights = l.get_weights()
        if layer_weights != []:
            try:
                yolo.layers[i].set_weights(layer_weights)
            except ValueError:
                print("skipping", yolo.layers[i].name)

optimizer = tf.keras.optimizers.Adam()