val_coco_dir = "" val_img_dir = "" val_set_dir = system["val_monk_img_dir"] else: val_root_dir = system["val_coco_root_dir"] val_coco_dir = system["val_coco_coco_dir"] val_img_dir = system["val_coco_img_dir"] val_set_dir = system["val_coco_set_dir"] system["epochs"] = int(system["epochs"]) system["val_interval"] = int(system["val_interval"]) system["lr"] = float(system["lr"]) gtf = Detector() gtf.Train_Dataset(root_dir, coco_dir, img_dir, set_dir, batch_size=system["batch_size"], image_size=system["image_size"], use_gpu=system["use_gpu"]) if (system["val_data"] == "yes"): gtf.Val_Dataset(val_root_dir, val_coco_dir, val_img_dir, val_set_dir) tmp = system["devices"].split(",") gpu_devices = [] for i in range(len(tmp)):
# Notebook-style training setup: instantiate the Monk Detector and point it
# at a local COCO-format dataset (3 classes) for GPU training.

# In[8]:

from train_detector import Detector

# In[9]:

# pwd

# In[10]:

gtf = Detector()

# In[11]:

# Dataset layout: <root_dir>/<coco_dir>/<img_dir>/<set_dir>
root_dir = "."
coco_dir = "coco_dataset_3class"
img_dir = "."
set_dir = "Images"

# In[12]:

gtf.Train_Dataset(root_dir, coco_dir, img_dir, set_dir,
                  batch_size=8, image_size=512, use_gpu=True)
val_coco_dir = "" val_img_dir = "" val_set_dir = system["val_monk_img_dir"] else: val_root_dir = system["val_coco_root_dir"] val_coco_dir = system["val_coco_coco_dir"] val_img_dir = system["val_coco_img_dir"] val_set_dir = system["val_coco_set_dir"] system["epochs"] = int(system["epochs"]) system["val_interval"] = int(system["val_interval"]) system["lr"] = float(system["lr"]) gtf = Detector() gtf.Train_Dataset(root_dir, coco_dir, img_dir, set_dir, batch_size=system["batch_size"], use_gpu=system["use_gpu"]) if (system["val_data"] == "yes"): gtf.Val_Dataset(val_root_dir, val_coco_dir, val_img_dir, val_set_dir) tmp = system["devices"].split(",") gpu_devices = [] for i in range(len(tmp)): gpu_devices.append(int(tmp[i]))
# Check TF version
import tensorflow as tf
print(tf.__version__)

import os
import sys

# Make the Monk TF2 object-detection wrapper importable.
sys.path.append("MONK/Monk_Object_Detection/13_tf_obj_2/lib/")
from train_detector import Detector

gtf = Detector()
print(gtf.list_models())

# Pascal-VOC style annotations produced by the COCO_CREATION step.
train_img_dir = "COCO_CREATION/results/Train/images"
train_anno_dir = "COCO_CREATION/results/Train/annotations"
class_list_file = "COCO_CREATION/pascal-voc-classes.txt"

# 80/20 train/val split; batch of 24 images.
gtf.set_train_dataset(train_img_dir, train_anno_dir, class_list_file,
                      batch_size=24, trainval_split=0.8)

## Output dir
output_dir = os.path.join("data_tfrecord")
gtf.create_tfrecord(data_output_dir=output_dir)
# Stage the dataset into the Root_Dir/Coco_style layout, report image counts,
# then load the modified Monk RetinaNet trainer and define dataset locations.
copy_to_root()

# Image counts come straight from the directory listings.
train_num_img = len(os.listdir("/content/Root_Dir/Coco_style/images/Train"))
valid_num_img = len(os.listdir("/content/Root_Dir/Coco_style/images/Val"))
print("There are {} train images and {} valid images.".format(train_num_img, valid_num_img))

# !pip install efficientnet_pytorch
# !pip install tensorboardx
# Reference notebook:
# https://github.com/Tessellate-Imaging/Monk_Object_Detection/blob/master/example_notebooks/5_pytorch_retinanet/Train%20Resnet18%20-%20With%20validation%20Dataset.ipynb

import os

# The trainer must be imported from inside its lib/ directory.
os.chdir("/content/Computer-vision-and-Drones-Thesis/models/Modified Monk Retinanet/lib")
from train_detector import Detector

model = Detector()
os.chdir("/content/")

# Train/validation dataset locations (Monk COCO-style layout:
# <root>/<coco>/<img>/<set>).
troot_dir = "Root_Dir"
tcoco_dir = "Coco_style"
timg_dir = "images"
tset_dir = "Train"
vroot_dir = "Root_Dir"
vcoco_dir = "Coco_style"
vimg_dir = "images"
vset_dir = "Val"
val_img_dir = ""; val_set_dir = system["val_monk_img_dir"]; else: val_root_dir = system["val_coco_root_dir"]; val_coco_dir = system["val_coco_coco_dir"]; val_img_dir = system["val_coco_img_dir"]; val_set_dir = system["val_coco_set_dir"]; gtf = Detector(); gtf.Train_Dataset(root_dir, coco_dir, img_dir, set_dir, batch_size=system["batch_size"], num_workers=4) if(system["val_data"] == "yes"): gtf.Val_Dataset(val_root_dir, val_coco_dir, val_img_dir, val_set_dir) gtf.Model(model_name=system["model"]); gtf.Hyper_Params(lr=system["lr"],
val_root_dir = system["val_coco_root_dir"] + "/" + system[ "val_coco_coco_dir"] val_img_dir = system["val_coco_img_dir"] + "/" + system[ "val_coco_set_dir"] val_anno_dir = labels_dir else: val_root_dir = system["val_yolo_root_dir"] val_img_dir = system["val_yolo_img_dir"] val_anno_dir = system["val_yolo_anno_dir"] val_classes_file = system["val_yolo_classes_file"] from train_detector import Detector gtf = Detector() gtf.set_train_dataset(root_dir + "/" + img_dir, root_dir + "/" + anno_dir, root_dir + "/" + classes_file, batch_size=system["batch_size"], img_size=system["img_size"], cache_images=system["cache_images"]) if (system["val_data"] == "yes"): gtf.set_val_dataset(val_root_dir + "/" + val_img_dir, val_root_dir + "/" + val_anno_dir) gtf.set_model(model_name=system["model"]) gtf.set_hyperparams(optimizer=system["optimizer"],