Example #1
# In[13]:


gtf.Model();


# In[14]:


gtf.Set_Hyperparams(lr=0.0001, val_interval=1, es_min_delta=0.0, es_patience=0)


# In[ ]:


gtf.Train(num_epochs=30, model_output_dir="trained/");


# In[2]:


# import sys
# sys.path.append("Monk_Object_Detection/4_efficientdet/lib/");
# from src.dataset import CocoDataset
# root_dir = "coco_dataset_3class";
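Example #1 starts at gtf.Model() and never shows the dataset step. Based on the commented-out path above and on the Train_Dataset signature used in Examples #2 and #4, the missing setup cell might look roughly like the sketch below; the folder names and the batch/image sizes are assumptions, not part of the original notebook.

# Hypothetical dataset-setup cell for Example #1 (folder names and sizes are assumptions)
import sys
sys.path.append("Monk_Object_Detection/4_efficientdet/lib/");

from train_detector import Detector

gtf = Detector();

root_dir = "coco_dataset_3class";   # from the commented-out line above
coco_dir = "Coco_style";            # assumed COCO-format annotation folder
img_dir = "images";                 # assumed image folder
set_dir = "Train";                  # assumed training split

gtf.Train_Dataset(root_dir, coco_dir, img_dir, set_dir,
                  batch_size=8, image_size=512, use_gpu=True)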
Example #2
gtf = Detector()

gtf.Train_Dataset(root_dir,
                  coco_dir,
                  img_dir,
                  set_dir,
                  batch_size=system["batch_size"],
                  image_size=system["image_size"],
                  use_gpu=system["use_gpu"])

if (system["val_data"] == "yes"):
    gtf.Val_Dataset(val_root_dir, val_coco_dir, val_img_dir, val_set_dir)

# parse the comma-separated device string, e.g. "0,1" -> [0, 1]
gpu_devices = [int(d) for d in system["devices"].split(",")]

gtf.Model(gpu_devices=gpu_devices)

gtf.Set_Hyperparams(lr=system["lr"],
                    val_interval=system["val_interval"],
                    es_min_delta=system["es_min_delta"],
                    es_patience=system["es_patience"])

gtf.Train(num_epochs=system["epochs"],
          model_output_dir=system["output_model_dir"])

print("Completed")
system["val_interval"] = int(system["val_interval"])
system["lr"] = float(system["lr"])

gtf = Detector()

gtf.Train_Dataset(root_dir,
                  coco_dir,
                  img_dir,
                  set_dir,
                  batch_size=system["batch_size"],
                  use_gpu=system["use_gpu"])

if (system["val_data"] == "yes"):
    gtf.Val_Dataset(val_root_dir, val_coco_dir, val_img_dir, val_set_dir)

# parse the comma-separated device string, e.g. "0,1" -> [0, 1]
gpu_devices = [int(d) for d in system["devices"].split(",")]

gtf.Model(model_name=system["model"], gpu_devices=gpu_devices)

gtf.Set_Hyperparams(lr=system["lr"],
                    val_interval=system["val_interval"],
                    print_interval=system["print_interval"])

gtf.Train(num_epochs=system["epochs"],
          output_model_name=system["output_model_name"] + ".pt")

print("Completed")
Example #4
troot_dir = "Root_Dir";     # assumed: the training split shares the root/COCO dirs used for validation below
tcoco_dir = "Coco_style";
timg_dir = "images";
tset_dir = "Train";



vroot_dir = "Root_Dir";
vcoco_dir = "Coco_style";
vimg_dir = "images";
vset_dir = "Val";

from train_detector import Detector   # assumes the Monk pipeline's lib/ directory is already on sys.path
model = Detector();

model.Train_Dataset(troot_dir, tcoco_dir, timg_dir, tset_dir, batch_size=8, image_size=352, use_gpu=True)
model.Val_Dataset(vroot_dir, vcoco_dir, vimg_dir, vset_dir)

model.Model(model_name="resnet34");  # resnet50 caused a CUDA out-of-memory error
model.Set_Hyperparams(lr=0.0001, val_interval=1, print_interval=20)
model.Train(num_epochs=300, output_model_name="karen_model.pt");

from infer_detector import Infer
gtf = Infer();
gtf.Model(model_path="/content/karen_model.pt");

# predictions are quite bad at the moment.
class_list = []
with open("/content/Root_Dir/Coco_style/annotations/classes.txt") as file:
    for line in file:
        class_list.append(line.rstrip("\n"))
class_list = class_list[:-1]   # drop the last entry (presumably a trailing blank line in classes.txt)
img_p="/content/Images_and_Labels/Images/0000002_00005_d_0000014_jpg.rf.555bf2106d899e56d45da0a48295f04c.jpg"
scores, labels, boxes = gtf.Predict(img_p, class_list, vis_threshold=0.4);
from IPython.display import Image
Image(filename='output.jpg')
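gtf.Predict() returns (scores, labels, boxes) and, judging by the Image(filename='output.jpg') call, writes the visualised detections to output.jpg. A small loop like the one below could run the same inference over a whole folder; the glob pattern and the output-file handling are assumptions.

# Hypothetical batch-inference loop built on the Predict call above
# (image folder and output.jpg handling are assumptions).
import glob, os, shutil

for img_p in glob.glob("/content/Images_and_Labels/Images/*.jpg"):
    scores, labels, boxes = gtf.Predict(img_p, class_list, vis_threshold=0.4)
    # Predict appears to overwrite output.jpg on every call, so copy each
    # visualisation aside before the next iteration.
    shutil.copy("output.jpg", "pred_" + os.path.basename(img_p))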



Example #5

gtf = Detector();


gtf.Train_Dataset(root_dir, coco_dir, img_dir, set_dir, 
                    batch_size=system["batch_size"], 
                    num_workers=4)

if(system["val_data"] == "yes"):
    gtf.Val_Dataset(val_root_dir, val_coco_dir, val_img_dir, val_set_dir)



gtf.Model(model_name=system["model"]);


gtf.Hyper_Params(lr=system["lr"], 
                    total_iterations=system["iterations"], 
                    val_interval=system["val_interval"])


gtf.Setup();

gtf.Train(display_interval=system["print_interval"]);

print("Completed");

Example #6
if (system["val_data"] == "yes"):
    val_classes_file = system["val_yolo_classes_file"]

from train_detector import Detector

gtf = Detector()

gtf.set_train_dataset(root_dir + "/" + img_dir,
                      root_dir + "/" + anno_dir,
                      root_dir + "/" + classes_file,
                      batch_size=system["batch_size"],
                      img_size=system["img_size"],
                      cache_images=system["cache_images"])

if (system["val_data"] == "yes"):
    gtf.set_val_dataset(val_root_dir + "/" + val_img_dir,
                        val_root_dir + "/" + val_anno_dir)

gtf.set_model(model_name=system["model"])

gtf.set_hyperparams(optimizer=system["optimizer"],
                    lr=system["lr"],
                    multi_scale=system["multi_scale"],
                    evolve=system["evolve"],
                    num_generations=system["num_generations"],
                    mixed_precision=system["mixed_precision"],
                    gpu_devices=system["devices"])

gtf.Train(num_epochs=system["epochs"])

print("Completed")