# Example #1
# --- Dataset configuration (COCO layout) --------------------------------
# NOTE(review): `gtf` is created outside this chunk — presumably a Monk
# detector wrapper; confirm against the surrounding notebook cells.
root_dir = "."                    # parent directory of the dataset folder
coco_dir = "coco_dataset_3class"  # COCO-style dataset folder
img_dir = "."                     # image subfolder inside coco_dir
set_dir = "Images"                # split folder holding the training images


# In[12]:


# Register the training dataset with the detector wrapper.
gtf.Train_Dataset(root_dir, coco_dir, img_dir, set_dir,
                  batch_size=8, image_size=512, use_gpu=True)


# In[13]:


# Build the default model architecture.
gtf.Model()


# In[14]:


# lr: learning rate; val_interval: validate every N epochs;
# es_min_delta / es_patience: early-stopping knobs (patience 0 presumably
# disables early stopping — TODO confirm library semantics).
gtf.Set_Hyperparams(lr=0.0001, val_interval=1, es_min_delta=0.0, es_patience=0)


# In[ ]:


# Train and write checkpoints under trained/.
gtf.Train(num_epochs=30, model_output_dir="trained/")


# In[ ]:
# Example #2
# Build the Monk detector and run one full training pass driven by the
# `system` config dict (parsed elsewhere — keys assumed present; TODO confirm).
gtf = Detector()

# Register the training dataset (path variables are defined outside this chunk).
gtf.Train_Dataset(root_dir,
                  coco_dir,
                  img_dir,
                  set_dir,
                  batch_size=system["batch_size"],
                  image_size=system["image_size"],
                  use_gpu=system["use_gpu"])

# Optional validation dataset.
if system["val_data"] == "yes":
    gtf.Val_Dataset(val_root_dir, val_coco_dir, val_img_dir, val_set_dir)

# "devices" is a comma-separated list of GPU ids, e.g. "0,1".
gpu_devices = [int(device) for device in system["devices"].split(",")]

gtf.Model(gpu_devices=gpu_devices)

gtf.Set_Hyperparams(lr=system["lr"],
                    val_interval=system["val_interval"],
                    es_min_delta=system["es_min_delta"],
                    es_patience=system["es_patience"])

gtf.Train(num_epochs=system["epochs"],
          model_output_dir=system["output_model_dir"])

print("Completed")
# Example #3
# --- Training-set location (COCO layout) --------------------------------
troot_dir = "Root_Dir"
tcoco_dir = "Coco_style"
timg_dir = "images"
tset_dir = "Train"

# --- Validation-set location (same tree, different split folder) --------
vroot_dir = "Root_Dir"
vcoco_dir = "Coco_style"
vimg_dir = "images"
vset_dir = "Val"

gtf = None  # populated below once training finishes and Infer is loaded

model.Train_Dataset(troot_dir, tcoco_dir, timg_dir, tset_dir,
                    batch_size=8, image_size=352, use_gpu=True)
model.Val_Dataset(vroot_dir, vcoco_dir, vimg_dir, vset_dir)

model.Model(model_name="resnet34")  # resnet50 brought a CUDA memory error.
model.Set_Hyperparams(lr=0.0001, val_interval=1, print_interval=20)
model.Train(num_epochs=300, output_model_name="karen_model.pt")

# Load the freshly trained weights for inference.
from infer_detector import Infer
gtf = Infer()
gtf.Model(model_path="/content/karen_model.pt")

# Predictions are quite bad at the moment.
# Read class names, one per line; the final entry is dropped —
# NOTE(review): presumably a trailing blank/sentinel line in the file; confirm.
with open("/content/Root_Dir/Coco_style/annotations/classes.txt") as file:
    class_list = [line.rstrip("\n") for line in file][:-1]

img_p = ("/content/Images_and_Labels/Images/"
         "0000002_00005_d_0000014_jpg.rf.555bf2106d899e56d45da0a48295f04c.jpg")
scores, labels, boxes = gtf.Predict(img_p, class_list, vis_threshold=0.4)
# Coerce string-valued config entries from `system` into their numeric types.
system["val_interval"] = int(system["val_interval"])
system["lr"] = float(system["lr"])

# Build the detector and run training driven by the `system` config dict.
gtf = Detector()

# Register the training dataset (path variables are defined outside this chunk).
gtf.Train_Dataset(root_dir,
                  coco_dir,
                  img_dir,
                  set_dir,
                  batch_size=system["batch_size"],
                  use_gpu=system["use_gpu"])

# Optional validation dataset.
if system["val_data"] == "yes":
    gtf.Val_Dataset(val_root_dir, val_coco_dir, val_img_dir, val_set_dir)

# "devices" is a comma-separated list of GPU ids, e.g. "0,1".
gpu_devices = [int(device) for device in system["devices"].split(",")]

gtf.Model(model_name=system["model"], gpu_devices=gpu_devices)

gtf.Set_Hyperparams(lr=system["lr"],
                    val_interval=system["val_interval"],
                    print_interval=system["print_interval"])

gtf.Train(num_epochs=system["epochs"],
          output_model_name=system["output_model_name"] + ".pt")

print("Completed")



# Alternate training flow for an API variant that separates hyper-parameter
# configuration (Hyper_Params) from environment setup (Setup).
gtf = Detector()

# Register the training dataset with a fixed worker count.
gtf.Train_Dataset(root_dir, coco_dir, img_dir, set_dir,
                  batch_size=system["batch_size"],
                  num_workers=4)

# Optional validation dataset.
if system["val_data"] == "yes":
    gtf.Val_Dataset(val_root_dir, val_coco_dir, val_img_dir, val_set_dir)

gtf.Model(model_name=system["model"])

# This variant trains by iteration count rather than epoch count.
gtf.Hyper_Params(lr=system["lr"],
                 total_iterations=system["iterations"],
                 val_interval=system["val_interval"])

gtf.Setup()

gtf.Train(display_interval=system["print_interval"])

print("Completed")