Example #1
# os.environ['CUDA_LAUNCH_BLOCKING'] = "1"

from model.fcos import FCOSDetector
import torch

import torchvision.transforms as transforms
# from dataloader.VOC_dataset import VOCDataset
from dataloader.dataset import Dataset
import math, time
# from torch.utils.tensorboard import SummaryWriter
from tensorboardX import SummaryWriter


model = FCOSDetector(mode="training")
# model = torch.nn.DataParallel(model.cuda(), device_ids=range(torch.cuda.device_count()))
model = model.cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4, weight_decay=1e-4)

BATCH_SIZE = 16
EPOCHS = 60
WARMPUP_STEPS_RATIO = 0.12

transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])

# 2012_train 2007_val
cfg = {
    'images_root': '/home',
    'train_path': '/mnt/hdd1/benkebishe01/data/train.txt',
    'test_path': '/mnt/hdd1/benkebishe01/data/val.txt',
    'img_size': 512
}
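
The example breaks off at the config dict. A minimal sketch of how it might continue, assuming a hypothetical Dataset(path, img_size, transform) signature and a linear warmup driven by WARMPUP_STEPS_RATIO; neither is the repo's confirmed API.

# Sketch only: Dataset signature, DataLoader wiring and the warmup schedule are assumptions.
from torch.utils.data import DataLoader

train_dataset = Dataset(cfg['train_path'], img_size=cfg['img_size'], transform=transform)
train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=4)

steps_per_epoch = len(train_dataset) // BATCH_SIZE
TOTAL_STEPS = steps_per_epoch * EPOCHS
WARMUP_STEPS = int(TOTAL_STEPS * WARMPUP_STEPS_RATIO)

def lr_func(step, base_lr=1e-4):
    # Linear warmup to base_lr, then constant; swap in the schedule your training actually uses.
    # Per-step usage (hypothetical): for g in optimizer.param_groups: g['lr'] = lr_func(GLOBAL_STEPS)
    if step < WARMUP_STEPS:
        return base_lr * (step + 1) / WARMUP_STEPS
    return base_lr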
Example #2
import cv2
from model.fcos import FCOSDetector
import torch
from torchvision import transforms
import numpy as np
from dataloader.VOC_dataset import VOCDataset
import time
import os
from model.config import DefaultConfig as cfg

if __name__ == "__main__":
    model = FCOSDetector(mode="inference", config=cfg)
    model.load_state_dict(
        torch.load("FCOSMASK_epoch61_loss1.0623.pth",
                   map_location=torch.device('cpu')))
    model = model.cuda().eval()
    print("===>success loading model")
    root = cfg.inference_dir
    names = os.listdir(root)
    for name in names:
        img_pad = cv2.imread(os.path.join(root, name))  # BGR image, HxWxC, uint8
        img = img_pad.copy()
        img_t = torch.from_numpy(img).float().permute(2, 0, 1)  # HWC -> CHW, float
        # subtract per-channel pixel means (image stays on the 0-255 scale), std left at 1
        img1 = transforms.Normalize([102.9801, 115.9465, 122.7717],
                                    [1., 1., 1.])(img_t)
        img1 = img1.cuda()

        start_t = time.time()
        with torch.no_grad():
            out = model(img1.unsqueeze_(dim=0))  # add batch dimension in place
        end_t = time.time()
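        # Hedged continuation (not in the original): assumes the detector returns
        # (scores, classes, boxes); the 0.3 score threshold and the ./out_images/
        # output directory are hypothetical.
        print("===>cost time: %.2f ms" % ((end_t - start_t) * 1000))
        scores, classes, boxes = out
        boxes = boxes[0].cpu().numpy()
        scores = scores[0].cpu().numpy()
        for box, score in zip(boxes, scores):
            if score < 0.3:
                continue
            pt1, pt2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))
            cv2.rectangle(img, pt1, pt2, (0, 255, 0), 2)
        os.makedirs("./out_images", exist_ok=True)
        cv2.imwrite(os.path.join("./out_images", name), img)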
Example #3
        # if epoch_step % 100 == 0:
        print(
            "global_steps:%d epoch:%d steps:%d/%d cls_loss:%.4f cnt_loss:%.4f reg_loss:%.4f cost_time:%dms lr=%.4e total_loss:%.4f" % \
            (GLOBAL_STEPS, epoch + 1, epoch_step + 1, steps_per_epoch, losses[0].mean(), losses[1].mean(),
             losses[2].mean(), cost_time, lr, loss.mean()))
        GLOBAL_STEPS += 1

    # log the per-epoch loss components and save a checkpoint for this epoch
    for idx in range(4):
        summary.add_scalar(loss_list[idx], losses[idx][0], epoch)
    torch.save(model.state_dict(),
               "./checkpoint2/model_{}.pth".format(epoch + 1))

    # if epoch + 1 > 23:
    # Rebuild the detector in inference mode and reload the checkpoint just saved.
    # Note: if the checkpoint came from a model without DataParallel, its keys lack
    # the "module." prefix, and strict=False will silently skip the mismatched keys.
    model2 = FCOSDetector(mode="inference")
    model2 = torch.nn.DataParallel(model2)
    model2 = model2.cuda().eval()
    model2.load_state_dict(torch.load(
        "./checkpoint2/model_{}.pth".format(epoch + 1),
        map_location=torch.device('cuda:1')),
                           strict=False)
    # COCO-style evaluation; index 4 of the returned stats is the AP for medium objects
    tt = coco_eval.evaluate_coco(val_dataset, model2)
    m_acc = tt[4].astype(float)
    if m_acc > best_acc:
        best_acc = m_acc
        best_ep = epoch + 1

    data_ = dict()
    for idx, key in enumerate(eval_list):
        summary.add_scalar(key, tt[idx].astype(float), epoch)
    print("Best Acc of Medium : {0}, Best Ep of Medium : {1}".format(
        best_acc, best_ep))
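
For context, a sketch of the bookkeeping the snippet above relies on but does not show; the tag names, eval keys, and log directory are illustrative, not taken from the repo.

# Sketch only (not in the original): assumed initialization for the logging above.
from tensorboardX import SummaryWriter

summary = SummaryWriter(log_dir="./runs/fcos")
loss_list = ["cls_loss", "cnt_loss", "reg_loss", "total_loss"]
eval_list = ["AP", "AP50", "AP75", "AP_small", "AP_medium", "AP_large"]
GLOBAL_STEPS = 0
best_acc, best_ep = 0.0, 0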