Example 1
from model.fcos import FCOSDetector
import torch
from dataloader.VOC_dataset import VOCDataset
import math, time
from torch.utils.tensorboard import SummaryWriter

# Training/validation splits are read from the same local dataset root.
# resize_size=[512, 800] is forwarded verbatim to VOCDataset; presumably
# (short-side, long-side cap) resizing — TODO confirm against VOCDataset.
train_dataset = VOCDataset(
    "D:\\Research\\My_tamper_detect_dataset_train_val_test\\voc_dataset_tmp",
    resize_size=[512, 800],
    split='train')
val_dataset = VOCDataset(
    "D:\\Research\\My_tamper_detect_dataset_train_val_test\\voc_dataset_tmp",
    resize_size=[512, 800],
    split='val')

# Build the detector in training mode and move it to the (single) GPU.
model = FCOSDetector(mode="training").cuda()
# model.load_state_dict(torch.load("./checkpoints/voc_512x800_loss2.0635.pth"))
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)

BATCH_SIZE: int = 3
EPOCHS: int = 25
# NOTE(review): "WARMPUP" looks like a typo for "WARMUP"; left unchanged
# because code outside this chunk may reference the misspelled name.
WARMPUP_STEPS_RATIO: float = 0.12
# train_loader=torch.utils.data.DataLoader(train_dataset,batch_size=BATCH_SIZE,shuffle=True,collate_fn=train_dataset.collate_fn)
# val_loader=torch.utils.data.DataLoader(val_dataset,batch_size=BATCH_SIZE,shuffle=True,collate_fn=val_dataset.collate_fn)
# NOTE(review): shuffle=False on the *training* loader is unusual (the
# commented-out variant above used shuffle=True) — confirm this is intended,
# e.g. for a debugging/reproducibility run.
train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=BATCH_SIZE,
                                           shuffle=False,
                                           collate_fn=train_dataset.collate_fn)
val_loader = torch.utils.data.DataLoader(val_dataset,
                                         batch_size=BATCH_SIZE,
                                         shuffle=False,
Example 2
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
    ])
    val_loader = torch.utils.data.DataLoader(Dataset(images_root,
                                                     val_path,
                                                     img_size=512,
                                                     transform=transform,
                                                     train=False),
                                             batch_size=64,
                                             shuffle=False)
    draw = False
    if draw:
        writer = SummaryWriter(comment='voc_mAP')

    model = FCOSDetector(mode="inference")
    # model.load_state_dict(torch.load("/mnt/hdd1/benkebishe01/FCOS/80epoch/fcos_voc_3/voc_epoch80_loss0.8893.pth"))
    model = model.to(device).eval()
    print("===>success loading model")

    names = os.listdir(model_root)
    names = sorted(names, key=functools.cmp_to_key(compare))

    for name in names:
        # print(name)
        model.load_state_dict(torch.load(os.path.join(model_root + "/" +
                                                      name)))

        gt_boxes = []
        gt_classes = []
        pred_boxes = []
Example 3
        "cow",
        "diningtable",
        "dog",
        "horse",
        "motorbike",
        "person",
        "pottedplant",
        "sheep",
        "sofa",
        "train",
        "tvmonitor",
    )



# Build the detector in inference mode and load a pretrained checkpoint.
model = FCOSDetector(mode="inference", config=Config)
# model = torch.nn.DataParallel(model)
# NOTE(review): hard-coded absolute checkpoint path — parameterize for reuse.
ckpt = torch.load('/mnt/hdd1/benkebishe01/FCOS/fcos_fusion/voc2012_512x512_epoch71_loss0.7739.pth')
# ckpt = torch.load('/mnt/hdd1/benkebishe01/fcos_anchor/voc2012_512x512_epoch68_loss0.7201.pth')

model.load_state_dict(ckpt)
# eval() disables dropout/batch-norm updates for inference.
model.to(device).eval()
print('loading weights successfully...')

# Standard ImageNet normalization statistics (mean/std per RGB channel).
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])

def preprocess_img(img, img_size=416):
    # img = np.array(img)  # h w
Example 4
# COCO 2017 training/validation data; resize_ and transform are defined
# earlier in this script (outside this chunk).
train_dataset = COCODataset(
    "/HDD/jaeha/dataset/COCO/2017/train2017",
    '/HDD/jaeha/dataset/COCO/2017/annotations/changed500_instances_train2017.json',
    transform=transform,
    resize_size=resize_)
val_dataset = COCOGenerator(
    "/HDD/jaeha/dataset/COCO/2017/val2017",
    '/HDD/jaeha/dataset/COCO/2017/annotations/changed500_instances_val2017.json',
    resize_size=resize_)

# changed_val_dataset=COCOGenerator("/HDD/jaeha/dataset/COCO/2017/val2017",
#                           '/HDD/jaeha/dataset/COCO/2017/annotations/changed_instances_train2014.json',resize_size=resize_)

# train_dataset=COCODataset("/HDD/jaeha/dataset/COCO/2017/val2017",
#                           '/HDD/jaeha/dataset/COCO/2017/annotations/instances_val2017.json',transform=transform,resize_size=[600,600])
model = FCOSDetector(mode="training").cuda()
print(model)

model = torch.nn.DataParallel(model)
# model.load_state_dict(torch.load("./checkpoint/model_{}.pth".format(21), map_location=torch.device('cuda:0')))

BATCH_SIZE = opt.batch_size
EPOCHS = opt.epochs


def _seed_worker(worker_id):
    """Deterministically seed NumPy inside each DataLoader worker process.

    BUG FIX: the original code passed ``worker_init_fn=np.random.seed(0)``,
    which *calls* ``np.random.seed(0)`` once in the parent process and hands
    DataLoader its return value (``None``) — so workers were never seeded at
    all. Seeding with ``worker_id`` keeps runs reproducible while giving each
    worker a distinct NumPy stream (a shared seed would make every worker
    produce identical augmentations).
    """
    np.random.seed(worker_id)


train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=BATCH_SIZE,
                                           shuffle=True,
                                           collate_fn=train_dataset.collate_fn,
                                           num_workers=opt.n_cpu,
                                           worker_init_fn=_seed_worker)
# Integer division drops the final partial batch (matches drop-last behavior
# only in step accounting; the loader itself still yields the partial batch).
steps_per_epoch = len(train_dataset) // BATCH_SIZE
TOTAL_STEPS = steps_per_epoch * EPOCHS