Example #1
from model.fcos import FCOSDetector
from dataloader.VOC_dataset import VOCDataset
import torch
import math, time
from torch.utils.tensorboard import SummaryWriter

train_dataset = VOCDataset(
    "D:\\Research\\My_tamper_detect_dataset_train_val_test\\voc_dataset_tmp",
    resize_size=[512, 800],
    split='train')
val_dataset = VOCDataset(
    "D:\\Research\\My_tamper_detect_dataset_train_val_test\\voc_dataset_tmp",
    resize_size=[512, 800],
    split='val')

model = FCOSDetector(mode="training").cuda()
# model.load_state_dict(torch.load("./checkpoints/voc_512x800_loss2.0635.pth"))
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)

BATCH_SIZE = 3
EPOCHS = 25
WARMPUP_STEPS_RATIO = 0.12
# train_loader=torch.utils.data.DataLoader(train_dataset,batch_size=BATCH_SIZE,shuffle=True,collate_fn=train_dataset.collate_fn)
# val_loader=torch.utils.data.DataLoader(val_dataset,batch_size=BATCH_SIZE,shuffle=True,collate_fn=val_dataset.collate_fn)
train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=BATCH_SIZE,
                                           shuffle=False,
                                           collate_fn=train_dataset.collate_fn)
val_loader = torch.utils.data.DataLoader(val_dataset,
                                         batch_size=BATCH_SIZE,
                                         shuffle=False,
                                         collate_fn=val_dataset.collate_fn)
steps_per_epoch = len(train_dataset) // BATCH_SIZE
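
WARMPUP_STEPS_RATIO is defined above but never used in the visible part of this example. A minimal sketch of how the ratio could be turned into an absolute warmup length, mirroring the TOTAL_STEPS computation in the later examples (the WARMUP_STEPS name here is an assumption, not from the original script):

TOTAL_STEPS = steps_per_epoch * EPOCHS
WARMUP_STEPS = int(TOTAL_STEPS * WARMPUP_STEPS_RATIO)  # e.g. warm up for ~12% of all steps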
Example #2
import numpy as np
import torch

model = torch.nn.DataParallel(model)  # wrap the detector for multi-GPU training
BATCH_SIZE = opt.batch_size
EPOCHS = opt.epochs
train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=BATCH_SIZE,
                                           shuffle=True,
                                           collate_fn=train_dataset.collate_fn,
                                           num_workers=opt.n_cpu,
                                           worker_init_fn=lambda worker_id: np.random.seed(0))  # seed each worker; the original passed np.random.seed(0), which evaluates to None
steps_per_epoch = len(train_dataset) // BATCH_SIZE
TOTAL_STEPS = steps_per_epoch * EPOCHS
WARMUP_STEPS = 500
WARMUP_FACTOR = 1.0 / 3.0
GLOBAL_STEPS = 0
LR_INIT = 0.01
optimizer = torch.optim.SGD(model.parameters(),
                            lr=LR_INIT,
                            momentum=0.9,
                            weight_decay=0.0001)
lr_schedule = [120000, 160000]


def lr_func(step):
    lr = LR_INIT
    if step < WARMUP_STEPS:
        # linear warmup from WARMUP_FACTOR * LR_INIT up to LR_INIT
        alpha = float(step) / WARMUP_STEPS
        warmup_factor = WARMUP_FACTOR * (1.0 - alpha) + alpha
        lr = lr * warmup_factor
    else:
        # the original snippet is truncated here; a step decay of 0.1 at each
        # milestone in lr_schedule is assumed to complete the function
        for i in range(len(lr_schedule)):
            if step < lr_schedule[i]:
                break
            lr *= 0.1
    return float(lr)
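
The loop that consumes lr_func is not part of this excerpt. A minimal sketch of how the per-step rate would typically be pushed into the optimizer, following the param_groups pattern used in Example #5 below (the loop skeleton itself is an assumption):

for epoch in range(EPOCHS):
    for epoch_step, data in enumerate(train_loader):
        lr = lr_func(GLOBAL_STEPS)
        for param in optimizer.param_groups:
            param['lr'] = lr  # apply the scheduled rate before the update step
        # ... forward pass, loss, backward, optimizer.step() ...
        GLOBAL_STEPS += 1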
Example #3
#WARMPUP_STEPS_RATIO = 0.12
train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=BATCH_SIZE,
                                           shuffle=True,
                                           collate_fn=train_dataset.collate_fn,
                                           num_workers=opt.n_cpu,
                                           worker_init_fn=lambda worker_id: np.random.seed(0))  # seed each worker; the original passed np.random.seed(0), which evaluates to None
print("total_images : {}".format(len(train_dataset)))
steps_per_epoch = len(train_dataset) // BATCH_SIZE
TOTAL_STEPS = steps_per_epoch * EPOCHS
WARMPUP_STEPS = 501

GLOBAL_STEPS = 1
LR_INIT = 1e-3
LR_END = 1e-5
optimizer = torch.optim.SGD(model.parameters(),
                            lr=LR_INIT,
                            momentum=0.9,
                            weight_decay=0.0001)

model.train()

for epoch in range(EPOCHS):
    for epoch_step, data in enumerate(train_loader):

        batch_imgs, batch_boxes, batch_classes = data
        batch_imgs = batch_imgs.cuda()
        batch_boxes = batch_boxes.cuda()
        batch_classes = batch_classes.cuda()

        #lr = lr_func()
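        # The excerpt is truncated here. A sketch (assumption, not the original code) of a
        # per-step schedule using the constants defined above: linear warmup to LR_INIT,
        # then linear decay down to LR_END over the remaining steps.
        if GLOBAL_STEPS < WARMPUP_STEPS:
            lr = LR_INIT * GLOBAL_STEPS / WARMPUP_STEPS
        else:
            progress = min(GLOBAL_STEPS, TOTAL_STEPS)
            lr = LR_END + (LR_INIT - LR_END) * (TOTAL_STEPS - progress) / (TOTAL_STEPS - WARMPUP_STEPS)
        for param in optimizer.param_groups:
            param['lr'] = lr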
Example #4
from model.fcos import FCOSDetector
import torch

model = FCOSDetector(mode="training").cuda()

#

# lambda1=lambda epoch: (epoch / 520) if epoch < 520 else 0.5 * (math.cos((epoch - 520)/(200 * 65 - 520) * math.pi) + 1)
# optimizer = optim.SGD(model.parameters(), lr=lr_rate, momentum=0.9, nesterov=True)

# scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda1)

if config.muti_lr:
    Scene_params = list(map(id, model.fcos_body.scene_head.parameters()))
    backbone_params = list(map(id, model.fcos_body.backbone.parameters()))
    base_params = filter(
        lambda p: id(p) not in Scene_params and id(p) not in backbone_params,
        model.parameters())

    params = [{
        "params": model.fcos_body.scene_head.parameters(),
        "lr": 1e-4
    }, {
        "params": base_params,
        "lr": 1e-3
    }, {
        "params": model.fcos_body.backbone.parameters(),
        "lr": 1e-4
    }]
    optimizer = torch.optim.Adam(params)

else:
    print("simple lr")
Example #5
BATCH_SIZE = opt.batch_size
EPOCHS = opt.epochs
#WARMPUP_STEPS_RATIO = 0.12
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True,
                                           collate_fn=train_dataset.collate_fn,
                                           num_workers=opt.n_cpu, worker_init_fn=lambda worker_id: np.random.seed(0))  # original passed np.random.seed(0), i.e. None
print("total_images : {}".format(len(train_dataset)))
steps_per_epoch = len(train_dataset) // BATCH_SIZE
TOTAL_STEPS = steps_per_epoch * EPOCHS

GLOBAL_STEPS = 1
LR_INIT = 1e-3
LR_END = 1e-5
lr = LR_INIT
optimizer = torch.optim.SGD(model.parameters(), lr=LR_INIT, momentum=0.9, weight_decay=0.0001)

model.train()

for epoch in range(EPOCHS):
    for epoch_step, data in enumerate(train_loader):

        batch_imgs, batch_boxes, batch_classes = data
        batch_imgs = batch_imgs.cuda()
        batch_boxes = batch_boxes.cuda()
        batch_classes = batch_classes.cuda()

        if GLOBAL_STEPS == int(TOTAL_STEPS*0.6):
            lr = LR_INIT * 0.1
            for param in optimizer.param_groups:
                param['lr'] = lr
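        # The excerpt ends here. A sketch of the rest of a training step; the exact
        # forward-call signature of FCOSDetector is not shown in the original and is
        # an assumption, but GLOBAL_STEPS must be advanced for the LR drop above to fire.
        optimizer.zero_grad()
        losses = model([batch_imgs, batch_boxes, batch_classes])  # assumed training-mode API
        loss = losses[-1]
        loss.backward()
        optimizer.step()
        GLOBAL_STEPS += 1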
Example #6
from model.fcos import FCOSDetector
import torch

import torchvision.transforms as transforms
# from dataloader.VOC_dataset import VOCDataset
from dataloader.dataset import Dataset
import math, time
# from torch.utils.tensorboard import SummaryWriter
from tensorboardX import SummaryWriter


model = FCOSDetector(mode="training")
# model = torch.nn.DataParallel(model.cuda(), device_ids=range(torch.cuda.device_count()))
model = model.cuda()
# model=FCOSDetector(mode="training")
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4, weight_decay=1e-4)

BATCH_SIZE = 16
EPOCHS = 60
WARMPUP_STEPS_RATIO = 0.12

transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.485,0.456,0.406), (0.229,0.224,0.225))
])

# 2012_train 2007_val
cfg = {'images_root': '/home',
       'train_path': '/mnt/hdd1/benkebishe01/data/train.txt',
       'test_path': '/mnt/hdd1/benkebishe01/data/val.txt',
       'img_size': 512}

train_dataset = Dataset(cfg['images_root'], cfg['train_path'], img_size=cfg['img_size'], transform=transform, train=True)
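
The excerpt ends once the training dataset is built. A short sketch of how the DataLoader and loop could follow, modeled on the earlier examples; whether this Dataset class exposes a collate_fn and what each item returns is not shown above, so those details are assumptions:

train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=BATCH_SIZE,
                                           shuffle=True,
                                           num_workers=4)

model.train()
for epoch in range(EPOCHS):
    for step, (batch_imgs, batch_boxes, batch_classes) in enumerate(train_loader):
        batch_imgs = batch_imgs.cuda()
        batch_boxes = batch_boxes.cuda()
        batch_classes = batch_classes.cuda()
        # ... forward pass, loss, backward and optimizer.step() as in Examples #3 and #5 ...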