Example #1
n_crops = args.num_crops
batch_size = args.batch_size
using_gpu = torch.cuda.is_available()

checkpoint_dir = 'checkpoint'

start_epoch = 0  # start from epoch 0 or last epoch

# Data
print('==> Preparing data..')
transform = transforms.Compose([
    transforms.ToTensor()
])

trainset = ListDataset(root="../train", gt_extension=".txt",
                      labelmap_path="class_label_map.xlsx", is_train=True, transform=transform, input_image_size=512,
                      num_crops=n_crops, original_img_size=512)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=8, collate_fn=trainset.collate_fn)

validset = ListDataset(root="../valid", gt_extension=".txt",
                      labelmap_path="class_label_map.xlsx", is_train=False, transform=transform, input_image_size=512,
                      num_crops=1, original_img_size=512)
validloader = torch.utils.data.DataLoader(validset, batch_size=batch_size, shuffle=False, num_workers=8, collate_fn=validset.collate_fn)

print("lr : " + str(lr))
print("num. of classes : " + str(num_classes))
print("optimizer : " + selected_optim)
print("Using cuda : " + str(using_gpu))
print("Num. of crops : " + str(n_crops))
print("Size of batch : " + str(batch_size))
print("num. train data : " + str(trainset.__len__()))
Example #2
fix = 'head'

assert torch.cuda.is_available(), 'Error: CUDA not found!'
best_loss = float('inf')  # best test loss
start_epoch = 0  # start from epoch 0 or last epoch

# Data
print('==> Preparing data..')
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])

trainset = ListDataset(
    list_file='/group/proteindl/ps793/Dota/train_retina_dota_nd.txt',
    train=True,
    transform=transform,
    input_size=1024)
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=2,
                                          shuffle=True,
                                          num_workers=8,
                                          collate_fn=trainset.collate_fn)

testset = ListDataset(
    list_file='/group/proteindl/ps793/Dota/val_retina_dota_nd.txt',
    train=False,
    transform=transform,
    input_size=1024)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=2,
                                         shuffle=False,
                                         num_workers=8,
                                         collate_fn=testset.collate_fn)
Example #3
# parameters
start_epoch = 0
end_epoch = 200
batch_size = 32
lr = 1e-3
resume = False
torch.utils.backcompat.keepdim_warning.enabled = True
torch.utils.backcompat.broadcast_warning.enabled = True
train_list = './utils/train.txt'
val_list = './utils/test.txt'
ckpt_name = 'coco'

transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))])
train_set = ListDataset(list_file=train_list, train=True, transform=transform)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=False, num_workers=4)
valid_set = ListDataset(list_file=val_list, train=False, transform=transform)
valid_loader = torch.utils.data.DataLoader(valid_set, batch_size=batch_size, shuffle=False, num_workers=4)

net = SSD300()
if resume:
    print('resuming from checkpoint..')
    checkpoint = torch.load('./checkpoint/ckpt.pth')
    net.load_state_dict(checkpoint['net'])
    best_loss = checkpoint['loss']
    start_epoch = checkpoint['epoch']
else:
    net.load_state_dict(torch.load('./weights/ssd_initializedVGG.pth', map_location=lambda storage, loc: storage))

torch.cuda.set_device(0)
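The resume branch above expects a checkpoint dict with 'net', 'loss' and 'epoch' keys. The matching save side is not shown in this snippet; a minimal sketch of what it would look like (the path and helper name are illustrative):

import os
import torch

def save_checkpoint(net, loss, epoch, path='./checkpoint/ckpt.pth'):
    # Field names mirror the load code above.
    os.makedirs(os.path.dirname(path), exist_ok=True)
    torch.save({'net': net.state_dict(), 'loss': loss, 'epoch': epoch}, path)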
Example #4
args = parser.parse_args()

assert torch.cuda.is_available(), 'Error: CUDA not found!'
assert args.focal_loss, "OHEM + ce_loss is not working... :("

if not os.path.exists(args.save_folder):
    os.mkdir(args.save_folder)

if not os.path.exists(args.logdir):
    os.mkdir(args.logdir)

# Data
print('==> Preparing data..')
trainset = ListDataset(root='/root/DB/',
                       dataset=args.dataset,
                       train=True,
                       transform=Augmentation_traininig,
                       input_size=args.input_size,
                       multi_scale=args.multi_scale)
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=args.batch_size,
                                          shuffle=True,
                                          num_workers=args.num_workers,
                                          collate_fn=trainset.collate_fn)

# set model (focal_loss vs OHEM_CE loss)
if args.focal_loss:
    imagenet_pretrain = 'weights/retinanet_se50.pth'
    criterion = FocalLoss()
    num_classes = 1
else:
    imagenet_pretrain = 'weights/retinanet_se50_OHEM.pth'
    criterion = OHEM_loss()
    num_classes = 2
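The FocalLoss used here is project code and is not shown. For reference, sigmoid focal loss (Lin et al., 2017) can be sketched in a few lines; this generic version ignores the anchor matching and loc_loss handling the real class does:

import torch
import torch.nn.functional as F

def sigmoid_focal_loss(logits, targets, alpha=0.25, gamma=2.0):
    # logits and targets share a shape; targets are 0/1.
    ce = F.binary_cross_entropy_with_logits(logits, targets, reduction='none')
    p = torch.sigmoid(logits)
    p_t = p * targets + (1 - p) * (1 - targets)
    alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
    return (alpha_t * (1 - p_t) ** gamma * ce).sum()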
Example #5
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
args = parser.parse_args()

use_cuda = torch.cuda.is_available() 
best_loss = float('inf')  # best test loss
start_epoch = 0  # start from epoch 0 or last epoch

# Data
print('==> Preparing data..')
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))])
# transform = transforms.Compose([transforms.ToTensor(),
#                                 transforms.Normalize(mean=(0.485,), std=(0.229,))])

# trainset = ListDataset(root='/home/biometric/Ranjeet/Data_For_Faster_RCNN/Iris_jpg', list_file='./voc_data/training.txt', train=True, transform=transform)
trainset = ListDataset(root='images/training/images', list_file='labels/training.txt', train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=20, shuffle=True, num_workers=3)

testset = ListDataset(root='images/training/images', list_file='labels/testing.txt', train=False, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=20, shuffle=False, num_workers=3)
# Model
net = SSD300()
if args.resume:
    print('==> Resuming from checkpoint..')
    checkpoint = torch.load('./checkpoint/ckpt.pth')
    net.load_state_dict(checkpoint['net'])
    best_loss = checkpoint['loss']
    start_epoch = checkpoint['epoch']
else:
    # Convert from pretrained VGG model.
    print("Om")
Example #6
epoch = 200

use_cuda = torch.cuda.is_available()
best_loss = float('inf')  # best test loss
start_epoch = 0  # start from epoch 0 or last epoch

# Data
print('==> Preparing data..')
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
])

trainset = ListDataset(
    root='../../../../VisDrone2019/dataset/VisDrone2018-DET-train/images/',
    list_file=
    '../../../../VisDrone2019/dataset/VisDrone2018-DET-train/annotations/',
    train=True,
    transform=transform)
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=8,
                                          shuffle=True,
                                          num_workers=4)

valset = ListDataset(
    root='../../../../VisDrone2019/dataset/VisDrone2019-DET-val/images/',
    list_file=
    '../../../../VisDrone2019/dataset/VisDrone2019-DET-val/annotations/',
    train=True,
    transform=transform)
valloader = torch.utils.data.DataLoader(valset,
                                        batch_size=8,
                                        shuffle=True,
                                        num_workers=4)
Example #7


############ Variables ####################################

best_loss = float('inf')  
start_epoch = 0  # start from epoch 0 or last epoch
current_best_model = ''

####################################################

# Data
print('==> Preparing data..')
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))])

trainset = ListDataset(root=args.train_dir, list_file=args.train_meta, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True, drop_last=True)

testset = ListDataset(root=args.validate_dir, list_file=args.validate_meta, train=False, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=args.batch_size, shuffle=False, drop_last=True)


net = SSD300()

if args.use_cuda:
	net.cuda()
	cudnn.benchmark = True

if args.resume_mode == 'continue':
	print('==> Resuming from checkpoint..')
	checkpoint = torch.load(args.resuming_model)
	net.load_state_dict(checkpoint['net'])
Example #8
start_epoch = 0  # start from epoch 0 or last epoch

# Data
print('==> Preparing data..')
list_files_root = '../../../AdienceFaces/folds/train_val_txt_files_per_fold/test_fold_is_0'
images_root = '../../../AdienceFaces/DATA/aligned'

transform_train = transforms.Compose([
    transforms.CenterCrop(150),
    transforms.RandomCrop(150, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])
trainset = ListDataset(root=images_root,
                       list_file=os.path.join(list_files_root,
                                              'age_gender_train_subset.txt'),
                       transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=128,
                                          shuffle=True,
                                          num_workers=8)

transform_test = transforms.Compose([
    transforms.CenterCrop(150),
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])
testset = ListDataset(root=images_root,
                      list_file=os.path.join(list_files_root,
                                             'age_gender_test.txt'),
                      transform=transform_test)
Example #9
epoch = 200
batch_size = 8

use_cuda = torch.cuda.is_available()
best_loss = float('inf')  # best test loss
start_epoch = 0  # start from epoch 0 or last epoch

# Data
print('==> Preparing data..')
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.356, 0.368, 0.362), std=(0.242, 0.235, 0.236))
])

trainset = ListDataset(root=TRAIN_IMG_DIR,
                       list_file=TRAIN_ANNOT_DIR,
                       train=True,
                       transform=transform)
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=batch_size,
                                          shuffle=True,
                                          num_workers=4)

valset = ListDataset(root=VAL_IMAGE_DIR,
                     list_file=VAL_ANNOT_DIR,
                     train=True,
                     transform=transform)
valloader = torch.utils.data.DataLoader(valset,
                                        batch_size=batch_size,
                                        shuffle=True,
                                        num_workers=4)
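Unlike most examples on this page, this one normalizes with dataset-specific statistics (0.356, 0.368, 0.362) rather than the ImageNet values. A rough way to estimate such per-channel statistics yourself (a sketch; assumes the loader yields same-size images in [0, 1] as its first element):

import torch

def channel_stats(loader):
    n, mean, sq = 0, torch.zeros(3), torch.zeros(3)
    for imgs, *_ in loader:               # imgs: (B, 3, H, W)
        flat = imgs.view(imgs.size(0), 3, -1)
        mean += flat.mean(dim=2).sum(dim=0)
        sq += flat.pow(2).mean(dim=2).sum(dim=0)
        n += imgs.size(0)
    mean /= n
    std = (sq / n - mean ** 2).sqrt()     # E[x^2] - E[x]^2, per channel
    return mean, std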
Example #10
args = parser.parse_args()

use_cuda = torch.cuda.is_available()
best_loss = float('inf')  # best test loss
start_epoch = 0  # start from epoch 0 or last epoch

# Data
print('==> Preparing data..')
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
])

trainset = ListDataset(
    root='/search/liukuang/data/VOC2012_trainval_test_images',
    list_file='./voc_data/voc12_train.txt',
    train=True,
    transform=transform)
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=32,
                                          shuffle=True,
                                          num_workers=4)

testset = ListDataset(
    root='/search/liukuang/data/VOC2012_trainval_test_images',
    list_file='./voc_data/voc12_test.txt',
    train=False,
    transform=transform)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=32,
                                         shuffle=False,
                                         num_workers=4)
Example #11
use_cuda = torch.cuda.is_available()
best_loss = float('inf')  # best test loss
start_epoch = 0  # start from epoch 0 or last epoch

# Data
print('==> Preparing data..')
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
])
# transform = transforms.Compose([transforms.ToTensor(),
#                                 transforms.Normalize(mean=(0.485,), std=(0.229,))])
# trainset = ListDataset(root='/home/biometric/Ranjeet/Data_For_Faster_RCNN/Iris_jpg', list_file='./voc_data/training.txt', train=True, transform=transform)
trainset = ListDataset(
    root=
    '/media/biometric/Data1/Ranjeet/NewPytorch/SSD_Ear_RGB_300/Cropped_2000_wild',
    list_file='./voc_data/Cropped_wild_train.txt',
    train=True,
    transform=transform)
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=20,
                                          shuffle=True,
                                          num_workers=3)

testset = ListDataset(
    root=
    '/media/biometric/Data1/Ranjeet/NewPytorch/SSD_Ear_RGB_300/Cropped_2000_wild',
    list_file='./voc_data/Cropped_wild_test.txt',
    train=False,
    transform=transform)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=20,
                                         shuffle=False,
                                         num_workers=3)
Example #12
epoch = 0
global_iter = 1
step_index = 0
stepvalues = (5000, 10000, 15000)  # learning rate decay steps
cur_lrG = opt.lrG
cur_lrD = opt.lrD
""" Get data loader """
transform = transforms.Compose([
    transforms.ToTensor(),
    #transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
])

# <augmentation=False> for test
trainset = ListDataset(root=opt.root,
                       dataset='densepoint',
                       mode="train",
                       num_pts=opt.num_pts,
                       transform=transform,
                       augmentation=opt.augmentation)
train_loader = torch.utils.data.DataLoader(trainset,
                                           batch_size=opt.batch_size,
                                           shuffle=True,
                                           num_workers=opt.num_workers)
""" Networks : Generator & Discriminator """
G = Generator(opt.num_pts)
D = Discriminator(opt.num_pts)

G.weight_init(mean=0.0, std=0.02)
D.weight_init(mean=0.0, std=0.02)
""" set CUDA """
G.cuda()
D.cuda()
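weight_init here is a method of the Generator/Discriminator classes, which are not shown. A common DCGAN-style equivalent, written as a free function (a sketch, not this repo's code):

import torch.nn as nn

def normal_weight_init(net, mean=0.0, std=0.02):
    for m in net.modules():
        if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d, nn.Linear)):
            nn.init.normal_(m.weight, mean, std)
            if m.bias is not None:
                nn.init.zeros_(m.bias)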
Example #13
from loss import FocalLoss
from notenet import NoteNet
from datagen import ListDataset

import omr_utils

# Prepare Data
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])

trainset = ListDataset(root='./data/manual/images',
                       list_file='./data/manual/annotations_train.csv',
                       train=True,
                       transform=transform,
                       input_size=512)
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=20,
                                          shuffle=True,
                                          collate_fn=trainset.collate_fn)

testset = ListDataset(root='./data/manual/images',
                      list_file='./data/manual/annotations_val.csv',
                      train=False,
                      transform=transform,
                      input_size=512)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=20,
                                         shuffle=False,
                                         collate_fn=testset.collate_fn)
Example #14
])

transform_test = transforms.Compose([
    transforms.Resize(input_size),
    #transforms.RandomCrop(cut_size),
    #transforms.RandomHorizontalFlip(),
    transforms.TenCrop(cut_size),
    transforms.Lambda(lambda crops: torch.stack(
        [transforms.ToTensor()(crop) for crop in crops])),
    #transforms.ToTensor(),
    #transforms.Normalize((0.485,0.456,0.406), (0.229,0.224,0.225))
])

#trainset=torchvision.datasets.ImageFolder(train_data,transform_train)
trainset = ListDataset(
    root='../train_val_imgs/Manually/Manually_train_croped/',
    list_file='./AffectNet/train.txt',
    transform=transform_train)
trainloader = DataLoader(trainset, bs, shuffle=True, num_workers=12)
#testset=torchvision.datasets.ImageFolder(test_data,transform_test)
testset = ListDataset(
    root='../train_val_imgs/Manually/Manually_validation_croped/',
    list_file='./AffectNet/val.txt',
    transform=transform_test)
testloader = DataLoader(testset, batch_size=128, shuffle=True, num_workers=12)

net = ShuffleNetV2(input_size, n_class)
'''
model_summary(net,input_size=(3,input_size,input_size))
flops, params = get_model_complexity_info(net, (input_size, input_size), as_strings=True, print_per_layer_stat=False)
print('Flops:  ' + flops)
print('Params: ' + params)
Example #15
                    help='epoch number')
args = parser.parse_args()

use_cuda = torch.cuda.is_available()
best_loss = float('inf')  # best test loss
start_epoch = 0  # start from epoch 0 or last epoch

# Data
print('==> Preparing data..')
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
])

trainset = ListDataset(root='data/VOC2012_trainval_train_images/',
                       list_file='./voc_data/voc12_train.txt',
                       train=True,
                       transform=transform)
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=8,
                                          shuffle=True,
                                          num_workers=4)

# Model
net = SSD300()
if args.resume:
    print('==> Resuming from checkpoint..')
    checkpoint = torch.load('./checkpoint/ckpt.pth')
    net.load_state_dict(checkpoint['net'])
    best_loss = checkpoint['loss']
    start_epoch = checkpoint['epoch']
else:
    # Convert from pretrained VGG model.
    net.load_state_dict(torch.load('./weights/ssd_initializedVGG.pth',
                                   map_location=lambda storage, loc: storage))
Example #16
best_loss = float('inf')  # best test loss
start_epoch = 0  # start from epoch 0 or last epoch

# Data
print('==> Preparing data..')
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
])
# transform = transforms.Compose([transforms.ToTensor(),
#                                 transforms.Normalize(mean=(0.485,), std=(0.229,))])
# trainset = ListDataset(root='/home/biometric/Ranjeet/Data_For_Faster_RCNN/Iris_jpg', list_file='./voc_data/training.txt', train=True, transform=transform)
trainset = ListDataset(
    root=
    '/media/biometric/Data1/Database/Ear_DataSet/Ear_in_wild/Collectionb_all',
    list_file=
    '//media/biometric/Data1/Database/Ear_DataSet/Ear_in_wild/GT_collectionb_train.txt',
    train=True,
    transform=transform)
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=20,
                                          shuffle=True,
                                          num_workers=3)

testset = ListDataset(
    root=
    '/media/biometric/Data1/Database/Ear_DataSet/Ear_in_wild/Collectionb_all',
    list_file=
    '//media/biometric/Data1/Database/Ear_DataSet/Ear_in_wild/GT_collectionb_test.txt',
    train=False,
    transform=transform)
Example #17
fix = 'head'

assert torch.cuda.is_available(), 'Error: CUDA not found!'
best_loss = float('inf')  # best test loss
start_epoch = 0  # start from epoch 0 or last epoch

# Data
print('==> Preparing data..')
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])

testset = ListDataset(
    list_file='/mnt/ssd_disk/naka247/peng/DOTA/val_retina_dota_nd.txt',
    train=False,
    transform=transform,
    input_size=1024)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=2,
                                         shuffle=False,
                                         num_workers=8,
                                         collate_fn=testset.collate_fn)

test_loss = 0
for batch_idx, (inputs, loc_targets, cls_targets) in enumerate(testloader):
    #need to check 10_G272_15Nov2016_0019.JPG, batch_idx=5
    #inputs = Variable(inputs.cuda(), volatile=True)
    #loc_targets = Variable(loc_targets.cuda())
    cls_targets = Variable(cls_targets.cuda())
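The commented-out Variable(..., volatile=True) calls date this snippet to pre-0.4 PyTorch. On current versions the same evaluation loop would use torch.no_grad(); a sketch, assuming net and criterion as in the other RetinaNet examples:

net.eval()
test_loss = 0
with torch.no_grad():
    for batch_idx, (inputs, loc_targets, cls_targets) in enumerate(testloader):
        inputs = inputs.cuda()
        loc_targets = loc_targets.cuda()
        cls_targets = cls_targets.cuda()
        loc_preds, cls_preds = net(inputs)
        loc_loss, cls_loss = criterion(loc_preds, loc_targets, cls_preds, cls_targets)
        test_loss += (loc_loss + cls_loss).item()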
Example #18
assert torch.cuda.is_available(), 'Error: CUDA not found!'
best_correct = 0  # best number of age_correct + gender_correct
start_epoch = 0  # start from epoch 0 or last epoch

# Data
print('==> Preparing data..')
transform_train = transforms.Compose([
    transforms.CenterCrop(150),
    transforms.RandomCrop(150, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])
trainset = ListDataset(root='./images',
                       list_file='./data/train.txt',
                       transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=128,
                                          shuffle=True,
                                          num_workers=8)

transform_test = transforms.Compose([
    transforms.CenterCrop(150),
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])
testset = ListDataset(root='./images',
                      list_file='./data/test.txt',
                      transform=transform_test)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=128,
                                         shuffle=False,
                                         num_workers=8)
Example #19
pjoin = os.path.join

assert torch.cuda.is_available(), 'Error: CUDA not found!'
best_loss = float('inf')  # best test loss
start_epoch = 0  # start from epoch 0 or last epoch

# Data
print('==> Preparing data..')
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])
batchsize = 4
trainset = ListDataset(root=pjoin(args.path, 'train'),
                       list_file=pjoin(args.path, 'gt_train.csv'),
                       train=True,
                       transform=transform,
                       input_size=600,
                       max_size=1000)
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=batchsize,
                                          shuffle=True,
                                          num_workers=4,
                                          collate_fn=trainset.collate_fn)

testset = ListDataset(root=pjoin(args.path, 'test'),
                      list_file=pjoin(args.path, 'gt_test.csv'),
                      train=False,
                      transform=transform,
                      input_size=600,
                      max_size=1000)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=batchsize,
                                         shuffle=False,
                                         num_workers=4,
                                         collate_fn=testset.collate_fn)
Example #20
learning_rate = 0.001
resume = False

batch_size = 2  
dice_score = 0
ite = 0
####################################################

# Data
print('==> Preparing data..')
transform = transforms.Compose([transforms.ToTensor(),
							transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))])


testset = ListDataset(root='./dataset/Exp_Test/Exp_Test_BKNgoc/', list_file='./voc_data/ssd_test_BKN.txt', train=False, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, drop_last=True)

net = SSD300()

if use_cuda:
	if resume:
		pass
		#net = torch.nn.DataParallel(net, device_ids=[0,1,2,3,4,5,6,7])
		net.cuda()
		cudnn.benchmark = True

print('==> Resuming from checkpoint..')
#    checkpoint = torch.load('./checkpoint/ssdtrain0511_12.pth')
checkpoint = torch.load('./checkpoint/ssdtrain0511_12.pth', map_location=lambda storage, loc: storage)
net.load_state_dict(checkpoint['net'])
Example #21
best_loss = float('inf')  # best test loss
start_epoch = 0  # start from epoch 0 or last epoch

# Data
print('==> Preparing data..')
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])

# ROOT_PATH = '/search/odin/liukuang/data/voc_all_images'
ROOT_PATH = "data/images"

trainset = ListDataset(root=ROOT_PATH,
                       list_file='./data/custom_train.txt',
                       train=True,
                       transform=transform,
                       input_size=600)
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=1,
                                          shuffle=False,
                                          num_workers=1,
                                          collate_fn=trainset.collate_fn)

testset = ListDataset(root=ROOT_PATH,
                      list_file='./data/custom_test.txt',
                      train=False,
                      transform=transform,
                      input_size=600)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=1,
                                         shuffle=False,
                                         num_workers=1,
                                         collate_fn=testset.collate_fn)
Example #22
def main():
	parser = argparse.ArgumentParser()
	parser.add_argument('--batchSz', type=int, default=1, help='batch size')
	parser.add_argument('--nEpochs', type=int, default=300, help='number of epoch to end training')
	parser.add_argument('--lr', type=float, default=1e-5, help='learning rate')
	parser.add_argument('--momentum', type=float, default=0.9)
	parser.add_argument('--wd', type=float, default=5e-4, help='weight decay')
	# parser.add_argument('--save')
	# parser.add_argument('--seed', type=int, default=1)
	parser.add_argument('--opt', type=str, default='sgd', choices=('sgd', 'adam', 'rmsprop'))
	parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
	parser.add_argument('--resume_from', type=int, default=220, help='resume from which checkpoint')
	parser.add_argument('--visdom', '-v', action='store_true', help='use visdom for training visualization')
	args = parser.parse_args()

	# args.save = args.save or 'work/DSOS.base'
	# setproctitle.setproctitle(args.save)
	# if os.path.exists(args.save):
	# 	shutil.rmtree(args.save)
	# os.makedirs(args.save, exist_ok=True)

	use_cuda = torch.cuda.is_available()
	best_loss = float('inf') # best test loss
	start_epoch = 0 # start from epoch 0 or last epoch

	normMean = [0.485, 0.456, 0.406]
	normStd = [0.229, 0.224, 0.225]
	normTransform = transforms.Normalize(normMean, normStd)

	trainTransform = transforms.Compose([
		transforms.Scale((300, 300)),
		transforms.ToTensor(),
		normTransform
		])

	testTransform = transforms.Compose([
		transforms.Scale((300, 300)),
		transforms.ToTensor(),
		normTransform
		])

	# Data
	kwargs = {'num_workers': 4, 'pin_memory': True} if use_cuda else {}
	trainset = ListDataset(root=cfg.img_root, list_file=cfg.label_train,
		                   train=True, transform=trainTransform)
	trainLoader = DataLoader(trainset, batch_size=args.batchSz,
		                     shuffle=True, **kwargs)
	testset = ListDataset(root=cfg.img_root, list_file=cfg.label_test,
		                  train=False, transform=testTransform)
	testLoader = DataLoader(testset, batch_size=args.batchSz,
		                    shuffle=False, **kwargs)
 
	# Model
	net = DSOD(growthRate=48, reduction=1)
	if args.resume:
		print('==> Resuming from checkpoint...')
		checkpoint = torch.load('./checkpoint/ckpt_{:03d}.pth'.format(args.resume_from))
		net.load_state_dict(checkpoint['net'])
		best_loss = checkpoint['loss']
		start_epoch = checkpoint['epoch']+1
		print('Previous epoch: {}, best_loss: {}'.format(start_epoch-1, best_loss))
	else:
		print('==> Initializing weight...')
		def init_weights(m):
			if isinstance(m, nn.Conv2d):
				init.xavier_uniform(m.weight.data)
				# m.bias.data.zero_()
		net.apply(init_weights)

	print(' + Number of params: {}'.format(
		sum([p.data.nelement() for p in net.parameters()])))
	if use_cuda:
		net = net.cuda()

	if args.opt == 'sgd':
		optimizer = optim.SGD(net.parameters(), lr=args.lr,
			                  momentum=args.momentum, weight_decay=args.wd)
	elif args.opt == 'adam':
		optimizer = optim.Adam(net.parameters(), weight_decay=args.wd)
	elif args.opt == 'rmsprop':
		optimizer = optim.RMSprop(net.parameters(), weight_decay=args.wd)

	criterion = MultiBoxLoss()

	if use_cuda:
		net.cuda()
		cudnn.benchmark = True

	if args.visdom:
		import visdom
		viz = visdom.Visdom()
		training_plot = viz.line(
			X=torch.zeros((1,)).cpu(),
			Y=torch.zeros((1, 3)).cpu(),
			opts=dict(
				xlabel='Epoch',
				ylabel='Loss',
				title='Epoch DSOD Training Loss',
				legend=['Loc Loss', 'Conf Loss', 'Loss']
				)
			)
		testing_plot = viz.line(
			X=torch.zeros((1,)).cpu(),
			Y=torch.zeros((1, 3)).cpu(),
			opts=dict(
				xlabel='Epoch',
				ylabel='Loss',
				title='Epoch DSOD Testing Loss',
				legend=['Loc Loss', 'Conf Loss', 'Loss']
				)
			)

	with open(cfg.label_test) as f:
		test_lines = f.readlines()
		num_tests = len(test_lines)

		transform = trainTransform
		transform_viz = testTransform

		data_encoder = DataEncoder()
		if args.visdom:
			testing_image = viz.image(np.ones((3, 300, 300)),
			                      opts=dict(caption='Random Testing Image'))

	# TODO: save training data on log file
	# trainF = open(os.path.join(args.save, 'train.csv'), 'w')
	# testF = open(os.path.join(args.save, 'test.csv'), 'w')

	for epoch in range(start_epoch, start_epoch+args.nEpochs+1):
		adjust_opt(args.opt, optimizer, epoch)
		train(epoch, net, trainLoader, optimizer, criterion, use_cuda, args.visdom, viz=None)
		# assumes test() returns the epoch's test loss; otherwise 'test_loss'
		# below would be undefined
		test_loss = test(epoch, net, testLoader, optimizer, criterion, use_cuda, args.visdom, viz=None)

		if epoch%10 == 0:
			state = {
			      'net': net.state_dict(),
			      'loss': test_loss,
			      'epoch': epoch
			}
			if not os.path.isdir('checkpoint'):
				os.mkdir('checkpoint')
			torch.save(state, './checkpoint/ckpt_{:03d}.pth'.format(epoch))
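adjust_opt is called in the loop above but never defined in the snippet. A plausible SGD step-decay implementation consistent with the call site; the epoch boundaries and learning rates here are illustrative guesses, not the author's values:

def adjust_opt(opt_name, optimizer, epoch):
	# Only SGD gets a manual schedule; Adam/RMSprop keep their defaults.
	if opt_name != 'sgd':
		return
	if epoch < 150:
		lr = 1e-5
	elif epoch < 225:
		lr = 1e-6
	else:
		lr = 1e-7
	for group in optimizer.param_groups:
		group['lr'] = lr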
Example #23
assert torch.cuda.is_available(), 'Error: CUDA not found!'
best_loss = float('inf')  # best test loss
start_epoch = 0  # start from epoch 0 or last epoch
best_train_loss = float('inf')

# Data
print('==> Preparing data..')
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])

trainset = ListDataset(root='./data/retina-metro/',
                       list_file='./data/metro_train.txt',
                       train=True,
                       transform=transform,
                       input_size=416)
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=8,
                                          shuffle=True,
                                          num_workers=2,
                                          collate_fn=trainset.collate_fn)

testset = ListDataset(root='./data/retina-metro/',
                      list_file='./data/metro_val.txt',
                      train=False,
                      transform=transform,
                      input_size=416)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=4,
                                         shuffle=False,
                                         num_workers=2,
                                         collate_fn=testset.collate_fn)
Example #24
assert torch.cuda.is_available(), 'Error: CUDA not found!'
best_loss = float('inf')  # best test loss
start_epoch = 0  # start from epoch 0 or last epoch

# Data
print('==> Preparing data..')
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])

trainset = ListDataset(
    root=
    '/home/asprohy/data/VOC/VOCtrainval_11-May-2012/VOCdevkit/VOC2012/JPEGImages',
    list_file=
    '/home/asprohy/data/VOC/VOCtrainval_11-May-2012/VOCdevkit/VOC2012/ImageSets/Main/train.txt',
    xml_file=
    '/home/asprohy/data/VOC/VOCtrainval_11-May-2012/VOCdevkit/VOC2012/Annotations',
    train=True,
    transform=transform,
    input_size=400)
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=8,
                                          shuffle=True,
                                          num_workers=8,
                                          collate_fn=trainset.collate_fn)

testset = ListDataset(
    root=
    '/home/asprohy/data/VOC/VOCtrainval_11-May-2012/VOCdevkit/VOC2012/JPEGImages',
    list_file=
    '/home/asprohy/data/VOC/VOCtrainval_11-May-2012/VOCdevkit/VOC2012/ImageSets/Main/val.txt',
    xml_file=
    '/home/asprohy/data/VOC/VOCtrainval_11-May-2012/VOCdevkit/VOC2012/Annotations',
    train=False,
    transform=transform,
    input_size=400)
Example #25
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
args = parser.parse_args()

assert torch.cuda.is_available(), 'Error: CUDA not found!'
best_loss = float('inf')  # best test loss
start_epoch = 0  # start from epoch 0 or last epoch

# Data
print('==> Preparing data..')
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.485,0.456,0.406), (0.229,0.224,0.225))
])

trainset = ListDataset(root='/search/odin/liukuang/data/voc_all_images',
                       list_file='./voc_data/test.txt', train=True, transform=transform, input_size=600, max_size=1000)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=16, shuffle=True, num_workers=8, collate_fn=trainset.collate_fn)

testset = ListDataset(root='/search/odin/liukuang/data/voc_all_images',
                      list_file='./voc_data/test.txt', train=False, transform=transform, input_size=600, max_size=1000)
testloader = torch.utils.data.DataLoader(testset, batch_size=16, shuffle=False, num_workers=8, collate_fn=testset.collate_fn)

# Model
net = RetinaNet()
if args.resume:
    print('==> Resuming from checkpoint..')
    checkpoint = torch.load('./checkpoint/ckpt.pth')
    net.load_state_dict(checkpoint['net'])
    best_loss = checkpoint['loss']
    start_epoch = checkpoint['epoch']
Example #26
def train():
    args = parse_args()

    assert torch.cuda.is_available(), 'Error: CUDA not found!'
    assert args.focal_loss, "OHEM + ce_loss is not working... :("

    if not os.path.exists(args.save_folder):
        os.mkdir(args.save_folder)

    if not os.path.exists(args.logdir):
        os.mkdir(args.logdir)

    ###########################################################################
    # Data
    ###########################################################################

    print('==> Preparing data..')
    trainset = ListDataset(root='/mnt/9C5E1A4D5E1A2116/datasets/',
                           dataset=args.dataset,
                           train=True,
                           transform=Augmentation_traininig,
                           input_size=args.input_size,
                           multi_scale=args.multi_scale)
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              num_workers=args.num_workers,
                                              collate_fn=trainset.collate_fn)

    ###########################################################################

    # Training detail options
    stepvalues = (10000, 20000, 30000, 40000, 50000) if args.dataset in ["SynthText"] \
        else (2000, 4000, 6000, 8000, 10000)
    best_loss = float('inf')  # best test loss
    start_epoch = 0  # start from epoch 0 or last epoch
    iteration = 0
    cur_lr = args.lr
    mean = (0.485, 0.456, 0.406)
    var = (0.229, 0.224, 0.225)
    step_index = 0
    pEval = None

    ###########################################################################
    # Model
    ###########################################################################

    # set model (focal_loss vs OHEM_CE loss)
    if args.focal_loss:
        imagenet_pretrain = 'weights/retinanet_se50.pth'
        criterion = FocalLoss()
        num_classes = 1
    else:
        imagenet_pretrain = 'weights/retinanet_se50_OHEM.pth'
        criterion = OHEM_loss()
        num_classes = 2

    net = RetinaNet(num_classes)

    # Restore model weights
    net.load_state_dict(torch.load(imagenet_pretrain))

    if args.resume:
        print('==> Resuming from checkpoint..', args.resume)
        checkpoint = torch.load(args.resume)
        net.load_state_dict(checkpoint['net'])
        #start_epoch = checkpoint['epoch']
        #iteration = checkpoint['iteration']
        #cur_lr = checkpoint['lr']
        #step_index = checkpoint['step_index']
        # optimizer.load_state_dict(state["optimizer"])

    print("multi_scale : ", args.multi_scale)
    print("input_size : ", args.input_size)
    print("stepvalues : ", stepvalues)
    print("start_epoch : ", start_epoch)
    print("iteration : ", iteration)
    print("cur_lr : ", cur_lr)
    print("step_index : ", step_index)
    print("num_gpus : ", torch.cuda.device_count())

    # Data parellelism for multi-gpu training
    net = torch.nn.DataParallel(net,
                                device_ids=range(torch.cuda.device_count()))
    net.cuda()

    # Put model in training mode and freeze batch norm.
    net.train()
    net.module.freeze_bn()  # you must freeze batchnorm

    ###########################################################################
    # Optimizer
    ###########################################################################

    optimizer = optim.SGD(net.parameters(),
                          lr=cur_lr,
                          momentum=0.9,
                          weight_decay=1e-4)
    #optimizer = optim.Adam(net.parameters(), lr=cur_lr)

    ###########################################################################
    # Utils
    ###########################################################################

    encoder = DataEncoder()
    writer = SummaryWriter(log_dir=args.logdir)

    ###########################################################################
    # Training loop
    ###########################################################################

    t0 = time.time()
    for epoch in range(start_epoch, 10000):
        if iteration > args.max_iter:
            break

        for inputs, loc_targets, cls_targets in trainloader:
            inputs = Variable(inputs.cuda())
            loc_targets = Variable(loc_targets.cuda())
            cls_targets = Variable(cls_targets.cuda())

            optimizer.zero_grad()
            loc_preds, cls_preds = net(inputs)

            loc_loss, cls_loss = criterion(loc_preds, loc_targets, cls_preds,
                                           cls_targets)
            loss = loc_loss + cls_loss
            loss.backward()
            optimizer.step()

            if iteration % 20 == 0:
                t1 = time.time()

                print(
                    'iter ' + repr(iteration) + ' (epoch ' + repr(epoch) +
                    ') || loss: %.4f || loc_loss: %.4f || cls_loss: %.4f (Time : %.1f)'
                    % (loss.sum().item(), loc_loss.sum().item(),
                       cls_loss.sum().item(), (t1 - t0)))
                # t0 = time.time()

                writer.add_scalar('loc_loss', loc_loss.sum().item(), iteration)
                writer.add_scalar('cls_loss', cls_loss.sum().item(), iteration)
                writer.add_scalar('loss', loss.sum().item(), iteration)

                # show inference image in tensorboard
                infer_img = np.transpose(inputs[0].cpu().numpy(), (1, 2, 0))
                infer_img *= var
                infer_img += mean
                infer_img *= 255.
                infer_img = np.clip(infer_img, 0, 255)
                infer_img = infer_img.astype(np.uint8)
                h, w, _ = infer_img.shape

                boxes, labels, scores = encoder.decode(loc_preds[0],
                                                       cls_preds[0], (w, h))
                boxes = boxes.reshape(-1, 4, 2).astype(np.int32)

                if boxes.shape[0] != 0:
                    # infer_img = infer_img/np.float32(255)

                    # print(boxes)
                    # print(
                    #     f"infer_img prior to cv2.polylines - dtype: {infer_img.dtype}, shape: {infer_img.shape}, min: {infer_img.min()}, max: {infer_img.max()}")
                    # print(
                    #     f"boxes prior to cv2.polylines - dtype: {boxes.dtype}, shape: {boxes.shape}, min: {boxes.min()}, max: {boxes.max()}")
                    infer_img = cv2.polylines(infer_img.copy(), boxes, True,
                                              (0, 255, 0), 4)

                # print(
                #     f"infer_img - dtype: {infer_img.dtype}, shape: {infer_img.shape}, min: {infer_img.min()}, max: {infer_img.max()}")

                writer.add_image('image',
                                 infer_img,
                                 iteration,
                                 dataformats="HWC")
                writer.add_scalar('input_size', h, iteration)
                writer.add_scalar('learning_rate', cur_lr, iteration)

                t0 = time.time()

            if iteration % args.save_interval == 0 and iteration > 0:
                print('Saving state, iter : ', iteration)
                state = {
                    'net': net.module.state_dict(),
                    "optimizer": optimizer.state_dict(),
                    'iteration': iteration,
                    'epoch': epoch,
                    'lr': cur_lr,
                    'step_index': step_index
                }
                model_file = args.save_folder + \
                    'ckpt_' + repr(iteration) + '.pth'
                torch.save(state, model_file)

            if iteration in stepvalues:
                step_index += 1
                cur_lr = adjust_learning_rate(cur_lr, optimizer, args.gamma,
                                              step_index)

            if iteration > args.max_iter:
                break

            if args.evaluation and iteration % args.eval_step == 0:
                try:
                    if pEval is None:
                        print("Evaluation started at iteration {} on IC15...".
                              format(iteration))
                        eval_cmd = "CUDA_VISIBLE_DEVICES=" + str(args.eval_device) + \
                            " python eval.py" + \
                            " --tune_from=" + args.save_folder + 'ckpt_' + repr(iteration) + '.pth' + \
                            " --input_size=1024" + \
                            " --output_zip=result_temp1"

                        pEval = Popen(eval_cmd,
                                      shell=True,
                                      stdout=PIPE,
                                      stderr=PIPE)

                    elif pEval.poll() is not None:
                        (scorestring, stderrdata) = pEval.communicate()

                        hmean = float(
                            str(scorestring).strip().split(":")[3].split(",")
                            [0].split("}")[0].strip())

                        writer.add_scalar('test_hmean', hmean, iteration)

                        print("test_hmean for {}-th iter : {:.4f}".format(
                            iteration, hmean))

                        if pEval is not None:
                            pEval.kill()
                        pEval = None

                except Exception as e:
                    print("exception happened in evaluation ", e)
                    if pEval is not None:
                        pEval.kill()
                    pEval = None

            iteration += 1
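adjust_learning_rate is also project code not shown here. Given the call site cur_lr = adjust_learning_rate(cur_lr, optimizer, args.gamma, step_index), a minimal sketch that decays by gamma at each step boundary:

def adjust_learning_rate(cur_lr, optimizer, gamma, step_index):
    # step_index is kept for signature compatibility; this sketch just
    # multiplies by gamma once per boundary crossed in stepvalues.
    lr = cur_lr * gamma
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr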
Example #27
batch_size = 2
dice_score = 0
prec = np.zeros((2, 106))
ite = 0
####################################################

# Data
print('==> Preparing data..')
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
])

testset = ListDataset(root='./dataset/test/',
                      list_file='./metafile/ssd_test.txt',
                      train=False,
                      transform=transform)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=batch_size,
                                         shuffle=False,
                                         drop_last=True)

net = SSD300()

if use_cuda:
    if resume:
        pass
        #net = torch.nn.DataParallel(net, device_ids=[0,1,2,3,4,5,6,7])
        net.cuda()
        cudnn.benchmark = True
Example #28
args = parser.parse_args()

assert torch.cuda.is_available(), 'Error: CUDA not found!'
best_loss = float('inf')  # best test loss
start_epoch = 0  # start from epoch 0 or last epoch

# Data
print('==> Preparing data..')
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])

trainset = ListDataset(
    root='',
    list_file='/media/code/u_zhh/test/darknet/data/head/train.txt',
    train=True,
    transform=transform,
    input_size=600)
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=16,
                                          shuffle=True,
                                          num_workers=16,
                                          collate_fn=trainset.collate_fn)

testset = ListDataset(
    root='',
    list_file='/media/code/u_zhh/test/darknet/data/head/val.txt',
    train=False,
    transform=transform,
    input_size=600)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=16,
                                         shuffle=False,
                                         num_workers=16,
                                         collate_fn=testset.collate_fn)
Example #29
learning_rate = 0.001
resume = False

batch_size = 1
####################################################

# Data
print('==> Preparing data..')
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
])

trainset = ListDataset(root='./dataset/train',
                       list_file='./metafile/train.txt',
                       train=True,
                       transform=transform)
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=batch_size,
                                          shuffle=True,
                                          drop_last=True)

testset = ListDataset(root='./dataset/train',
                      list_file='./metafile/train.txt',
                      train=False,
                      transform=transform)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=batch_size,
                                         shuffle=False,
                                         drop_last=True)
Example #30
#use_cuda = torch.cuda.is_available()
train_ite = 0
test_ite = 0
use_cuda = True
best_loss = float('inf')  # best test loss
start_epoch = 0  # start from epoch 0 or last epoch

# Data
print('==> Preparing data..')
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
])

trainset = ListDataset(root='./dataset/train/',
                       list_file='./pretraindataconfig/train_patha.txt',
                       transform=transform)
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=p_batch_size,
                                          shuffle=True)

testset = ListDataset(root='./dataset/train/',
                      list_file='./pretraindataconfig/train_patha.txt',
                      transform=transform)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=p_batch_size,
                                         shuffle=False)

# Model

if False: