Example #1
def main():
    print('please choose net from:', cfg.net_style)
    netconfig = choose_net(FLAGS,
                           [vgg16_config, vgg19_config, resnet50_config])
    train_dataset = yoloDataset(cfg.common_params, cfg.dataset_params,
                                cfg.dataset_params['train_file'])
    test_dataset = yoloDataset(cfg.common_params,
                               cfg.dataset_params,
                               cfg.dataset_params['test_file'],
                               train=False)
    dataset = {'train': train_dataset, 'test': test_dataset}
    yololoss = yoloLoss(cfg.common_params, netconfig)
    solver = YoloSolver(dataset, netconfig, yololoss, cfg.common_params,
                        cfg.solver_params)
    solver.solve()
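
The `choose_net` helper used above is not shown. A plausible sketch, assuming each candidate config exposes a `name` attribute and that `FLAGS` carries the requested style in a `net` field (both hypothetical, not from the original):

# Hypothetical sketch of choose_net: pick the config whose name matches the
# requested net style. The `name` attribute and `flags.net` field are
# assumptions; the real helper is not part of this snippet.
def choose_net(flags, configs):
    table = {c.name: c for c in configs}
    if flags.net not in table:
        raise ValueError('unknown net style %r, choose from %s'
                         % (flags.net, list(table)))
    return table[flags.net]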
Example #2
params = []
params_dict = dict(net.named_parameters())
for key, value in params_dict.items():
    if key.startswith('features'):
        params += [{'params': [value], 'lr': learning_rate * 1}]
    else:
        params += [{'params': [value], 'lr': learning_rate}]
optimizer = torch.optim.SGD(params,
                            lr=learning_rate,
                            momentum=0.9,
                            weight_decay=5e-4)
# optimizer = torch.optim.Adam(net.parameters(),lr=learning_rate,weight_decay=1e-4)

# train_dataset = yoloDataset(root=file_root,list_file=['voc12_trainval.txt','voc07_trainval.txt'],train=True,transform = [transforms.ToTensor()] )
train_dataset = yoloDataset(root=file_root,
                            list_file=['voc2012.txt', 'voc2007.txt'],
                            train=True,
                            transform=[transforms.ToTensor()])
train_loader = DataLoader(train_dataset,
                          batch_size=batch_size,
                          shuffle=True,
                          num_workers=4)
# test_dataset = yoloDataset(root=file_root,list_file='voc07_test.txt',train=False,transform = [transforms.ToTensor()] )
test_dataset = yoloDataset(root=file_root,
                           list_file='voc2007test.txt',
                           train=False,
                           transform=[transforms.ToTensor()])
test_loader = DataLoader(test_dataset,
                         batch_size=batch_size,
                         shuffle=False,
                         num_workers=4)
print('the dataset has %d images' % (len(train_dataset)))
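
The loop above registers every parameter tensor as its own group; note that the `features` branch multiplies the learning rate by 1, so as written both branches yield the same rate and the multiplier is simply a knob left at its neutral value. The same grouping can be written more compactly by handing whole submodules to the optimizer. A minimal sketch, assuming `net` follows the torchvision VGG layout with `features` and `classifier` submodules (an assumption, not shown in the snippet):

# Equivalent grouped form of the optimizer setup above (a sketch; assumes
# `net` exposes `features` and `classifier` submodules as torchvision VGG does).
optimizer = torch.optim.SGD(
    [
        {'params': net.features.parameters(), 'lr': learning_rate},    # backbone
        {'params': net.classifier.parameters(), 'lr': learning_rate},  # head
    ],
    lr=learning_rate,
    momentum=0.9,
    weight_decay=5e-4)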
Example #3
net.train()
# different learning rate
params = []
params_dict = dict(net.named_parameters())
for key, value in params_dict.items():
    if key.startswith('features'):
        params += [{'params': [value], 'lr': learning_rate * 1}]
    else:
        params += [{'params': [value], 'lr': learning_rate}]
optimizer = torch.optim.SGD(params,
                            lr=learning_rate,
                            momentum=0.9,
                            weight_decay=5e-4)

train_dataset = yoloDataset(root=file_root,
                            img_size=448,
                            transforms=[transforms.ToTensor()],
                            train=True)  # S=7, B=2, C=16
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_dataset = yoloDataset(root=file_root,
                           img_size=448,
                           transforms=[transforms.ToTensor()],
                           train=False)  # S=7, B=2, C=16
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
print('the dataset has %d images' % (len(train_dataset)))
print('the batch_size is %d' % (batch_size))
logfile = open('log.txt', 'w')

num_iter = 0
# vis = Visualizer(env='xiong')
best_test_loss = np.inf
Example #4
net.train()
# different learning rate
params = []
params_dict = dict(net.named_parameters())
for key, value in params_dict.items():
    if key.startswith('features'):
        params += [{'params': [value], 'lr': learning_rate * 1}]
    else:
        params += [{'params': [value], 'lr': learning_rate}]
optimizer = torch.optim.SGD(params,
                            lr=learning_rate,
                            momentum=0.9,
                            weight_decay=1e-4)

train_dataset = yoloDataset(root=file_root,
                            list_file='./voc2012train.txt',
                            train=True,
                            transform=[transforms.ToTensor()])
train_loader = DataLoader(train_dataset,
                          batch_size=batch_size,
                          shuffle=True,
                          num_workers=4)
test_dataset = yoloDataset(root=file_root,
                           list_file='./voc2012val.txt',
                           train=False,
                           transform=[transforms.ToTensor()])
test_loader = DataLoader(test_dataset,
                         batch_size=batch_size,
                         shuffle=False,
                         num_workers=4)
print('the training dataset has %d images' % (len(train_dataset)))
print('the batch_size is %d' % (batch_size))
Example #5
net.cuda()

net.train()
# different learning rate
params = []
params_dict = dict(net.named_parameters())
for key, value in params_dict.items():
    if key.startswith('features'):
        params += [{'params': [value], 'lr': learning_rate * 1}]
    else:
        params += [{'params': [value], 'lr': learning_rate}]
optimizer = torch.optim.SGD(
    params, lr=learning_rate, momentum=0.9, weight_decay=5e-4)
# optimizer = torch.optim.Adam(net.parameters(),lr=learning_rate,weight_decay=1e-4)

train_dataset = yoloDataset(root=train_file_root, list_file='../data/voc2012trainval.txt',
                            train=True, transform=[transforms.ToTensor()])
# train_dataset = yoloDataset(root=train_file_root, list_file=[
#                             'voc2012.txt', 'voc2007.txt'], train=True, transform=[transforms.ToTensor()])
train_loader = DataLoader(
    # train_dataset, batch_size=batch_size, shuffle=True, num_workers=4)
    train_dataset, batch_size=batch_size, shuffle=True)
# test_dataset = yoloDataset(root=train_file_root,list_file='voc07_test.txt',train=False,transform = [transforms.ToTensor()] )
test_dataset = yoloDataset(root=validation_file_root, list_file='../data/voc2007test.txt',
                           train=False, transform=[transforms.ToTensor()])
test_loader = DataLoader(
    # test_dataset, batch_size=batch_size, shuffle=False, num_workers=4)
    # test_dataset, batch_size=batch_size, shuffle=False)
    test_dataset, batch_size=3, shuffle=False)
print('the dataset has %d images' % (len(train_dataset)))
print('the batch_size is %d' % (batch_size))
logfile = open('log.txt', 'w')
Example #6
# different learning rate
params = []
params_dict = dict(net.named_parameters())
for key, value in params_dict.items():
    if key.startswith('features'):
        params += [{'params': [value], 'lr': learning_rate * 1}]
    else:
        params += [{'params': [value], 'lr': learning_rate}]
# optimizer
optimizer = torch.optim.SGD(params,
                            lr=learning_rate,
                            momentum=0.9,
                            weight_decay=5e-4)

train_dataset = yoloDataset(root=file_root,
                            list_file=['voc2007train.txt'],
                            train=True,
                            transform=[transforms.ToTensor()])
train_loader = DataLoader(train_dataset,
                          batch_size=batch_size,
                          shuffle=False,
                          num_workers=0)
print(f'the dataset has {len(train_dataset)} images')
print(f'the batch_size is {batch_size}')

logfile = open('log.txt', 'w')

num_iter = 0
# vis = Visualizer(env='xiong')
best_test_loss = np.inf

Example #7
# train
# different learning rate
params = []
params_dict = dict(net.named_parameters())
for key, value in params_dict.items():
    if key.startswith('features'):
        params += [{'params': [value], 'lr': learning_rate * 1}]
    else:
        params += [{'params': [value], 'lr': learning_rate}]
optimizer = torch.optim.SGD(params,
                            lr=learning_rate,
                            momentum=0.9,
                            weight_decay=5e-4)
# optimizer = torch.optim.Adam(net.parameters(),lr=learning_rate,weight_decay=1e-4)

train_dataset = yoloDataset(root=train_root,
                            list_file=['RSNAtrain.txt'],
                            train=True,
                            transform=[transforms.ToTensor()])
train_loader = DataLoader(train_dataset,
                          batch_size=batch_size,
                          shuffle=True,
                          num_workers=4)
test_dataset = yoloDataset(root=test_root,
                           list_file='RSNAtest.txt',
                           train=False,
                           transform=[transforms.ToTensor()])
test_loader = DataLoader(test_dataset,
                         batch_size=batch_size,
                         shuffle=False,
                         num_workers=4)
print('the dataset has %d images' % (len(train_dataset)))
print('the batch_size is %d' % (batch_size))
Example #8
if CHANNEL_LAST:
    net = net.to(memory_format=torch.channels_last)
net.train()
# different learning rate
params = []
params_dict = dict(net.named_parameters())
for key, value in params_dict.items():
    if key.startswith('features'):
        params += [{'params': [value], 'lr': learning_rate * 1}]
    else:
        params += [{'params': [value], 'lr': learning_rate}]
optimizer = torch.optim.SGD(params, lr=learning_rate, momentum=0.9, weight_decay=5e-4)
# optimizer = torch.optim.Adam(net.parameters(),lr=learning_rate,weight_decay=1e-4)

# train_dataset = yoloDataset(root=file_root,list_file=['voc12_trainval.txt','voc07_trainval.txt'],train=True,transform = [transforms.ToTensor()] )
train_dataset = yoloDataset(root=file_root, list_file='voc12_trainval.txt', train=True,
                            transform=[transforms.ToTensor()])
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4)
# test_dataset = yoloDataset(root=file_root,list_file='voc07_test.txt',train=False,transform = [transforms.ToTensor()] )
test_dataset = yoloDataset(root=file_root.replace("VOC2012", "VOC2007"), list_file='voc2007test.txt', train=False,
                           transform=[transforms.ToTensor()])
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=4)
print('the dataset has %d images' % (len(train_dataset)))
print('the batch_size is %d' % (batch_size))
logfile = open('log.txt', 'w')
num_iter = 0
vis = Visualizer(env='main')
best_test_loss = np.inf
tt = Timer()
for epoch in range(num_epochs):
    net.train()
    # if epoch == 1:
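    # The original snippet is cut off here. A minimal sketch of how such an
    # epoch loop typically continues, assuming `criterion` is a yoloLoss
    # instance like the ones used elsewhere on this page (not shown above):
    for i, (images, target) in enumerate(train_loader):
        images, target = images.cuda(), target.cuda()
        pred = net(images)
        loss = criterion(pred, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        num_iter += 1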
Example #9
def main():
    '''Parse arguments.'''
    parser = argparse.ArgumentParser(description='Pytorch XNOR-YOLO Training')

    parser.add_argument('--epochs',
                        default=300,
                        type=int,
                        metavar='N',
                        help='number of total epochs to run')
    parser.add_argument('--lr',
                        '--learning-rate',
                        default=0.001,
                        type=float,
                        metavar='LR',
                        help='initial learning rate')
    parser.add_argument('--l',
                        '--wd',
                        default=1e-5,
                        type=float,
                        metavar='W',
                        help='weight decay (default: 1e-5)')
    parser.add_argument('--pretrained',
                        dest='pretrained',
                        action='store_true',
                        default=False,
                        help='use pre-trained model')
    parser.add_argument('--mixnet',
                        dest='mixnet',
                        action='store_true',
                        default=False,
                        help='use mixnet model')
    parser.add_argument('--start-epoch',
                        default=0,
                        type=int,
                        metavar='N',
                        help='manual epoch number (useful on restarts)')
    parser.add_argument('--resume',
                        default='',
                        type=str,
                        metavar='PATH',
                        help='path to latest checkpoint (default: none)')
    parser.add_argument('-b',
                        '--batch-size',
                        default=8,
                        type=int,
                        metavar='N',
                        help='mini-batch size (default: 8)')
    global args
    args = parser.parse_args()
    '''Data loading module'''
    train_dataset = yoloDataset(
        root='/mnt/lustre/share/DSK/datasets/VOC07+12/JPEGImages/',
        list_file=['./meta/voc2007.txt', './meta/voc2012.txt'],
        train=True,
        transform=[transforms.ToTensor()])
    train_loader = DataLoader(train_dataset,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=4)

    test_dataset = yoloDataset(
        root='/mnt/lustre/share/DSK/datasets/VOC07+12/JPEGImages/',
        list_file='./meta/voc2007test.txt',
        train=False,
        transform=[transforms.ToTensor()])

    test_loader = DataLoader(test_dataset,
                             batch_size=args.batch_size,
                             shuffle=False,
                             num_workers=4)
    '''Create model.'''
    teacher_model = vgg16(pretrained=False)
    student_model = vgg16XNOR(pretrained=False)

    teacher_model = torch.nn.DataParallel(teacher_model)
    student_model.features = torch.nn.DataParallel(student_model.features)
    teacher_model.cuda()
    student_model.cuda()
    '''Define the loss function (yoloLoss) and the optimizer (Adam).'''
    gt_criterion = yoloLoss(7, 2, 5, 0.5)
    mm_criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(student_model.parameters(),
                                 args.lr,
                                 weight_decay=args.l)
    '''weight initialization'''
    for m in student_model.modules():
        if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
            c = float(m.weight.data[0].nelement())
            m.weight.data = m.weight.data.normal_(0, 2.0 / c)
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data = m.weight.data.zero_().add(1.0)
            m.bias.data = m.bias.data.zero_()
    '''weight loading'''
    teacher_model.load_state_dict(
        torch.load('./experiment/vgg16fp/checkpoint.pth'))
    # initialize before the optional resume so a loaded checkpoint can override it
    best_loss = 100
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_loss = checkpoint['best_loss']
            student_model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
            del checkpoint
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    print(student_model, teacher_model)
    '''Define binarization operator.'''
    global bin_op
    bin_range = [1, 11]
    bin_op = util.BinOp(student_model, bin_range)

    for epoch in range(args.start_epoch, args.epochs):

        adjust_learning_rate(optimizer, epoch)
        '''Train& validate for one epoch.'''
        train(train_loader, student_model, teacher_model, gt_criterion,
              mm_criterion, optimizer, epoch)
        val_loss = validate(test_loader, student_model, teacher_model,
                            gt_criterion)

        is_best = val_loss < best_loss
        best_loss = min(val_loss, best_loss)

        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': student_model.state_dict(),
                'best_loss': best_loss,
                'optimizer': optimizer.state_dict(),
            }, is_best)
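
`adjust_learning_rate` is called in the epoch loop above but never defined in the snippet. A plausible sketch under the assumption of a simple step decay (the decay factor and step length are guesses, not from the original):

# Hypothetical step-decay schedule backing the adjust_learning_rate call in
# main(); the decay factor and step length are assumptions.
def adjust_learning_rate(optimizer, epoch, base_lr=0.001, decay=0.1, step=100):
    lr = base_lr * (decay ** (epoch // step))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr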
Example #10
# different learning rate
params = []
params_dict = dict(net.named_parameters())
for key, value in params_dict.items():
    if key.startswith('features'):
        params += [{'params': [value], 'lr': learning_rate * 1}]
    else:
        params += [{'params': [value], 'lr': learning_rate}]
optimizer = torch.optim.SGD(params,
                            lr=learning_rate,
                            momentum=0.9,
                            weight_decay=5e-4)

train_dataset = yoloDataset(
    root=file_root,
    list_file='/home/duanyajun/文档/目标识别项目/自己改写代码/mobilenet_yolov1/mytrain.txt',
    train=True,
    transform=[transforms.ToTensor()])
train_loader = DataLoader(train_dataset,
                          batch_size=batch_size,
                          shuffle=True,
                          num_workers=0)
#test_dataset = yoloDataset(root=file_root,list_file='./my2007.txt',train=False,transform = [transforms.ToTensor()] )
#test_loader = DataLoader(test_dataset,batch_size=batch_size,shuffle=False,num_workers=0)
print('the dataset has %d images' % (len(train_dataset)))
print('the batch_size is %d' % (batch_size))
logfile = open('/home/duanyajun/文档/目标识别项目/自己改写代码/mobilenet_yolov1/log.txt',
               'w')

num_iter = 0
vis = Visualizer(env='chen')
Example #11
net.cuda()

net.train()
# different learning rate
params = []
params_dict = dict(net.named_parameters())
for key, value in params_dict.items():
    if key.startswith('features'):
        params += [{'params': [value], 'lr': learning_rate * 1}]
    else:
        params += [{'params': [value], 'lr': learning_rate}]
optimizer = torch.optim.SGD(params, lr=learning_rate, momentum=0.9, weight_decay=5e-4)
# optimizer = torch.optim.Adam(net.parameters(),lr=learning_rate,weight_decay=1e-4)

# train_dataset = yoloDataset(root=file_root,list_file=['voc12_trainval.txt','voc07_trainval.txt'],train=True,transform = [transforms.ToTensor()] )
train_dataset = yoloDataset(root=file_root,
                            list_file=["/tmp/bt_im.txt"],
                            train=True,
                            transform=[transforms.ToTensor()])
train_loader = DataLoader(train_dataset,
                          batch_size=batch_size,
                          shuffle=True,
                          num_workers=4)
# test_dataset = yoloDataset(root=file_root,list_file='voc07_test.txt',train=False,transform = [transforms.ToTensor()] )
# test_dataset = yoloDataset(root=file_root,list_file='voc2007test.txt',train=False,transform = [transforms.ToTensor()] )
# test_loader = DataLoader(test_dataset,batch_size=batch_size,shuffle=False,num_workers=4)
print('the dataset has %d images' % (len(train_dataset)))
print('the batch_size is %d' % (batch_size))
logfile = open('log.txt', 'w')

num_iter = 0
# vis = Visualizer(env='xiong')
best_test_loss = np.inf

for epoch in range(num_epochs):
    net.train()
    # if epoch == 1: