コード例 #1
0
                    help='print frequency (default: 10)')

args = parser.parse_args()
# ##############################################################################################################
# ###################################  Setup for some configurations ###########################################
# ##############################################################################################################
# If the input size is fixed across training iterations, enabling cudnn
# benchmark mode lets cuDNN auto-tune its kernels and speeds up training.
torch.backends.cudnn.benchmark = True
use_cuda = torch.cuda.is_available()
checkpoint_path = args.checkpoint_path

# > TOCHECK: training configs
# TrainingOpt / GetConfig / COCOSourceConfig are project-local; presumably
# they bundle hyperparameters and dataset paths -- verify in their modules.
opt = TrainingOpt()
config = GetConfig(opt.config_name)
soureconfig = COCOSourceConfig(opt.hdf5_train_data)  # > 512.h5
train_data = MyDataset(config, soureconfig, shuffle=False,
                       augment=True)  # shuffle in data loader

soureconfig_val = COCOSourceConfig(opt.hdf5_val_data)
val_data = MyDataset(config, soureconfig_val, shuffle=False,
                     augment=False)  # shuffle in data loader

best_loss = float('inf')
start_epoch = 0  # start from 0, or resume from the last saved epoch

args.distributed = False
# Distributed launchers (e.g. torch.distributed.launch) export WORLD_SIZE;
# a value > 1 means multi-process training is active.
if 'WORLD_SIZE' in os.environ:
    args.distributed = int(os.environ['WORLD_SIZE']) > 1

args.gpu = 0
args.world_size = 1
コード例 #2
0
def train(**kwargs):
    """Train a Faster R-CNN (VGG16 backbone) and log per-epoch evaluation.

    Keyword arguments are forwarded to ``opt._parse`` to override the global
    training configuration.

    Side effects: writes per-epoch metrics to ``log.txt``, saves a checkpoint
    via ``trainer.save`` whenever the eval mAP improves, and reloads the best
    checkpoint when the learning rate is decayed (epochs 9 and 19).
    """
    opt._parse(kwargs)
    data_root = "/home/lsm/TrainSet/"
    train_file = "train.txt"
    test_file = "test.txt"
    trainset = MyDataset(data_root, train_file, opt)
    testset = TestDataset(data_root, test_file, opt)
    print('load data')
    # Parenthesized calls continue across lines implicitly; the original's
    # backslash continuations and commented-out kwargs were removed.
    dataloader = data_.DataLoader(trainset,
                                  batch_size=1,
                                  shuffle=True,
                                  num_workers=opt.num_workers)

    test_dataloader = data_.DataLoader(testset,
                                       batch_size=1,
                                       num_workers=opt.test_num_workers,
                                       shuffle=False,
                                       pin_memory=True)
    faster_rcnn = FasterRCNNVGG16()
    print('model construct completed')
    trainer = FasterRCNNTrainer(faster_rcnn).cuda()
    if opt.load_path:
        trainer.load(opt.load_path)
        print('load pretrained model from %s' % opt.load_path)
    best_map = 0
    # BUG FIX: best_path was previously unbound until the first mAP
    # improvement, so the reload at epoch 9/19 could raise NameError.
    best_path = None
    lr_ = opt.lr
    # `with` guarantees the log file is closed even if training raises.
    with open('log.txt', 'w') as f:
        for epoch in range(opt.epoch):
            trainer.reset_meters()
            print("epoch " + str(epoch) + " ...")
            for ii, (img, bbox_, label_, scale) in tqdm(enumerate(dataloader)):
                scale = at.scalar(scale)
                img, bbox, label = img.cuda().float(), bbox_.cuda(), label_.cuda()
                trainer.train_step(img, bbox, label, scale)

            # NOTE(review): `eval` here is a project-local evaluation helper
            # that shadows the builtin -- consider renaming it at its
            # definition site.
            eval_result = eval(test_dataloader, faster_rcnn, test_num=opt.test_num)

            lr_ = trainer.faster_rcnn.optimizer.param_groups[0]['lr']
            log_info = 'lr:{}, map:{},loss:{}'.format(
                str(lr_), str(eval_result['map']), str(trainer.get_meter_data()))
            print(log_info)
            # Newline added so successive epochs don't run together in log.txt.
            f.write(log_info + '\n')

            if eval_result['map'] > best_map:
                best_map = eval_result['map']
                best_path = trainer.save(best_map=best_map)
            # Decay the learning rate after epochs 9 and 19, restarting from
            # the best checkpoint seen so far (if one was saved).
            if epoch in (9, 19):
                if best_path is not None:
                    trainer.load(best_path)
                trainer.faster_rcnn.scale_lr(opt.lr_decay)
                lr_ = lr_ * opt.lr_decay

            # Hard stop at epoch 50 regardless of opt.epoch.
            if epoch == 50:
                break
コード例 #3
0
ファイル: test.py プロジェクト: Aaronwd/PalmLocNet
import torch
import os
from torchvision import transforms, models
from torch.utils.data import DataLoader

######pic_size = 480
#######pic_resize =224

# Select GPU 0 when CUDA is available, otherwise fall back to the CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

if os.path.exists('./picture_total/testset/test.txt'):
    print('test.txt has been existed')
    # NOTE(review): rebinding `transforms` shadows the imported
    # torchvision.transforms module for the rest of this file -- any later
    # use of e.g. transforms.Resize would fail. Consider a different name.
    transforms = transforms.Compose(
        [transforms.Resize(224), transforms.ToTensor()])

    # MyDataset is project-local; presumably it reads image paths and labels
    # from the given txt file -- verify against its definition.
    test_data = MyDataset(txt='./picture_total/testset/test.txt',
                          transform=transforms)
    test_loader = DataLoader(dataset=test_data, batch_size=10, num_workers=8)
else:
    print('you need to prepare your test.txt first!')


# Load only the trained parameters (state dict), not a full serialized model
def test_PalmLocNet(test_x, test_y):
    vgg = models.vgg16_bn(pretrained=False)
    PLNet = vggPalmLocNet(vgg)
    PLNet.eval()
    if torch.cuda.is_available():
        PLNet.load_state_dict({
            k.replace('module.', ''): v
            for k, v in torch.load(
                './checkpoints/train_params_best.pth').items()
コード例 #4
0
ファイル: train.py プロジェクト: Aaronwd/PalmLocNet
                help="folder to store model")
parser.add_argument('-p','--PICTUREFOLDER',type= str, default='./picture_total/',
                help="folder to store trained picture")

args = parser.parse_args()

# Select GPU 0 when CUDA is available, otherwise fall back to the CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

if os.path.exists(args.PICTUREFOLDER+'trainset/'+'train.txt') and os.path.exists(args.PICTUREFOLDER+'testset/'+'test.txt') :
    print('train.txt and test.txt have been existed')
    # NOTE(review): rebinding `transforms` shadows the torchvision.transforms
    # module for the rest of this file -- consider a different name.
    transforms = transforms.Compose([
        transforms.Resize(224),
        transforms.ToTensor()
    ])

    # MyDataset is project-local; presumably it reads image paths and labels
    # from the given txt file -- verify against its definition.
    train_data = MyDataset(txt=args.PICTUREFOLDER + 'trainset/' + 'train.txt', transform=transforms)
    test_data = MyDataset(txt=args.PICTUREFOLDER + 'testset/' + 'test.txt', transform=transforms)
    train_loader = DataLoader(dataset=train_data, batch_size=args.BATCH_SIZE, shuffle=True, num_workers=8)
    test_loader = DataLoader(dataset=test_data, batch_size=10, num_workers=8)
else:
    print('you need to prepare your train.txt and test.txt first!')

# Test data: this loop consumes the whole loader, so after it finishes
# test_x / test_y hold only the LAST batch (moved to `device`). Labels are
# divided by 480 -- presumably normalizing pixel coordinates by the original
# picture size; confirm against the dataset definition.
for k, (tx, ty) in enumerate(test_loader):
    test_x = tx.to(device)
    test_y = ty.to(device)/480

# Train the network and save the model parameters
def train_PalmLocNet(train_loader, test_x, test_y):
    if os.path.exists(args.MODELFOLDER + 'train_params_best.pth'):
        print('reload the last best model parameters')