예제 #1
0
def main(**kwargs):
    """Train the SiamRPN++ single-object tracker.

    Parses keyword overrides into the global ``options`` config, builds the
    model/optimizer/scheduler, optionally resumes from a checkpoint, then
    runs the epoch loop with periodic checkpointing.
    """
    options._parse(kwargs)
    args = options
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus
    torch.cuda.manual_seed(317)  # fixed seed for reproducibility

    with open(args.single_track_dataset, 'r') as outfile:
        args.single_track_data = json.load(outfile)

    model = SiamRPNPP()
    optimizer, lr_scheduler = build_opt_lr(model,
                                           args.single_track_start_epoch, args)
    if args.single_track_load_model:
        # BUG FIX: original referenced undefined name ``opt`` here.
        model, optimizer = load_model(model, args.single_track_load_model,
                                      optimizer)

    # model = model.cuda()
    model = model.eval()

    for epoch in range(args.single_track_start_epoch,
                       args.single_track_num_epochs):
        # Rebuild optimizer/scheduler once the backbone is unfrozen.
        if args.backbone_train_epoch == epoch:
            optimizer, lr_scheduler = build_opt_lr(model, epoch, args)

        lr_scheduler.step(epoch)
        cur_lr = lr_scheduler.get_cur_lr()

        train(model, optimizer, epoch, args)

        # BUG FIX: ``epoch + 1 % 100`` parsed as ``epoch + (1 % 100)`` and
        # never equalled 0; checkpoint every 100 epochs as intended.
        # Also use ``args`` (not undefined ``opt``) for the save dir.
        if (epoch + 1) % 100 == 0:
            path = os.path.join(args.save_dir, 'model_{}.pth'.format(epoch))
            save_model(path, epoch, model, optimizer)
예제 #2
0
def train(**kwargs):
    """Exploratory/debug setup: builds a dataloader, pushes one sample
    through a VGG16 feature extractor and a hand-built RPN.

    NOTE(review): this block references several names not defined in this
    function (``ratios``, ``anchor_scales``, ``self``) and leaves a
    ``pdb.set_trace()`` breakpoint in place -- it looks like scratch code,
    not a runnable training entry point.
    """
    opt._parse(kwargs)
    dataset = Dataset(opt)
    print('load data')
    dataloader = data_.DataLoader(dataset, \
           batch_size=1, \
           shuffle=True, \
           # pin_memory=True,

           num_workers=opt.num_workers)
    print('Loading Model')
    # faster_rcnn = FasterRCNNVGG16()
    print('model construct completed')
    # trainer = FasterRCNNTrainer(faster_rcnn).cuda()
    lr_ = opt.lr
    extractor, classifier = decom_vgg16()
    # Pull a single sample and run the backbone by hand.
    img, bbox_, label_, scale = dataset[1]
    _, H, W = img.shape  # CHW layout
    img_size = (H, W)
    img, bbox_, label_ = to_tensor(img), to_tensor(bbox_), to_tensor(label_)
    scale = at.scalar(scale)
    img, bbox, label = img.cuda().float(), bbox_.cuda(), label_.cuda()
    img, bbox, label = Variable(img), Variable(bbox), Variable(label)
    pdb.set_trace()  # NOTE(review): leftover debugger breakpoint
    features = extractor(img)

    # NOTE(review): ``ratios``/``anchor_scales``/``self.feat_stride`` are
    # undefined in this scope -- this raises NameError if executed.
    rpn = RegionProposalNetwork(512,
                                512,
                                ratios=ratios,
                                anchor_scales=anchor_scales,
                                feat_stride=self.feat_stride)

    rpn_locs, rpn_scores, rois, roi_indices, anchor = \
            self.faster_rcnn.rpn(features, img_size, scale
            )
예제 #3
0
 def load(
     self,
     path,
     load_optimizer=False,
     parse_opt=False,
     debug=False,
     simple=None,
 ):
     """Load a checkpoint from ``path`` into ``self.faster_rcnn``.

     Args:
         path: checkpoint file path.
         load_optimizer: also restore optimizer state if present.
         parse_opt: re-apply the stored config via ``opt._parse``.
         debug: forwarded to ``generate_state_dict``.
         simple: forwarded to ``generate_state_dict``; defaults to the
             current ``opt.use_simple``.

     Returns:
         self, for chaining.
     """
     # BUG FIX: the original default ``simple=opt.use_simple`` was evaluated
     # once at function-definition time, silently ignoring any later change
     # to ``opt.use_simple``. Resolve it at call time instead.
     if simple is None:
         simple = opt.use_simple
     state_dict = t.load(path)
     if 'model' in state_dict:
         sd = self.generate_state_dict(state_dict['model'], simple, debug)
         self.faster_rcnn.load_state_dict(sd)
     else:
         # Legacy checkpoints store bare weights and carry no
         # config/optimizer/sparse metadata, so return early.
         sd = self.generate_state_dict(state_dict, simple, debug)
         self.faster_rcnn.load_state_dict(sd)
         return self
     if parse_opt:
         opt._parse(state_dict['config'])
     if 'optimizer' in state_dict and load_optimizer:
         self.optimizer.load_state_dict(state_dict['optimizer'])
     if 'sparse' in state_dict and state_dict['sparse'] == True:
         print("Reverting to Sparse")
         self.revert_to_sparse(state_dict['sparse_list'])
     print(f"Successfully Loaded Model: {path}")
     return self
예제 #4
0
File: train.py  Project: TWSFar/CSRNet
def train(**kwargs):
    """Run the CSRNet training loop.

    Trains one epoch at a time, validates, tracks the best MAE seen so far,
    and writes a checkpoint every 20 epochs or whenever a new best appears.
    """
    opt._parse(kwargs)
    trainer = Trainer()
    for epoch in range(opt.start_epoch, opt.epochs):
        # One full training pass, then a validation pass.
        trainer.train(epoch)
        mae = trainer.validate(epoch)

        is_best = mae < trainer.best_pred
        if is_best:
            trainer.best_pred = mae
        print(' * best MAE {mae:.3f}'.format(mae=trainer.best_pred))

        periodic = (epoch % 20 == 0) and (epoch != 0)
        if periodic or is_best:
            # Multi-GPU models wrap the real module in .module.
            if opt.use_mulgpu:
                weights = trainer.model.module.state_dict()
            else:
                weights = trainer.model.state_dict()
            checkpoint = {
                'epoch': epoch + 1,
                'state_dict': weights,
                'best_pred': trainer.best_pred,
                'optimizer': trainer.optimizer.state_dict(),
            }
            trainer.saver.save_checkpoint(checkpoint, is_best)
예제 #5
0
def test(**kwargs):
    """Evaluate a pretrained Faster R-CNN checkpoint and print its mAP."""
    # Hard-coded checkpoint; kwargs may still override other config fields.
    opt.load_path = \
        '/home/fengkai/PycharmProjects/my-faster-rcnn/checkpoints/fasterrcnn_04231732_0.6941460588341642'
    # Apply keyword overrides onto the global config object; the parsed
    # config also determines where the dataset is read from.
    opt._parse(kwargs)

    testset = TestDataset(opt)
    # pin_memory would page-lock host memory for faster GPU transfers;
    # it is intentionally left disabled here.
    test_dataloader = data_.DataLoader(
        testset,
        batch_size=1,
        num_workers=opt.test_num_workers,
        shuffle=False,
    )

    faster_rcnn = FasterRCNNVGG16()
    print(faster_rcnn)
    print('model construct completed')
    trainer = FasterRCNNTrainer(faster_rcnn).cuda()

    # Load the pretrained weights stored at opt.load_path.
    trainer.load(opt.load_path)
    print('load pretrained model from %s' % opt.load_path)

    eval_result = eval(test_dataloader, faster_rcnn, test_num=opt.test_num)
    print('map is: ', str(eval_result['map']))
def main(**kwargs):
    """Run keypoint inference over the test split with flip augmentation."""
    opt._parse(kwargs)
    # n_gpu = utils.set_gpu(opt.gpu)

    test_dataset = FashionAIKeypoints(opt, phase='test')
    encoder = test_dataset.encoder
    df = utils.data_frame_template()  # accumulator for result rows

    print('Testing: {}'.format(opt.category))
    # BUG FIX: original printed len(val_dataset), a name that does not exist
    # in this function; the iterated dataset is test_dataset.
    print('Testing sample number: {}'.format(len(test_dataset)))
    cudnn.benchmark = True

    net = getattr(models, opt.model)(opt)
    checkpoint = torch.load(opt.load_checkpoint_path)  # Must be before cuda
    net.load_state_dict(checkpoint['state_dict'])
    net = net.cuda()
    # net = DataParallel(net)
    net.eval()

    for idx in tqdm(range(len(test_dataset))):
        img_path = test_dataset.get_image_path(idx)
        img0 = cv2.imread(img_path)  # BGR
        img0_flip = cv2.flip(img0, 1)
        img_h, img_w, _ = img0.shape

        scale = opt.img_max_size / max(img_w, img_h)

        # Test-time augmentation: predict on the image and its mirror.
        with torch.no_grad():
            hm_pred = utils.compute_keypoints(opt, img0, net, encoder)
            hm_pred_flip = utils.compute_keypoints(opt, img0_flip, net, encoder, doflip=True)
예제 #7
0
def train(**kwargs):
    """Set up data loaders, model and trainer for Faster R-CNN training."""
    opt._parse(kwargs)

    dataset = Dataset(opt)
    print('load data')
    # pin_memory is deliberately left off for the training loader.
    dataloader = data_.DataLoader(dataset,
                                  batch_size=1,
                                  shuffle=True,
                                  num_workers=opt.num_workers)

    testset = TestDataset(opt)
    test_dataloader = data_.DataLoader(testset,
                                       batch_size=1,
                                       num_workers=opt.test_num_workers,
                                       shuffle=False,
                                       pin_memory=True)

    faster_rcnn = FasterRCNNVGG16()
    print('model construct completed')
    trainer = FasterRCNNTrainer(faster_rcnn).cuda()
    if opt.load_path:
        trainer.load(opt.load_path)
        print('load pretrained model from %s' % opt.load_path)

    # Publish the label names to the visdom dashboard.
    trainer.vis.text(dataset.db.label_names, win='labels')
    best_map = 0
def train(**kwargs):
    """Train Faster R-CNN (ResNet-50 backbone), logging losses to
    TensorBoard and evaluating mAP on the validation split every epoch.
    """
    opt._parse(kwargs)  # apply keyword overrides to the global config
    dataset = Dataset(opt)  # training split (VOC2007, ~5011 images)
    print('load data')
    dataloader = data_.DataLoader(dataset,
                                  batch_size=1,
                                  shuffle=True,
                                  num_workers=opt.num_workers)
    testset = TestDataset(opt, split='val')  # validation split (~2500 images)
    test_dataloader = data_.DataLoader(testset,
                                       batch_size=1,
                                       num_workers=opt.test_num_workers,
                                       shuffle=False,
                                       pin_memory=True)
    faster_rcnn = FasterRCNN_ResNet50()
    print('model construct completed')
    trainer = FasterRCNNTrainer(faster_rcnn).cuda()
    if opt.load_path:  # resume from a pretrained checkpoint
        trainer.load(opt.load_path)
        print('load pretrained model from %s' % opt.load_path)

    best_map = 0
    # BUG FIX: best_path could be referenced before assignment at the
    # epoch-9 reload if no evaluation had improved on best_map yet.
    best_path = None
    lr_ = opt.lr
    writer = SummaryWriter('logs', comment='faster-rcnn-vgg16')
    global_step = 0
    for epoch in range(opt.epoch):  # effectively 14 epochs (break at 13)

        for ii, (img, bbox_, label_, scale) in tqdm(enumerate(dataloader)):
            scale = at.scalar(scale)
            img, bbox, label = img.cuda().float(), bbox_.cuda(), label_.cuda()
            loss = trainer.train_step(img, bbox, label, scale)
            rpn_loc_loss, rpn_cls_loss, roi_loc_loss, roi_cls_loss, total_loss = loss
            # Per-step scalar logging for each loss component.
            writer.add_scalar('rpn_loc_loss', rpn_loc_loss.detach().cpu().numpy(), global_step)
            writer.add_scalar('rpn_cls_loss', rpn_cls_loss.detach().cpu().numpy(), global_step)
            writer.add_scalar('roi_loc_loss', roi_loc_loss.detach().cpu().numpy(), global_step)
            writer.add_scalar('roi_cls_loss', roi_cls_loss.detach().cpu().numpy(), global_step)
            writer.add_scalar('total_loss', total_loss.detach().cpu().numpy(), global_step)
            global_step += 1
            if (ii + 1) % opt.plot_every == 0:
                pass
        # NOTE: ``eval`` here is the project's evaluation helper, which
        # shadows the builtin of the same name.
        eval_result = eval(test_dataloader, faster_rcnn, test_num=opt.test_num)

        lr_ = trainer.faster_rcnn.optimizer.param_groups[0]['lr']
        log_info = 'lr:{}, map:{}'.format(str(lr_), str(eval_result['map']))
        print(log_info)

        if eval_result['map'] > best_map:
            best_map = eval_result['map']
            best_path = trainer.save(best_map=best_map)
        if epoch == 9 and best_path is not None:
            # Reload the best checkpoint and decay the learning rate.
            trainer.load(best_path)
            trainer.faster_rcnn.scale_lr(opt.lr_decay)
            lr_ = lr_ * opt.lr_decay

        if epoch == 13:
            break
예제 #9
0
def main(**kwargs):
    """Evaluate keypoint predictions on the validation split and print the
    mean normalized error (NE) per keypoint.
    """
    opt._parse(kwargs)
    # n_gpu = utils.set_gpu(opt.gpu)

    val_dataset = FashionAIKeypoints(opt, phase='val')
    encoder = val_dataset.encoder
    nes = []  # per-sample normalized errors

    print('Evaluating: {}'.format(opt.category))
    print('Validation sample number: {}'.format(len(val_dataset)))
    cudnn.benchmark = True

    net = getattr(models, opt.model)(opt)
    checkpoint = torch.load(opt.load_checkpoint_path)  # Must be before cuda
    net.load_state_dict(checkpoint['state_dict'])
    net = net.cuda()
    # net = DataParallel(net)
    net.eval()

    for idx in tqdm(range(len(val_dataset))):
        img_path = val_dataset.get_image_path(idx)
        kpts = val_dataset.get_keypoints(idx)
        img0 = cv2.imread(img_path)  # BGR
        img0_flip = cv2.flip(img0, 1)
        img_h, img_w, _ = img0.shape

        scale = opt.img_max_size / max(img_w, img_h)

        # Test-time augmentation: average heatmaps from the image and its
        # horizontal mirror.
        with torch.no_grad():
            hm_pred = utils.compute_keypoints(opt, img0, net, encoder)
            hm_pred2 = utils.compute_keypoints(opt,
                                               img0_flip,
                                               net,
                                               encoder,
                                               doflip=True)

        x, y = encoder.decode_np(hm_pred + hm_pred2, scale, opt.hm_stride,
                                 (img_w / 2, img_h / 2))
        keypoints = np.stack([x, y, np.ones(x.shape)], axis=1).astype(np.int16)

        # BUG FIX: original tested ``args.visual`` but no ``args`` exists in
        # this function; the flag lives on the global config object ``opt``
        # (consistent with every other option read here).
        if opt.visual:
            kpt_img = utils.draw_keypoints(img0, keypoints)
            save_img_path = str(opt.db_path /
                                'tmp/one_{0}{1}.png'.format(opt.category, idx))
            cv2.imwrite(save_img_path, kpt_img)

        # The normalizing distance is the span between the two datum points.
        left, right = opt.datum
        x1, y1, v1 = kpts[left]
        x2, y2, v2 = kpts[right]

        # Skip samples where either datum point is unlabelled.
        if v1 == -1 or v2 == -1:
            continue

        width = math.sqrt((x2 - x1)**2 + (y2 - y1)**2)
        ne = utils.normalized_error(keypoints, kpts, width)
        nes.append([ne])

    nes = np.array(nes)
    print(np.mean(nes, axis=0))
def main(**kwargs):
    """Parse config overrides and build the train/test data loaders."""
    # BUG FIX: the parameter was named ``kwarg`` while the body referenced
    # ``kwargs``, raising NameError on every call. A **catch-all rename is
    # backward-compatible for all callers.
    opt._parse(kwargs)

    # Data Loader
    data_loader = DLoader(opt)
    train_loader, test_loader, test_video = data_loader.run()
예제 #11
0
def train(**kwargs):
    """Train Faster R-CNN (VGG16) without CUDA, periodically visualising
    ground-truth and predicted boxes after epoch 50.

    kwargs are parsed into the global ``opt`` config.
    """
    opt._parse(kwargs)

    dataset = Dataset(opt)
    # img, bbox, label, scale = dataset[0]
    # The returned img has been rescaled and possibly randomly flipped.
    # The returned bbox rows are ordered (ymin, xmin, ymax, xmax).
    #  H, W = size(im)
    # For an image shown on screen, a, b, c, d mark the 4 corners:
    #        a   ...   b     ymin
    #        .         .
    #        c   ...   d     ymax  (height H; y ranges over [0, H-1])
    #        xmin    xmax
    #        (width W; x ranges over [0, W-1])

    print('load data')
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, \
                                  # pin_memory=True,

                                  num_workers=opt.num_workers)

    faster_rcnn = FasterRCNNVGG16()
    print('model construct completed')

    # NOTE: trainer is intentionally not moved to CUDA in this variant.
    trainer = FasterRCNNTrainer(faster_rcnn)

    if opt.load_path:
        trainer.load(opt.load_path)
        print('load pretrained model from %s' % opt.load_path)

    for epoch in range(opt.epoch):
        for ii, (img, bbox_, label_, scale) in (enumerate(dataloader)):
            print('step: ', ii)

            scale = at.scalar(scale)
            img, bbox, label = img.float(), bbox_, label_
            img, bbox, label = Variable(img), Variable(bbox), Variable(label)
            trainer.train_step(img, bbox, label, scale)

            # Only predict/visualise after epoch 50 -- earlier predictions
            # are too poor to be informative.
            if ((ii + 1) % opt.plot_every == 0) and (epoch > 50):
                #                if os.path.exists(opt.debug_file):
                #                    ipdb.set_trace()

                # Plot ground-truth bboxes on the de-normalised image.
                ori_img_ = inverse_normalize(at.tonumpy(img[0]))
                gt_img = visdom_bbox(ori_img_, at.tonumpy(bbox_[0]),
                                     at.tonumpy(label_[0]))
                # gt_img is a numpy array in [0, 1], shaped 3 x H x W.
                # The annotated image could be saved or displayed here.

                # Plot predicted bboxes.
                _bboxes, _labels, _scores = trainer.faster_rcnn.predict(
                    [ori_img_], visualize=True)
                pred_img = visdom_bbox(ori_img_, at.tonumpy(_bboxes[0]),
                                       at.tonumpy(_labels[0]).reshape(-1),
                                       at.tonumpy(_scores[0]))
예제 #12
0
def main(**kwargs):
    """Train the Social-STGCNN trajectory-prediction model, checkpointing
    the weights and metrics after every epoch.
    """
    options._parse(kwargs)
    args = options
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus

    dataset = TrajectoryDataset(args.trajectory_dataset,
                                obs_len=args.obs_seq_len,
                                pred_len=args.pred_seq_len,
                                skip=1,
                                norm_lap_matr=True)

    args.loader_train = DataLoader(
        dataset,
        batch_size=1,  # This is irrelative to the args batch size parameter
        shuffle=True,
        num_workers=0)

    # Defining the model
    model = social_stgcnn(n_stgcnn=args.n_stgcnn,
                          n_txpcnn=args.n_txpcnn,
                          output_feat=5,
                          seq_len=args.obs_seq_len,
                          kernel_size=args.kernel_size,
                          pred_seq_len=args.pred_seq_len)

    checkpoint_dir = './checkpoint/'
    # Idiom: exist_ok replaces the explicit existence check.
    os.makedirs(checkpoint_dir, exist_ok=True)

    with open(checkpoint_dir + 'args.pkl', 'wb') as fp:
        pickle.dump(args, fp)

    # Training settings
    optimizer = optim.SGD(model.parameters(), lr=args.trajectory_lr)
    scheduler = optim.lr_scheduler.StepLR(optimizer,
                                          step_size=args.lr_sh_rate,
                                          gamma=0.2)

    print('Data and model loaded')
    print('Checkpoint dir:', checkpoint_dir)

    # Training
    args.metrics = {'train_loss': [], 'val_loss': []}
    print('Training started ...')
    # BUG FIX: the initial snapshot was written to 'bests.pth' while every
    # per-epoch save below targets 'best.pth'; unify on 'best.pth' so the
    # pre-training weights are overwritten rather than orphaned.
    torch.save(model.state_dict(), checkpoint_dir + 'best.pth')
    for epoch in range(args.trajectory_num_epochs):
        train(model, optimizer, epoch, args)
        scheduler.step()

        print('Epoch:', epoch)
        for k, v in args.metrics.items():
            if len(v) > 0:
                print(k, v[-1])

        with open(checkpoint_dir + 'metrics.pkl', 'wb') as fp:
            pickle.dump(args.metrics, fp)
        torch.save(model.state_dict(), checkpoint_dir + 'best.pth')
예제 #13
0
def train():
    """Train an Inception-v4 binary classifier on the pathology dataset,
    keeping the best-scoring checkpoint and decaying the LR once.
    """
    opt._parse()
    model = Inceptionv4(n_class=2,
                        use_drop=opt.use_drop,
                        model_name=opt.model_name,
                        pre_trained=opt.pretrained_model).cuda()
    print('model construct completed')
    tf_img = utils.TransformImage(model.inception_model)
    train_dataset = PathologyDataset(opt.data_dir,
                                     mode="train",
                                     transform=tf_img)
    test_dataset = PathologyDataset(opt.data_dir,
                                    mode="test",
                                    transform=tf_img)
    print('load data')
    train_dataloader = data_.DataLoader(train_dataset,
                                        batch_size=opt.train_batch_size,
                                        shuffle=True,
                                        num_workers=opt.num_workers)
    test_dataloader = data_.DataLoader(test_dataset,
                                       batch_size=opt.test_batch_size,
                                       num_workers=opt.test_num_workers,
                                       shuffle=False,
                                       pin_memory=True)
    if opt.load_path:
        model.load(opt.load_path)
        print('load pretrained model from %s' % opt.load_path)
    best_map = 0
    # BUG FIX: best_path could be referenced before assignment below.
    best_path = None
    lr_ = opt.lr
    optimizer = model.get_optimizer()
    avg_loss = 0

    for epoch in range(opt.epoch):
        for ii, (img, label) in tqdm(enumerate(train_dataloader)):
            img, label = img.cuda().float(), label.cuda()
            label = label.view(len(label))
            img, label = Variable(img), Variable(label)
            output = model(img)
            cls_loss = nn.CrossEntropyLoss()(output, label)
            optimizer.zero_grad()
            cls_loss.backward()
            optimizer.step()
            # BUG FIX: ``cls_loss[0]`` fails on 0-dim losses in modern
            # PyTorch; ``.item()`` extracts the scalar portably.
            avg_loss += cls_loss.item()
            if (ii + 1) % opt.plot_every == 0:
                print("cls_loss=" + str(avg_loss / opt.plot_every))
                avg_loss = 0
        eval_result = test_model(test_dataloader, model, test_num=opt.test_num)
        if eval_result > best_map:
            best_map = eval_result
            best_path = model.save(best_map=best_map)
        # BUG FIX: ``opt.epoch / 5`` is a float in Python 3, so the reload
        # only ever fired when opt.epoch was divisible by 5; use floor
        # division (the Python 2 semantics this code was written for).
        if epoch == opt.epoch // 5 and best_path is not None:
            model.load(best_path)
            model.scale_lr(opt.lr_decay)
            lr_ = lr_ * opt.lr_decay
        if epoch == opt.epoch - 1:
            break
예제 #14
0
def train(**kwargs):
    """Train Faster R-CNN (VGG16), periodically dumping ground-truth and
    predicted box visualisations to 'gt.png' / 'pred.png'.

    kwargs are parsed into the global ``opt`` config.
    """
    opt._parse(kwargs)

    dataset = Dataset(opt)
    print('load data')
    dataloader = data_.DataLoader(dataset, \
                                  batch_size=1, \
                                  shuffle=True, \
                                  # pin_memory=True,

                                  num_workers=opt.num_workers)
    testset = TestDataset(opt)
    test_dataloader = data_.DataLoader(testset,
                                       batch_size=1,
                                       num_workers=opt.test_num_workers,
                                       shuffle=False, \
                                       pin_memory=True
                                       )
    faster_rcnn = FasterRCNNVGG16()
    print('model construct completed')
    trainer = FasterRCNNTrainer(faster_rcnn).cuda()
    if opt.load_path:
        trainer.load(opt.load_path)
        print('load pretrained model from %s' % opt.load_path)
    best_map = 0
    lr_ = opt.lr
    for epoch in range(opt.epoch):
        trainer.reset_meters()  # clear per-epoch loss meters
        for ii, (img, bbox_, label_, scale) in tqdm(enumerate(dataloader)):
            scale = at.scalar(scale)

            img, bbox, label = img.cuda().float(), bbox_.cuda(), label_.cuda()
            img, bbox, label = Variable(img), Variable(bbox), Variable(label)
            trainer.train_step(img, bbox, label, scale)

            if (ii + 1) % opt.plot_every == 0:
                # Drop into a debugger if the sentinel file exists.
                if os.path.exists(opt.debug_file):
                    ipdb.set_trace()

                # Plot ground-truth bboxes on the de-normalised image.
                ori_img_ = inverse_normalize(at.tonumpy(img[0]))
                # ori_img_ = (at.tonumpy(img[0]))
                losses = trainer.get_meter_data()
                print(losses)
                write_image(ori_img_, at.tonumpy(bbox[0]), 'gt.png')
                _bboxes = trainer.faster_rcnn.predict([ori_img_],
                                                      visualize=True)
                _bboxes = at.tonumpy(_bboxes[0])
                # Plot predicted bboxes.
                write_image(ori_img_, _bboxes, 'pred.png')
                print('saved an image')

        if epoch == 13:
            break
예제 #15
0
 def load(self, state_dict, load_optimizer=True, parse_opt=False):
     """Load model (and optionally optimizer/config) from a state dict.

     Args:
         state_dict: checkpoint dict ({'model': ...}) or a legacy bare
             model state dict.
         load_optimizer: also restore optimizer state if present.
         parse_opt: re-apply the stored config via ``opt._parse``.

     Returns:
         self, for chaining.
     """
     if 'model' in state_dict:
         self.faster_rcnn.load_state_dict(state_dict['model'])
     else:  # legacy way, for backward compatibility
         self.faster_rcnn.load_state_dict(state_dict)
         return self
     if parse_opt:
         opt._parse(state_dict['config'])
     if 'optimizer' in state_dict and load_optimizer:
         self.optimizer.load_state_dict(state_dict['optimizer'])
     # BUG FIX: originally returned the raw state_dict while the legacy
     # branch (and every sibling implementation of load) returns self,
     # making the return type inconsistent; return self for chaining.
     return self
예제 #16
0
def train(**kwargs):
    """Build the training dataset and its data loader from the config."""
    # BUG FIX: every sibling call site passes the overrides dict itself
    # (opt._parse(kwargs)); ``opt._parse(**kwargs)`` re-expanded it into
    # keyword arguments, which does not match that calling convention.
    opt._parse(kwargs)

    # Training dataset.
    dataset = Dataset(opt)

    dataloader = data.DataLoader(dataset,
                                 batch_size=1,
                                 shuffle=True,
                                 num_workers=opt.num_workers)
예제 #17
0
def train(**kwargs):
    """Train the VRD predicate network on top of a pretrained Faster R-CNN.

    NOTE(review): ``total_loss += sum(loss)`` followed by ``loss.backward()``
    treats ``loss`` as both a sequence and a tensor -- verify against the
    return type of VGG16PREDICATES before relying on the printed average.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--local_rank", type=int, default=0)
    args = parser.parse_args()

    # t.distributed.init_process_group(backend="nccl", init_method="env://")
    # t.cuda.set_device(args.local_rank)
    device = t.device("cuda", args.local_rank)
    opt._parse(kwargs)

    dataset = VRDDataset(opt)
    # train_sampler = t.utils.data.distributed.DistributedSampler(dataset)
    print('load data')
    # dataloader = data_.DataLoader(dataset, \
    #                               batch_size=1, \
    #                               shuffle=False, \
    #                               # pin_memory=True,
    #                               num_workers=opt.num_workers,
    #                               sampler=train_sampler)
    dataloader = data_.DataLoader(dataset, \
                                  batch_size=1, \
                                  shuffle=True, \
                                  # pin_memory=True,

                                  num_workers=opt.num_workers)

    # word2vec_map = load_from_word2vec("test_word2vec.txt")
    word2vec_db = json.load(open("w2v.json"))

    # Frozen detector backbone feeding the predicate classifier.
    faster_rcnn = FasterRCNNVGG16().to(device)
    faster_rcnn_trainer = FasterRCNNTrainer(faster_rcnn)
    faster_rcnn_trainer.load(opt.faster_rcnn_model)
    vrd_trainer = VGG16PREDICATES(faster_rcnn_trainer, word2vec_db,
                                  dataset.db.triplets).to(device)
    # vrd_trainer = nn.parallel.DistributedDataParallel(vrd_trainer, find_unused_parameters=True, device_ids=[args.local_rank], output_device=args.local_rank)
    optimizer = t.optim.Adam(vrd_trainer.parameters())

    for epoch in range(opt.vrd_epoch):
        total_loss = 0
        for ii, (img, D) in tqdm(enumerate(dataloader)):
            # Skip empty images and empty relation sets.
            if len(img) == 0:
                continue
            if D == [] or D[0] == []:
                continue

            img = img.cuda().float()

            loss = vrd_trainer(img, D)
            total_loss += sum(loss)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        # NOTE(review): ``ii`` is unbound here if the dataloader is empty.
        print(total_loss / (ii + 1))
def main(**kwargs):
    """Parse options, build the data loaders, and run the ResNet-2D model."""
    # Apply keyword overrides onto the global config.
    opt._parse(kwargs)

    # Build train/test loaders plus the raw test video list.
    loader = DLoader(opt)
    train_loader, test_loader, test_video = loader.run()

    # Construct the model and run training/evaluation end-to-end.
    model = Resnet2D(opt, train_loader, test_loader, test_video)
    model.run()
예제 #19
0
 def load(self, path, load_optimizer=True, parse_opt=False, device='cuda'):
     """Restore trainer state from ``path``, mapping tensors onto ``device``.

     Supports both new-style checkpoint dicts and legacy bare model state
     dicts. Returns self so calls can be chained.
     """
     checkpoint = t.load(path, map_location=t.device(device))
     if 'model' not in checkpoint:
         # Legacy checkpoints are a bare model state dict with no
         # config/optimizer payload.
         self.faster_rcnn.load_state_dict(checkpoint)
         return self
     self.faster_rcnn.load_state_dict(checkpoint['model'])
     if parse_opt:
         opt._parse(checkpoint['config'])
     if load_optimizer and 'optimizer' in checkpoint:
         self.optimizer.load_state_dict(checkpoint['optimizer'])
     return self
예제 #20
0
 def load(self, path, load_optimizer=True, parse_opt=False):
     """Load a checkpoint from ``path``.

     Handles both new-style dicts ({'model': ..., 'config': ...,
     'optimizer': ...}) and legacy bare model state dicts. Returns self.
     """
     state = t.load(path)
     if 'model' not in state:
         # legacy way, for backward compatibility
         self.faster_rcnn.load_state_dict(state)
         return self
     self.faster_rcnn.load_state_dict(state['model'])
     if parse_opt:
         opt._parse(state['config'])
     if load_optimizer and 'optimizer' in state:
         self.optimizer.load_state_dict(state['optimizer'])
     return self
예제 #21
0
def main(**kwargs):
    """Benchmark a Faster R-CNN checkpoint: log FPS and parameter count."""
    opt._parse(kwargs)
    # Initialize Logger
    if opt.benchmark_path is None:
        # Derive a log path from the timestamp and the override values.
        timestr = time.strftime('%m%d%H%M')
        benchmark_path = f'logs/fasterrcnn_{timestr}'
        for k_, v_ in kwargs.items():
            benchmark_path += f'_{v_}'
        benchmark_path += '.log'
    else:
        # BUG FIX: when opt.benchmark_path was provided, ``benchmark_path``
        # was never assigned and the Logger call below raised NameError.
        benchmark_path = opt.benchmark_path

    Logger(benchmark_path, logging.INFO)
    logger = logging.getLogger(__name__)
    Logger.section_break(title='Benchmark Model')
    logger.info(f'User Arguments\n{opt._state_dict()}')

    # Load dataset
    dataset = TestDataset(opt, split='test')
    dataloader = data_.DataLoader(dataset,
                                  batch_size=1,
                                  num_workers=opt.test_num_workers,
                                  shuffle=False,
                                  pin_memory=True)

    logger.info(f"DATASET SIZE: {len(dataloader)}")
    # Idiom fix: a plain if/else instead of a conditional expression
    # evaluated purely for its side effect.
    if opt.mask:
        logger.info("Using Mask VGG")
    else:
        logger.info("Using normal VGG16")

    # Construct model
    faster_rcnn = FasterRCNNVGG16(mask=opt.mask)
    trainer = FasterRCNNTrainer(faster_rcnn).cuda()
    Logger.section_break(title='Model')
    logger.info(str(faster_rcnn))

    # Resume from a checkpoint
    if opt.load_path:
        assert os.path.isfile(opt.load_path),\
            'Checkpoint {} does not exist.'.format(opt.load_path)

        trainer.load(opt.load_path)
        Logger.section_break('Checkpoint')
        logger.info("Loaded checkpoint '{}' (epoch X)".format(opt.load_path))

    # Benchmark dataset
    fps = AverageMeter()
    benchmarker = {FPS: fps}
    result = benchmark(benchmarker, dataloader, faster_rcnn, test_num=1000)
    Logger.section_break('Benchmark completed')
    model_parameters = filter(lambda p: p.requires_grad,
                              faster_rcnn.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    logger.info('[PARAMETERS] {params}'.format(params=params))
    logger.info('[RUN TIME] {time.avg:.3f} sec/frame'.format(time=result[FPS]))
예제 #22
0
    def load(self, path):
        """Load model, optimizer and config from a CPU-mapped checkpoint.

        Arguments:
            path :- load path

        Returns self so the call can be chained.
        """
        # map_location='cpu' keeps loading working on GPU-less machines.
        checkpoint = torch.load(path, map_location=torch.device('cpu'))
        self.load_state_dict(checkpoint['model'])
        self.optimizer.load_state_dict(checkpoint['optimizer'])
        opt._parse(checkpoint['config'])
        return self
예제 #23
0
    def load(self, path, load_optimizer=True, parse_opt=False):
        """Load a checkpoint from ``path``.

        Handles both new-style dicts ({'model': ...}) and legacy bare
        model state dicts. Returns self for chaining.
        """
        state_dict = t.load(path)
        if 'model' in state_dict:
            self.faster_rcnn.load_state_dict(state_dict['model'])
        else:
            # Legacy checkpoint: the file itself is the model state dict.
            self.faster_rcnn.load_state_dict(state_dict)
            return self

        if parse_opt:
            opt._parse(state_dict['config'])
        if 'optimizer' in state_dict and load_optimizer:
            # BUG FIX: the optimizer state was loaded into the trainer
            # itself (self.load_state_dict); restore it into the optimizer,
            # matching every sibling implementation of load in this file.
            self.optimizer.load_state_dict(state_dict['optimizer'])
        return self
예제 #24
0
 def load(self, path, load_optimizer=True, parse_opt=False):
     """Restore model, optimizer and training counters from ``path``.

     Legacy checkpoints (a bare model state dict) short-circuit after the
     weights are loaded. Returns self for chaining.
     """
     ckpt = torch.load(path)
     if 'model' not in ckpt:
         # Legacy: the file itself is the model state dict.
         self.faster_rcnn.load_state_dict(ckpt)
         return self
     self.faster_rcnn.load_state_dict(ckpt['model'])
     if parse_opt:
         opt._parse(ckpt['config'])
     if load_optimizer and 'optimizer' in ckpt:
         self.optimizer.load_state_dict(ckpt['optimizer'])
     # Resume bookkeeping where the checkpoint left off.
     self.epochs = ckpt['epochs'] + 1
     self.best_map = ckpt['best_map']
     return self
예제 #25
0
def train(individual, **kwargs):
    """Train an evolved Faster R-CNN variant and record its best mAP on
    ``individual.accuracy`` (used as the fitness score).
    """
    opt._parse(kwargs)

    dataset = Dataset(opt)
    print('load data')
    dataloader = data_.DataLoader(dataset,
                                  batch_size=1,
                                  shuffle=True,
                                  num_workers=opt.num_workers)
    testset = TestDataset(opt)
    test_dataloader = data_.DataLoader(testset,
                                       batch_size=1,
                                       num_workers=opt.test_num_workers,
                                       shuffle=False,
                                       pin_memory=True)
    faster_rcnn = FasterRCNN_mine(individual)
    print('model construct completed')
    trainer = FasterRCNNTrainer(faster_rcnn).cuda()
    best_map = 0
    # BUG FIX: best_path was reset to None at every epoch, so the epoch-9
    # reload could call trainer.load(None); track it across epochs instead.
    best_path = None
    lr_ = opt.lr
    for epoch in range(opt.epoch):
        trainer.reset_meters()

        for ii, (img, bbox_, label_, scale) in tqdm(enumerate(dataloader)):
            scale = at.scalar(scale)

            img, bbox, label = img.cuda().float(), bbox_.cuda(), label_.cuda()
            trainer.train_step(img, bbox, label, scale)

            if (ii + 1) % opt.plot_every == 0:
                # Drop into a debugger if the sentinel file exists.
                if os.path.exists(opt.debug_file):
                    ipdb.set_trace()

                ori_img_ = inverse_normalize(at.tonumpy(img[0]))

                _bboxes, _labels, _scores = trainer.faster_rcnn.predict(
                    [ori_img_], visualize=True)

        eval_result = eval(test_dataloader, faster_rcnn, test_num=opt.test_num)
        lr_ = trainer.faster_rcnn.optimizer.param_groups[0]['lr']
        if eval_result['map'] > best_map:
            best_map = eval_result['map']
            best_path = trainer.save(best_map=best_map)
        if epoch == 9 and best_path is not None:
            # Reload the best checkpoint and decay the learning rate.
            trainer.load(best_path)
            trainer.faster_rcnn.scale_lr(opt.lr_decay)
            lr_ = lr_ * opt.lr_decay

        individual.accuracy = best_map
def eval_main(**kwargs):
    """Evaluate a pretrained Faster R-CNN per object category, writing one
    CSV row per category to ``oversampled-orig-classm.csv``.

    Exits the process when ``opt.load_path`` is not provided.

    Args:
        **kwargs: option overrides merged into the global ``opt`` config.
    """
    opt._parse(kwargs)
    opt.test_num = 10000
    opt.caffe_pretrain = True

    types = get_types()
    id_files = get_id_list_files()
    img_dirs = get_img_dirs()
    anno_dirs = get_anno_dirs()

    results_file = 'oversampled-orig-classm.csv'
    # Bug fix: an existing results file used to be opened with 'w+', which
    # truncated all previous rows and skipped the header, while only a new
    # file got the header.  Append when the file exists; write the header
    # only when creating it.
    if os.path.exists(results_file):
        file = open(results_file, "a")
    else:
        file = open(results_file, "w")
        columns = init_cols()
        writer = csv.DictWriter(file, fieldnames=columns)
        writer.writeheader()

    # Build and load the model once: evaluation is read-only, so there is no
    # need to reconstruct and reload the same checkpoint per category.
    faster_rcnn = FasterRCNNVGG16()
    trainer = FasterRCNNTrainer(faster_rcnn).cuda()
    if opt.load_path:
        trainer.load(opt.load_path)
        print('load pretrained model from %s' % opt.load_path)
    else:
        print("provide path of the checkpoint to be loaded.")
        exit()

    for category, id_file, img_dir, anno_dir in zip(types, id_files, img_dirs,
                                                    anno_dirs):
        testset = TestTypeDataset(opt,
                                  use_difficult=True,
                                  id_file=id_file,
                                  img_dir=img_dir,
                                  anno_dir=anno_dir)
        test_dataloader = data_.DataLoader(testset,
                                           batch_size=1,
                                           num_workers=opt.test_num_workers,
                                           shuffle=False,
                                           pin_memory=True)
        print(category)
        eval_result = eval(test_dataloader,
                           faster_rcnn,
                           category,
                           file,
                           test_num=opt.test_num)
        print('test_map', eval_result['map'])

    file.close()
예제 #27
0
파일: trainer.py 프로젝트: Grishma31/GAN
    def load(self, path, load_optimizer=True, parse_opt=False):
        """Restore trainer state from the checkpoint at *path*.

        A full checkpoint is a mapping with a 'model' entry and optionally
        'optimizer' and 'config'; a legacy checkpoint is a bare model state
        dict.

        Args:
            path: filesystem path of the checkpoint to load.
            load_optimizer: also restore optimizer state when present.
            parse_opt: also merge the saved 'config' into the global ``opt``.

        Returns:
            self, to allow call chaining.
        """
        print("Loading the modules")
        state_dict = t.load(path, map_location='cpu')
        print("Module is loaded successfully.")
        if 'model' in state_dict:
            self.faster_rcnn.load_state_dict(state_dict['model'])
        else:
            # Legacy format: the file *is* the model state dict.
            self.faster_rcnn.load_state_dict(state_dict)
            return self

        if parse_opt:
            opt._parse(state_dict['config'])
        if 'optimizer' in state_dict and load_optimizer:
            # Bug fix: the optimizer state was loaded into the trainer
            # module itself (self.load_state_dict); it belongs to the
            # optimizer, matching the sibling load() implementation.
            self.optimizer.load_state_dict(state_dict['optimizer'])
        return self
예제 #28
0
def main(**kwargs):
    """Train a DLA-34 multi-object-tracking model on the MOT dataset.

    Keyword overrides are merged into the global ``options`` config; a
    checkpoint is saved every ``opt.save_every`` epochs.
    """
    options._parse(kwargs)
    opt = options
    torch.manual_seed(317)  # fixed seed for reproducibility

    print('Setting up data...')

    transforms = T.Compose([T.ToTensor()])
    dataset = MotDataset(opt, (640, 480), augment=True, transforms=transforms)
    # Dataset statistics determine the model's output heads.
    opt = opt.update_dataset_info_and_set_heads(opt, dataset)
    print(opt)


    # Must be set before the CUDA device below is created.
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus
    # NOTE(review): lexicographic string comparison — assumes opt.gpus is a
    # string such as '0' or '-1'; confirm against the option parser.
    opt.device = torch.device('cuda' if opt.gpus >= '0' else 'cpu')

    print('Creating model...')
    model = create_model('dla_34', opt.heads, opt.head_conv)
    optimizer = torch.optim.Adam(model.parameters(), opt.lr)
    start_epoch = 0

    # Get dataloader

    train_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=opt.batch_size,
        shuffle=True,
        num_workers=opt.num_workers,
        pin_memory=True,
        drop_last=True
    )

    print('Starting training...')
    trainer = BaseTrainer(opt, model, optimizer)
    trainer.set_device(opt.gpus, -1, opt.device)

    if opt.multi_load_model != '':
      # NOTE(review): load_model rebinds the local `model`/`optimizer`,
      # but `trainer` keeps its earlier references — presumably load_model
      # mutates them in place; verify against its implementation.
      model, optimizer = load_model(model, opt.multi_load_model, trainer.optimizer)

    best = 1e10  # NOTE(review): never read below — looks vestigial
    for epoch in range(start_epoch + 1, opt.num_epochs + 1):

        log_dict_train, _ = trainer.train(epoch, train_loader)
        if epoch % opt.save_every == 0:
            # NOTE(review): lr is set to opt.lr * 0.5 (not decayed
            # cumulatively) at every save point — confirm intent.
            lr = opt.lr * 0.5
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
            save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)), epoch, model, optimizer)
예제 #29
0
파일: train.py 프로젝트: TWSFar/Faster-RCNN
    def __init__(self, **kwargs):
        """Build dataloaders, network and trainer; resume from a checkpoint
        when ``opt.load_path`` is set.

        Args:
            **kwargs: option overrides merged into the global ``opt`` config.
        """
        opt._parse(kwargs)
        self.opt = opt
        self.test_num = self.opt.test_num
        self.device, self.device_id = select_device(is_head=True)
        # Define Dataloader
        print("load data")
        self.train_dataset = Datasets(opt, mode='train')
        self.train_loader = DataLoader(self.train_dataset,
                                       batch_size=opt.batch_size,
                                       shuffle=True,
                                       num_workers=opt.num_workers)
        self.val_dataset = Datasets(opt, mode='val')
        self.val_loader = DataLoader(self.val_dataset,
                                     batch_size=opt.batch_size,
                                     shuffle=False,
                                     pin_memory=True,
                                     num_workers=opt.test_num_workers)
        self.num_batch = len(self.train_loader)

        # Define Network
        # initilize the network here.
        print("define network")
        faster_rcnn = FasterRCNNVGG16()
        self.trainer = FasterRCNNTrainer(faster_rcnn)

        # Resuming Checkpoint
        self.start_epoch = 0
        self.best_map = 0
        self.lr = opt.lr
        if opt.load_path:
            self.trainer.load(opt.load_path)
            self.start_epoch = self.trainer.start_epoch
            self.best_map = self.trainer.best_map
            print('load pretrained model from %s' % opt.load_path)

        # Use multiple GPU
        if opt.use_mgpu and len(self.device_id) > 1:
            self.trainer = torch.nn.DataParallel(self.trainer,
                                                 device_ids=self.device_id)
            print("Using multiple gpu")
        else:
            self.trainer = self.trainer.to(self.device)

        # Visdom.  Bug fix: DataParallel does not expose the wrapped
        # trainer's attributes, so `self.trainer.vis` raised AttributeError
        # in the multi-GPU branch — reach through `.module` when wrapped.
        vis_owner = (self.trainer.module
                     if isinstance(self.trainer, torch.nn.DataParallel)
                     else self.trainer)
        vis_owner.vis.text(self.train_dataset.classes, win='labels')
예제 #30
0
 def load(self, path, load_optimizer=True, parse_opt=False):
     """Restore trainer state from the checkpoint at *path*.

     A full checkpoint is a mapping holding "model" and, optionally,
     "optimizer" and "config"; a legacy checkpoint is the bare model
     state dict itself.

     Args:
         path: filesystem path of the checkpoint.
         load_optimizer: also restore optimizer state when present.
         parse_opt: also merge the saved "config" into the global ``opt``.

     Returns:
         self, so calls can be chained.
     """
     checkpoint = t.load(path)
     if "model" not in checkpoint:
         # Legacy way, for backward compatibility: the file *is* the
         # model state dict.
         self.faster_rcnn.load_state_dict(checkpoint)
         return self
     self.faster_rcnn.load_state_dict(checkpoint["model"])
     if parse_opt:
         opt._parse(checkpoint["config"])
     if load_optimizer and "optimizer" in checkpoint:
         self.optimizer.load_state_dict(checkpoint["optimizer"])
     return self
예제 #31
0
def run(**kwargs):
    """Run the optical-flow model: training and validation loop.

    Trains for ``opt.total_epochs`` epochs, validating after each one and
    saving a checkpoint (flagged as best when validation loss improves).

    Args:
        **kwargs: option overrides merged into the global ``opt`` config.
    """
    opt._parse(kwargs)
    opt.number_gpus = torch.cuda.device_count()
    # With multiple GPUs the batch is split across devices, so scale it up.
    if opt.number_gpus == 0:
        opt.effective_batch_size = opt.batch_size
    else:
        opt.effective_batch_size = opt.batch_size * opt.number_gpus

    opt.cuda = torch.cuda.is_available()

    training_dataset = Dataset(opt.dataset_dir + 'training')
    training_dataloader = DataLoader(training_dataset,
                                     batch_size=opt.effective_batch_size,
                                     shuffle=True,
                                     num_workers=opt.number_workers)

    validation_dataset = Dataset(opt.dataset_dir + 'validation')
    # NOTE(review): shuffle=True on the validation loader is unusual; the
    # epoch-average loss is unaffected, but confirm it is intentional.
    validation_dataloader = DataLoader(validation_dataset,
                                       batch_size=opt.effective_batch_size,
                                       shuffle=True,
                                       num_workers=opt.number_workers)
    opticalFlow = OpticalFlow()
    if opt.cuda and opt.number_gpus == 1:
        opticalFlow = opticalFlow.cuda()

    if opt.load_path:
        opticalFlow.load(opt.load_path)

    # Robustness fix: use infinity instead of the magic sentinel 100000 so
    # any finite first validation loss is registered as the best.
    best_err = float('inf')
    for epoch in range(opt.total_epochs):
        training_loss, batch_idex = opticalFlow.train(training_dataloader)

        validation_loss, batch_idex = opticalFlow.validate(
            validation_dataloader)

        is_best = False
        if validation_loss < best_err:
            best_err = validation_loss
            is_best = True

        opticalFlow.save(is_best)
예제 #32
0
def train(**kwargs):
    """Train Faster R-CNN (VGG16) with visdom monitoring.

    Evaluates after every epoch, keeps the best-mAP checkpoint, decays the
    learning rate once at epoch 9, and stops after epoch 13.

    Args:
        **kwargs: option overrides merged into the global ``opt`` config.
    """
    opt._parse(kwargs)

    dataset = Dataset(opt)
    print('load data')
    dataloader = data_.DataLoader(dataset, \
                                  batch_size=1, \
                                  shuffle=True, \
                                  # pin_memory=True,
                                  num_workers=opt.num_workers)
    testset = TestDataset(opt)
    test_dataloader = data_.DataLoader(testset,
                                       batch_size=1,
                                       num_workers=opt.test_num_workers,
                                       shuffle=False, \
                                       pin_memory=True
                                       )
    faster_rcnn = FasterRCNNVGG16()
    print('model construct completed')
    trainer = FasterRCNNTrainer(faster_rcnn).cuda()
    if opt.load_path:
        trainer.load(opt.load_path)
        print('load pretrained model from %s' % opt.load_path)

    trainer.vis.text(dataset.db.label_names, win='labels')
    best_map = 0
    # Bug fix: best_path could be unbound at the epoch-9 reload below if no
    # epoch had improved the mAP; initialize it and guard the reload.
    best_path = None
    for epoch in range(opt.epoch):
        trainer.reset_meters()
        for ii, (img, bbox_, label_, scale) in tqdm(enumerate(dataloader)):
            scale = at.scalar(scale)
            img, bbox, label = img.cuda().float(), bbox_.cuda(), label_.cuda()
            img, bbox, label = Variable(img), Variable(bbox), Variable(label)
            trainer.train_step(img, bbox, label, scale)

            if (ii + 1) % opt.plot_every == 0:
                # Drop into the debugger when the debug flag file exists.
                if os.path.exists(opt.debug_file):
                    ipdb.set_trace()

                # plot loss
                trainer.vis.plot_many(trainer.get_meter_data())

                # plot ground truth bboxes
                ori_img_ = inverse_normalize(at.tonumpy(img[0]))
                gt_img = visdom_bbox(ori_img_,
                                     at.tonumpy(bbox_[0]),
                                     at.tonumpy(label_[0]))
                trainer.vis.img('gt_img', gt_img)

                # plot predicted bboxes
                _bboxes, _labels, _scores = trainer.faster_rcnn.predict([ori_img_], visualize=True)
                pred_img = visdom_bbox(ori_img_,
                                       at.tonumpy(_bboxes[0]),
                                       at.tonumpy(_labels[0]).reshape(-1),
                                       at.tonumpy(_scores[0]))
                trainer.vis.img('pred_img', pred_img)

                # rpn confusion matrix(meter)
                trainer.vis.text(str(trainer.rpn_cm.value().tolist()), win='rpn_cm')
                # roi confusion matrix
                trainer.vis.img('roi_cm', at.totensor(trainer.roi_cm.conf, False).float())
        eval_result = eval(test_dataloader, faster_rcnn, test_num=opt.test_num)

        if eval_result['map'] > best_map:
            best_map = eval_result['map']
            best_path = trainer.save(best_map=best_map)
        if epoch == 9 and best_path is not None:
            # Reload the best checkpoint so far and decay the learning rate.
            trainer.load(best_path)
            trainer.faster_rcnn.scale_lr(opt.lr_decay)

        trainer.vis.plot('test_map', eval_result['map'])
        lr_ = trainer.faster_rcnn.optimizer.param_groups[0]['lr']
        log_info = 'lr:{}, map:{},loss:{}'.format(str(lr_),
                                                  str(eval_result['map']),
                                                  str(trainer.get_meter_data()))
        trainer.vis.log(log_info)
        if epoch == 13:
            # Fixed training budget: stop after 14 epochs.
            break