Example #1
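A PyTorch training/evaluation driver for the Something-Something V1 dataset: it prepares seed-specific log and checkpoint directories, builds train/test dataloaders, trains an ablation model with Adam and a step LR schedule, keeps the best checkpoint, and reports test accuracy at ten observation ratios.
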
import os

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter  # or: from torch.utils.tensorboard import SummaryWriter

# Project-local modules assumed importable here: config, utils,
# sthv1_model_ablation, the dataset class, and the train/test routines.

def main(opt):
    if not os.path.exists(opt.resume):
        os.makedirs(opt.resume)
    if not os.path.exists(opt.logroot):
        os.makedirs(opt.logroot)
    
    
    
    # Group checkpoints and logs by random seed so repeated runs do not clash.
    log_dir_name = str(opt.manualSeed) + '/'
    log_path = os.path.join(opt.logroot, log_dir_name)
    opt.resume = os.path.join(opt.resume, log_dir_name)
    if not os.path.exists(log_path):
        os.makedirs(log_path)

    log_file_name = os.path.join(opt.logroot, 'something_log_v4.1_' + str(opt.manualSeed) + '.txt')

    with open(log_file_name, 'a+') as file:
        file.write('manualSeed is %d \n' % opt.manualSeed)
    paths = config.Paths()

    train_datalist = '/home/mcislab/wangruiqi/IJCV2019/data/newsomething-trainlist.txt'
    test_datalist = '/home/mcislab/wangruiqi/IJCV2019/data/newsomething-vallist.txt'
    #test_datalist = '/home/mcislab/wangruiqi/IJCV2019/data/newsomething-check.txt'

    train_dataset = dataset(train_datalist, paths.detect_root1, paths.img_root, opt)
    # The original also had a variant that passed collate_fn=utils.collate_fn_sth;
    # the default collation is used here.
    train_dataloader = DataLoader(train_dataset, batch_size=opt.batch_size, shuffle=True,
                                  num_workers=opt.workers, drop_last=False)
    test_dataset = dataset(test_datalist, paths.detect_root1, paths.img_root, opt)
    test_dataloader = DataLoader(test_dataset, batch_size=opt.batch_size, shuffle=False,
                                 num_workers=opt.workers, drop_last=False)
    
    model = sthv1_model_ablation.Model(opt)
    print(model)

    optimizer = optim.Adam(model.parameters(), lr=opt.lr, weight_decay=opt.weight_decay)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.9)
    criterion1 = nn.CrossEntropyLoss()  # expects raw logits
    criterion2 = nn.NLLLoss()           # expects log-probabilities
    if opt.cuda:
        model.cuda()
        criterion1.cuda()
        criterion2.cuda()

    
    # model.double()  # disabled (Lin, Sept. 2nd)


    writer = SummaryWriter(log_dir=os.path.join(log_path,'runs/'))
    # For training
    sum_test_acc = []
    best_acc = 0.
    avg_epoch_error = np.inf
    best_epoch_error = np.inf
    
    
    '''
    # Optional: report per-class accuracy from a saved best checkpoint.
    test_load_dir = opt.resume
    #test_load_dir = '/home/mcislab/linhanxi/IJCV19_Experiments/sth_scale/something_scale5_M/ckpnothresh/ours'
    model.load_state_dict(torch.load(os.path.join(test_load_dir, 'model_best.pth'))['state_dict'])
    if opt.featdir:
        model.feat_mode()
    test_acc, output = test(0, test_dataloader, model, criterion1, criterion2, opt, writer, test_load_dir, is_test=True)
    exit()
    '''
    print ("Test once to get a baseline.")
    loaded_checkpoint = utils.load_best_checkpoint(opt, model, optimizer)
    if loaded_checkpoint:
        opt, model, optimizer = loaded_checkpoint
        test_acc, output = test(51,test_dataloader, model, criterion1, criterion2, opt, writer, log_file_name, is_test=True)
        tmp_test_acc = np.mean(test_acc)
        if tmp_test_acc > best_acc:
            
            best_acc = tmp_test_acc
      
    print ("Start to train.....")
    for epoch_i in range(opt.epoch, opt.niter):
        scheduler.step()

        train(epoch_i, train_dataloader, model, criterion1, criterion2, optimizer, opt, writer, log_file_name)
        #val_acc, val_out, val_error =test(valid_loader, model, criterion1,criterion2, opt, log_file_name, is_test=False)
        # Lin changed according to 'sth_pre_abl1' on Sept. 3rd
        test_acc, output = test(epoch_i,test_dataloader, model, criterion1, criterion2, opt, writer, log_file_name, is_test=True)
        #test_acc,_ = test(test_dataloader, model, criterion1, criterion2, opt, log_file_name, is_test=True)
        
        tmp_test_acc = np.mean(test_acc)
        sum_test_acc.append(test_acc)
     
        if tmp_test_acc > best_acc:
            is_best = True
            best_acc = tmp_test_acc

        else:
            is_best = False

        utils.save_checkpoint({'epoch': epoch_i, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()},
                              is_best=is_best, directory=opt.resume)
        print ("A training epoch finished!")
  
    #epoch_i =33
 
    # Final evaluation with the best checkpoint.
    print("Training finished. Start to test.")
    loaded_checkpoint = utils.load_best_checkpoint(opt, model, optimizer)
    if loaded_checkpoint:
        opt, model, optimizer = loaded_checkpoint
    # Note: epoch_i is the last index from the training loop and is undefined
    # if the loop did not run (opt.epoch >= opt.niter).
    test_acc, output = test(epoch_i, test_dataloader, model, criterion1, criterion2, opt, writer, log_file_name, is_test=True)
    print ("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
    print ("ratio=0.1, test Accuracy:   %.2f " % (100. * test_acc[0][0]))
    print ("ratio=0.2, test Accuracy:   %.2f " % (100. * test_acc[0][1]))
    print ("ratio=0.3, test Accuracy:   %.2f " % (100. * test_acc[0][2]))
    print ("ratio=0.4, test Accuracy:   %.2f " % (100. * test_acc[0][3]))
    print ("ratio=0.5, test Accuracy:   %.2f " % (100. * test_acc[0][4]))
    print ("ratio=0.6, test Accuracy:   %.2f " % (100. * test_acc[0][5]))
    print ("ratio=0.7, test Accuracy:   %.2f " % (100. * test_acc[0][6]))
    print ("ratio=0.8, test Accuracy:   %.2f " % (100. * test_acc[0][7]))
    print ("ratio=0.9, test Accuracy:   %.2f " % (100. * test_acc[0][8]))
    print ("ratio=1.0, test Accuracy:   %.2f " % (100. * test_acc[0][9]))
    print ("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
Example #2
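Evaluation-related methods of a Pascal VOC-style dataset class: evaluate_detections writes per-class result files and runs the Python (and optionally MATLAB) evaluation, competition_mode toggles filename salting and result cleanup, and the __main__ block loads the roidb for interactive inspection.
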
    def evaluate_detections(self, all_boxes, output_dir):
        self._write_voc_results_file(all_boxes)
        self._do_python_eval(output_dir)
        if self.config['matlab_eval']:
            self._do_matlab_eval(output_dir)
        if self.config['cleanup']:
            for cls in self._classes:
                if cls == '__background__':
                    continue
                filename = self._get_voc_results_file_template().format(cls)
                os.remove(filename)

    def competition_mode(self, on):
        # In competition mode, keep deterministic filenames (no salt) and keep
        # the result files on disk instead of cleaning them up.
        if on:
            self.config['use_salt'] = False
            self.config['cleanup'] = False
        else:
            self.config['use_salt'] = True
            self.config['cleanup'] = True


if __name__ == '__main__':
    from datasets.dataset import dataset

    d = dataset('trainval')
    res = d.roidb
    # Drop into an interactive IPython shell to inspect the roidb.
    from IPython import embed
    embed()
Example #3
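Driver code for training a Caffe-based detector: it applies config-file and command-line overrides, optionally fixes the random seeds, selects the GPU, builds the roidb, and hands everything to train_net.
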
    print('Called with args:')
    print(args)

    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    cfg.GPU_ID = args.gpu_id

    print('Using config:')
    pprint.pprint(cfg)

    if not args.randomize:
        # fix the random seeds (numpy and caffe) for reproducibility
        np.random.seed(cfg.RNG_SEED)
        caffe.set_random_seed(cfg.RNG_SEED)

    caffe.set_mode_gpu()
    caffe.set_device(args.gpu_id)

    ds = dataset(args.img_label_size, args.cache)
    roidb = ds.get_roidb()
    print('{:d} roidb entries'.format(len(roidb)))

    train_net(args.solver,
              roidb,
              solverstate=args.snapshot,
              pretrained_model=args.pretrained_model,
              max_iters=args.max_iters)
Example #4
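A small convenience wrapper that constructs a dataset and returns its roidb.
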
def get_roidb(img_label_size, cache):
    ds = dataset(img_label_size, cache)
    roidb = ds.get_roidb()

    return roidb
Example #5
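A TensorFlow 1.x evaluation harness for Faster R-CNN-style detectors: it registers a dataset factory per split, derives an output filename from the checkpoint (or the initial weights), builds the requested backbone (vgg16/res50/res101/res152), restores or initializes the weights, and runs test_net.
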
def testing(imdbval_name, classes, cfg_file, model, weights, tag, net,
            max_per_image):

    __sets = {}

    # Register one dataset factory per split; `split=split` freezes the loop
    # variable at definition time so each lambda builds its own split. `name`
    # is late-bound, but every name shares the same dataset prefix, so the
    # third argument resolves identically for all factories.
    for split in ['train', 'val', 'trainval', 'test']:
        name = imdbval_name.split('_')[0] + '_{}'.format(split)
        __sets[name] = (lambda split=split: dataset(split, classes,
                                                    name.split('_')[0]))

    if cfg_file is not None:
        cfg_from_file(cfg_file)

    print('Using config:')
    pprint.pprint(cfg)

    # If a model checkpoint is given, derive the output filename from it;
    # otherwise fall back to the initialization weights.
    if model:
        filename = os.path.splitext(os.path.basename(model))[0]
    else:
        filename = os.path.splitext(os.path.basename(weights))[0]

    tag = tag if tag else 'default'
    filename = tag + '/' + filename
    imdb = get_imdb(imdbval_name, __sets)

    tfconfig = tf.ConfigProto(allow_soft_placement=True)
    tfconfig.gpu_options.allow_growth = True

    # init session
    sess = tf.Session(config=tfconfig)
    # load network
    if net == 'vgg16':
        net = vgg16(batch_size=1)
    elif net == 'res50':
        net = resnetv1(batch_size=1, num_layers=50)
    elif net == 'res101':
        net = resnetv1(batch_size=1, num_layers=101)
    elif net == 'res152':
        net = resnetv1(batch_size=1, num_layers=152)
    else:
        raise NotImplementedError

    # load model
    net.create_architecture(sess,
                            "TEST",
                            imdb.num_classes,
                            tag='default',
                            anchor_scales=cfg.ANCHOR_SCALES,
                            anchor_ratios=cfg.ANCHOR_RATIOS)

    if model:
        print(('Loading model check point from {:s}').format(model))
        saver = tf.train.Saver()
        saver.restore(sess, model)
        print('Loaded.')
    else:
        print(('Loading initial weights from {:s}').format(weights))
        sess.run(tf.global_variables_initializer())
        print('Loaded.')

    test_net(sess, net, imdb, filename, max_per_image=max_per_image)

    sess.close()
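
A hypothetical invocation (every argument value below is illustrative, not taken from the original code) might look like:

    testing(imdbval_name='mydataset_test',
            classes=('__background__', 'car', 'person'),
            cfg_file='experiments/cfgs/res101.yml',
            model='output/default/res101_iter_70000.ckpt',
            weights=None,
            tag='default',
            net='res101',
            max_per_image=100)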