# Example 1
def main(opt):
    """Train, validate and test the something-something-v2 model.

    Args:
        opt: parsed options namespace; reads (at least) resume, logroot,
            manualSeed, batch_size, workers, lr, momentum, cuda, epoch,
            niter. ``opt.resume`` is rewritten in place to the seed-specific
            checkpoint directory.

    Side effects: creates log/checkpoint directories, appends to a text log
    file, writes TensorBoard summaries, and saves a checkpoint every epoch.
    """
    # exist_ok avoids the check-then-create race of exists() + makedirs().
    os.makedirs(opt.resume, exist_ok=True)
    os.makedirs(opt.logroot, exist_ok=True)

    log_dir_name = str(opt.manualSeed) + '/'
    log_path = os.path.join(opt.logroot, log_dir_name)
    opt.resume = os.path.join(opt.resume, log_dir_name)
    os.makedirs(log_path, exist_ok=True)

    # NOTE(review): kept as plain concatenation (not os.path.join) so the
    # log file lands exactly where it used to; assumes opt.logroot ends
    # with a path separator -- confirm.
    log_file_name = opt.logroot + 'something_log_v4.1_' + str(
        opt.manualSeed) + '.txt'

    with open(log_file_name, 'a+') as file:
        file.write('manualSeed is %d \n' % opt.manualSeed)
    paths = config.Paths()

    train_datalist = "/home/mcislab/zhaojw/AAAI/sth_train_list.txt"
    val_datalist = "/home/mcislab/zhaojw/AAAI/sth_val_list.txt"
    test_datalist = "/home/mcislab/zhaojw/AAAI/sth_test_list.txt"

    def _make_loader(datalist, shuffle):
        # The three splits share every DataLoader setting except shuffle.
        return DataLoader(dataset(datalist, paths.sthv2_final, opt),
                          batch_size=opt.batch_size,
                          shuffle=shuffle,
                          num_workers=opt.workers,
                          drop_last=False)

    train_dataloader = _make_loader(train_datalist, True)
    val_dataloader = _make_loader(val_datalist, False)
    test_dataloader = _make_loader(test_datalist, False)

    model = sthv2_model.Model(opt)

    optimizer = optim.SGD(model.parameters(), lr=opt.lr, momentum=opt.momentum)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=100,
                                                gamma=0.9)
    criterion1 = nn.CrossEntropyLoss()
    criterion2 = nn.NLLLoss()
    if opt.cuda:
        model.cuda()
        criterion1.cuda()
        criterion2.cuda()

    writer = SummaryWriter(log_dir=os.path.join(log_path, 'runs/'))

    # For training
    sum_test_acc = []  # per-epoch validation accuracy arrays, kept for inspection
    best_acc = 0.

    print("Test once to get a baseline.")
    loaded_checkpoint = utils.load_best_checkpoint(opt, model, optimizer)
    if loaded_checkpoint:
        opt, model, optimizer = loaded_checkpoint
        # Epoch tag 51 only labels this baseline run in the logs.
        test_acc, output = test(51,
                                test_dataloader,
                                model,
                                criterion1,
                                criterion2,
                                opt,
                                writer,
                                log_file_name,
                                is_test=True)
        tmp_test_acc = np.mean(test_acc)
        if tmp_test_acc > best_acc:
            best_acc = tmp_test_acc

    print("Start to train.....")
    # Pre-seed epoch_i so the final test below cannot raise NameError when
    # the training range is empty (opt.epoch >= opt.niter).
    epoch_i = opt.epoch
    for epoch_i in range(opt.epoch, opt.niter):
        train(epoch_i, train_dataloader, model, criterion1, criterion2,
              optimizer, opt, writer, log_file_name)
        # Model selection is done on the validation split.
        test_acc, output = val(epoch_i,
                               val_dataloader,
                               model,
                               criterion1,
                               criterion2,
                               opt,
                               writer,
                               log_file_name,
                               is_test=True)

        tmp_test_acc = np.mean(test_acc)
        sum_test_acc.append(test_acc)

        is_best = tmp_test_acc > best_acc
        if is_best:
            best_acc = tmp_test_acc

        utils.save_checkpoint(
            {
                'epoch': epoch_i,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict()
            },
            is_best=is_best,
            directory=opt.resume)

        # PyTorch >= 1.1: step the LR scheduler AFTER the optimizer steps
        # for the epoch (stepping before training skips the first LR value).
        scheduler.step()
        print("A training epoch finished!")

    # For testing: reload the best checkpoint seen during training.
    print("Training finished.Start to test.")
    loaded_checkpoint = utils.load_best_checkpoint(opt, model, optimizer)
    if loaded_checkpoint:
        opt, model, optimizer = loaded_checkpoint
    test_acc, output = test(epoch_i,
                            test_dataloader,
                            model,
                            criterion1,
                            criterion2,
                            opt,
                            writer,
                            log_file_name,
                            is_test=True)
    print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
    # test_acc[0][i] holds the accuracy at observation ratio (i + 1) / 10.
    for ratio_idx in range(10):
        print("ratio=%.1f, test Accuracy:   %.2f " %
              ((ratio_idx + 1) / 10.0, 100. * test_acc[0][ratio_idx]))
    print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
# Example 2
def main(opt):
    """Train and test the UCF-101 model for one dataset split.

    Args:
        opt: parsed options namespace; reads (at least) resume, logroot,
            split, manualSeed, state_dim, num_bottleneck, batch_size,
            workers, lr, momentum, cuda, epoch, niter. ``opt.resume`` is
            rewritten in place to the split/seed-specific checkpoint dir.

    Side effects: creates log/checkpoint directories, appends to a text log
    file, writes TensorBoard summaries, and saves a checkpoint every epoch.
    """
    # exist_ok avoids the check-then-create race of exists() + makedirs().
    os.makedirs(opt.resume, exist_ok=True)
    os.makedirs(opt.logroot, exist_ok=True)

    log_dir_name = 'split' + opt.split + '/' + str(opt.manualSeed) + '/'
    opt.resume = os.path.join(opt.resume, log_dir_name)
    log_path = os.path.join(opt.logroot, log_dir_name)
    os.makedirs(log_path, exist_ok=True)
    log_file_name = log_path + 'ucf_log_v5.0_st_' + str(opt.manualSeed) + '.txt'

    with open(log_file_name, 'a+') as file:
        file.write('manualSeed is %d \n' % opt.manualSeed)
        file.write('state_dim is %d \n' % opt.state_dim)
        file.write('num_bottleneck is %d \n' % opt.num_bottleneck)
    paths = config.Paths()

    train_datalist = '/home/mcislab/wangruiqi/IJCV2019/data/ucf101Vid_train_lin_split' + opt.split + '.txt'
    test_datalist = '/home/mcislab/wangruiqi/IJCV2019/data/ucf101Vid_val_lin_split' + opt.split + '.txt'

    # Both splits share every DataLoader setting except shuffle.
    train_dataset = dataset(train_datalist, paths.bninception_ucf, opt)
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=opt.batch_size,
                                  shuffle=True,
                                  num_workers=opt.workers,
                                  drop_last=False)
    test_dataset = dataset(test_datalist, paths.bninception_ucf, opt)
    test_dataloader = DataLoader(test_dataset,
                                 batch_size=opt.batch_size,
                                 shuffle=False,
                                 num_workers=opt.workers,
                                 drop_last=False)

    model = ucf_main_without_systhesizing_model.Model(opt)
    optimizer = optim.SGD(model.parameters(), lr=opt.lr, momentum=opt.momentum)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=200,
                                                gamma=0.9)
    criterion1 = nn.CrossEntropyLoss()
    criterion2 = nn.NLLLoss()

    if opt.cuda:
        model.cuda()
        criterion1.cuda()
        criterion2.cuda()

    writer = SummaryWriter(log_dir=log_path + 'runs/')

    # For training
    sum_test_acc = []  # per-epoch test accuracy arrays, kept for inspection
    best_acc = 0.

    print("Test once for a baseline.")
    loaded_checkpoint = utils.load_best_checkpoint(opt, model, optimizer)
    if loaded_checkpoint:
        # The restored optimizer state is deliberately discarded: training
        # continues with the freshly constructed optimizer.
        opt, model, __ = loaded_checkpoint
        test_acc, output = test(1, test_dataloader, model, criterion1,
                                criterion2, opt, writer, log_file_name,
                                is_test=True)
        tmp_test_acc = np.mean(test_acc)
        if tmp_test_acc > best_acc:
            best_acc = tmp_test_acc

    print("Start to train.....")
    # Pre-seed epoch_i so the final test below cannot raise NameError when
    # the training range is empty (opt.epoch >= opt.niter).
    epoch_i = opt.epoch
    for epoch_i in range(opt.epoch, opt.niter):
        train(epoch_i, train_dataloader, model, criterion1, criterion2,
              optimizer, opt, writer, log_file_name)
        test_acc, output = test(epoch_i, test_dataloader, model, criterion1,
                                criterion2, opt, writer, log_file_name,
                                is_test=True)

        tmp_test_acc = np.mean(test_acc)
        sum_test_acc.append(test_acc)

        is_best = tmp_test_acc > best_acc
        if is_best:
            best_acc = tmp_test_acc

        utils.save_checkpoint({'epoch': epoch_i,
                               'state_dict': model.state_dict(),
                               'optimizer': optimizer.state_dict()},
                              is_best=is_best, directory=opt.resume)

        # PyTorch >= 1.1: step the LR scheduler AFTER the optimizer steps
        # for the epoch (stepping before training skips the first LR value).
        scheduler.step()
        print("A training epoch finished!")

    # For testing: reload the best checkpoint seen during training.
    print("Training finished.Start to test.")
    loaded_checkpoint = utils.load_best_checkpoint(opt, model, optimizer)
    if loaded_checkpoint:
        opt, model, __ = loaded_checkpoint
    test_acc, output = test(epoch_i, test_dataloader, model, criterion1,
                            criterion2, opt, writer, log_file_name,
                            is_test=True)
    print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
    # test_acc[0][i] holds the accuracy at observation ratio (i + 1) / 10.
    for ratio_idx in range(10):
        print("ratio=%.1f, test Accuracy:   %.2f " %
              ((ratio_idx + 1) / 10.0, 100. * test_acc[0][ratio_idx]))
    print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")