Example #1
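The listing below uses names (os, np, nn, optim, DataLoader, SummaryWriter, config, utils, dataset, the model module, and the train/test helpers) that are imported or defined elsewhere in the original file. A minimal sketch of the imports it appears to assume follows; the project-local import paths, and the dataset module name in particular, are assumptions rather than code taken from the repository.

# Import sketch only; the project-local paths below are assumptions, not from the repository.
import os

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter  # the repo may use torch.utils.tensorboard instead

import config                                  # provides config.Paths()
import utils                                   # provides save_checkpoint / load_best_checkpoint
import ucf_main_without_systhesizing_model     # provides Model(opt)
from data_loader import dataset                # hypothetical module name for the dataset class
# train() and test() are defined elsewhere in the same file of the original repository.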
def main(opt):
    if not os.path.exists(opt.resume):
        os.makedirs(opt.resume)
    if not os.path.exists(opt.logroot):
        os.makedirs(opt.logroot)
    #log_dir_name = 'split'+opt.split + '/'
    log_dir_name = 'split'+opt.split + '/'+str(opt.manualSeed)+'/'
    opt.resume = os.path.join(opt.resume,log_dir_name)
    log_path = os.path.join(opt.logroot,log_dir_name)
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    #log_file_name = log_path + 'ucf_log_st.txt'
    log_file_name = log_path + 'ucf_log_v5.0_st_'+str(opt.manualSeed)+'.txt'

    with open(log_file_name,'a+') as file:
        file.write('manualSeed is %d \n' % opt.manualSeed)
        file.write('state_dim is %d \n' % opt.state_dim)
        file.write('num_bottleneck is %d \n' % opt.num_bottleneck)
    paths = config.Paths()

    train_datalist = '/home/mcislab/wangruiqi/IJCV2019/data/ucf101Vid_train_lin_split'+opt.split+'.txt'
    test_datalist = '/home/mcislab/wangruiqi/IJCV2019/data/ucf101Vid_val_lin_split'+opt.split+'.txt'
    #train_datalist = '/home/mcislab/wangruiqi/IJCV2019/data/test.txt'
    #test_datalist = '/home/mcislab/wangruiqi/IJCV2019/data/test.txt'
    #train_dataset = dataset(train_datalist, paths.detect_root_ucf_mmdet, paths.img_root_ucf,paths.rgb_res18_ucf,paths.rgb_res18_ucf, opt)
    train_dataset = dataset(train_datalist, paths.bninception_ucf, opt)  # zhao changed; was paths.resnet50_ucf_rgbflow_same
    train_dataloader = DataLoader(train_dataset, batch_size=opt.batch_size, shuffle=True, num_workers=opt.workers, drop_last=False)
    #test_dataset = dataset(test_datalist, paths.detect_root_ucf_mmdet, paths.img_root_ucf,paths.rgb_res18_ucf,paths.rgb_res18_ucf, opt)
    test_dataset = dataset(test_datalist, paths.bninception_ucf, opt)  # was paths.resnet50_ucf_rgbflow_same
    test_dataloader = DataLoader(test_dataset, batch_size=opt.batch_size, shuffle=False, num_workers=opt.workers, drop_last=False)

    model = ucf_main_without_systhesizing_model.Model(opt)
    #optimizer = optim.Adam(model.parameters(), lr=opt.lr, weight_decay=opt.weight_decay)
    optimizer = optim.SGD(model.parameters(), lr=opt.lr,momentum=opt.momentum)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=200, gamma=0.9)
    criterion1 = nn.CrossEntropyLoss()
    criterion2 = nn.NLLLoss()

    if opt.cuda:
        model.cuda()
        #criterion.cuda(opt.device_id)
        criterion1.cuda()
        criterion2.cuda()

    
    '''
    if opt.epoch != 0:
        if os.path.exists('./models/hmdb_split1/'+checkpoint_model_name):
            model.load_state_dict(torch.load('./models/hmdb_split1/' + checkpoint_model_name))
        else:
            print('model not found')
            exit()
    '''
    #Lin commented on Sept. 2nd
    #model.double()


    writer = SummaryWriter(log_dir=log_path+'runs/')
    # For training
    sum_test_acc = []
    best_acc = 0.
    epoch_errors = list()
    avg_epoch_error = np.inf
    best_epoch_error = np.inf
    '''
    #haha, output Acc for each class
    model.load_state_dict(torch.load('/home/mcislab/linhanxi/ucf101_NewFeat_RGBtuned/ckpnothresh/ours/model_best.pth')['state_dict'])
    test_acc, output = test(0,test_dataloader, model, criterion1, criterion2, opt, writer, log_file_name, is_test=True)
    exit()
    
    #load last experiment best model
    print("load last experiment best model")
    model.load_state_dict(torch.load('/home/mcislab/zhaojw/AAAI/prediction2020/models/ucf101_res50/split1/1050/model_best.pth')['state_dict'])
    test_acc, output = test(0,test_dataloader, model, criterion1, criterion2, opt, writer, log_file_name, is_test=True)
    '''
    
    print ("Test once for a baseline.")
    loaded_checkpoint =utils.load_best_checkpoint(opt, model, optimizer)
    if loaded_checkpoint:
        #opt, model, optimizer = loaded_checkpoint
        opt, model, __ = loaded_checkpoint
        test_acc, output = test(1,test_dataloader, model, criterion1, criterion2, opt, writer, log_file_name, is_test=True)
        tmp_test_acc = np.mean(test_acc)
        if tmp_test_acc > best_acc:
         
            best_acc = tmp_test_acc
        

    print ("Start to train.....")
    #model.load_state_dict(torch.load('/home/mcislab/linhanxi/ucf101_flowOnly/ckpnothresh/ours/checkpoint.pth')['state_dict'])
    for epoch_i in range(opt.epoch, opt.niter):
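        # Stepping the scheduler at the start of the epoch follows the pre-1.1 PyTorch
        # convention; PyTorch >= 1.1 recommends calling scheduler.step() after optimizer.step().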
        scheduler.step()
        
        train(epoch_i, train_dataloader, model, criterion1, criterion2,  optimizer, opt, writer, log_file_name)
        #val_acc, val_out, val_error =test(valid_loader, model, criterion1,criterion2, opt, log_file_name, is_test=False)
        # Lin changed according to 'sth_pre_abl1' on Sept. 3rd
        test_acc, output = test(epoch_i,test_dataloader, model, criterion1, criterion2, opt, writer, log_file_name, is_test=True)
        #test_acc,_ = test(test_dataloader, model, criterion1, criterion2, opt, log_file_name, is_test=True)
        
        tmp_test_acc = np.mean(test_acc)
        sum_test_acc.append(test_acc)
     
        if tmp_test_acc > best_acc:
            is_best = True
            best_acc = tmp_test_acc

        else:
            is_best = False

        utils.save_checkpoint({'epoch': epoch_i , 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()},
                              is_best=is_best, directory=opt.resume)
        print ("A training epoch finished!")
       
    # For testing
   
    print ("Training finished.Start to test.")
    loaded_checkpoint = utils.load_best_checkpoint(opt, model, optimizer)
    if loaded_checkpoint:
        opt, model, __ = loaded_checkpoint
    # Lin changed according to 'sth_pre_abl1' on Sept. 3rd
    test_acc,output = test(epoch_i,test_dataloader, model, criterion1, criterion2, opt, writer, log_file_name, is_test=True)
    #test_acc,output = test(test_dataloader, model, criterion1,criterion2,  opt, log_file_name, is_test=True)
    print ("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
    print ("ratio=0.1, test Accuracy:   %.2f " % (100. * test_acc[0][0]))
    print ("ratio=0.2, test Accuracy:   %.2f " % (100. * test_acc[0][1]))
    print ("ratio=0.3, test Accuracy:   %.2f " % (100. * test_acc[0][2]))
    print ("ratio=0.4, test Accuracy:   %.2f " % (100. * test_acc[0][3]))
    print ("ratio=0.5, test Accuracy:   %.2f " % (100. * test_acc[0][4]))
    print ("ratio=0.6, test Accuracy:   %.2f " % (100. * test_acc[0][5]))
    print ("ratio=0.7, test Accuracy:   %.2f " % (100. * test_acc[0][6]))
    print ("ratio=0.8, test Accuracy:   %.2f " % (100. * test_acc[0][7]))
    print ("ratio=0.9, test Accuracy:   %.2f " % (100. * test_acc[0][8]))
    print ("ratio=1.0, test Accuracy:   %.2f " % (100. * test_acc[0][9]))
    print ("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
Example #2
def main(opt):
    if not os.path.exists(opt.resume):
        os.makedirs(opt.resume)
    if not os.path.exists(opt.logroot):
        os.makedirs(opt.logroot)

    log_dir_name = str(opt.manualSeed) + '/'
    log_path = os.path.join(opt.logroot, log_dir_name)
    opt.resume = os.path.join(opt.resume, log_dir_name)
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    #log_file_name = log_path + 'ucf_log_st.txt'
    #log_file_name = opt.logroot + 'ucf_log_st_'+str(opt.manualSeed)+'.txt'

    log_file_name = opt.logroot + 'something_log_v4.1_' + str(
        opt.manualSeed) + '.txt'

    with open(log_file_name, 'a+') as file:
        file.write('manualSeed is %d \n' % opt.manualSeed)
    paths = config.Paths()

    train_datalist = "/home/mcislab/zhaojw/AAAI/sth_train_list.txt"
    val_datalist = "/home/mcislab/zhaojw/AAAI/sth_val_list.txt"
    test_datalist = "/home/mcislab/zhaojw/AAAI/sth_test_list.txt"
    #test_datalist = '/home/mcislab/wangruiqi/IJCV2019/data/newsomething-check.txt'
    #opt.resume = os.path.join(opt.resume,log_dir_name)

    train_dataset = dataset(train_datalist, paths.sthv2_final, opt)

    train_dataloader = DataLoader(train_dataset,
                                  batch_size=opt.batch_size,
                                  shuffle=True,
                                  num_workers=opt.workers,
                                  drop_last=False)

    val_dataset = dataset(val_datalist, paths.sthv2_final, opt)

    val_dataloader = DataLoader(val_dataset,
                                batch_size=opt.batch_size,
                                shuffle=False,
                                num_workers=opt.workers,
                                drop_last=False)

    test_dataset = dataset(test_datalist, paths.sthv2_final, opt)

    test_dataloader = DataLoader(test_dataset,
                                 batch_size=opt.batch_size,
                                 shuffle=False,
                                 num_workers=opt.workers,
                                 drop_last=False)

    model = sthv2_model.Model(opt)
    '''
    if opt.show:
        show(model)
        exit()
    '''

    optimizer = optim.SGD(model.parameters(), lr=opt.lr, momentum=opt.momentum)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=100,
                                                gamma=0.9)
    criterion1 = nn.CrossEntropyLoss()
    criterion2 = nn.NLLLoss()
    if opt.cuda:
        model.cuda()
        #criterion.cuda(opt.device_id)
        criterion1.cuda()
        criterion2.cuda()
    '''
    if opt.epoch != 0:
        if os.path.exists('./models/hmdb_split1/'+checkpoint_model_name):
            model.load_state_dict(torch.load('./models/hmdb_split1/' + checkpoint_model_name))
        else:
            print('model not found')
            exit()
    '''
    #Lin commented on Sept. 2nd
    #model.double()

    writer = SummaryWriter(log_dir=os.path.join(log_path, 'runs/'))
    # For training
    sum_test_acc = []
    best_acc = 0.
    #epoch_errors = list()
    avg_epoch_error = np.inf
    best_epoch_error = np.inf
    '''
    #haha, output Acc for each class
    test_load_dir = opt.resume
    #test_load_dir = '/home/mcislab/linhanxi/IJCV19_Experiments/sth_scale/something_scale5_M/ckpnothresh/ours'
    model.load_state_dict(torch.load(os.path.join(test_load_dir, 'model_best.pth'))['state_dict'])
    if opt.featdir:
        model.feat_mode()
    test_acc, output = test(0,test_dataloader, model, criterion1, criterion2, opt, writer, test_load_dir, is_test=True)
    exit()
    '''
    print("Test once to get a baseline.")
    loaded_checkpoint = utils.load_best_checkpoint(opt, model, optimizer)
    if loaded_checkpoint:
        opt, model, optimizer = loaded_checkpoint
        test_acc, output = test(51,
                                test_dataloader,
                                model,
                                criterion1,
                                criterion2,
                                opt,
                                writer,
                                log_file_name,
                                is_test=True)
        tmp_test_acc = np.mean(test_acc)
        if tmp_test_acc > best_acc:
            best_acc = tmp_test_acc

    print("Start to train.....")
    for epoch_i in range(opt.epoch, opt.niter):
        scheduler.step()

        train(epoch_i, train_dataloader, model, criterion1, criterion2,
              optimizer, opt, writer, log_file_name)
        #val_acc, val_out, val_error =test(valid_loader, model, criterion1,criterion2, opt, log_file_name, is_test=False)
        # Lin changed according to 'sth_pre_abl1' on Sept. 3rd
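        # Unlike Example #1, per-epoch model selection here uses the validation split;
        # the held-out test split is only used for the baseline check above and the final report below.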
        test_acc, output = val(epoch_i,
                               val_dataloader,
                               model,
                               criterion1,
                               criterion2,
                               opt,
                               writer,
                               log_file_name,
                               is_test=True)
        #test_acc,_ = test(test_dataloader, model, criterion1, criterion2, opt, log_file_name, is_test=True)

        tmp_test_acc = np.mean(test_acc)
        sum_test_acc.append(test_acc)

        if tmp_test_acc > best_acc:
            is_best = True
            best_acc = tmp_test_acc

        else:
            is_best = False

        utils.save_checkpoint(
            {
                'epoch': epoch_i,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict()
            },
            is_best=is_best,
            directory=opt.resume)
        print("A training epoch finished!")

    #epoch_i =33

    # For testing
    print("Training finished.Start to test.")
    loaded_checkpoint = utils.load_best_checkpoint(opt, model, optimizer)
    if loaded_checkpoint:
        opt, model, optimizer = loaded_checkpoint
    # Lin changed according to 'sth_pre_abl1' on Sept. 3rd
    test_acc, output = test(epoch_i,
                            test_dataloader,
                            model,
                            criterion1,
                            criterion2,
                            opt,
                            writer,
                            log_file_name,
                            is_test=True)
    #test_acc,output = test(test_dataloader, model, criterion1,criterion2,  opt, log_file_name, is_test=True)
    print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
    print("ratio=0.1, test Accuracy:   %.2f " % (100. * test_acc[0][0]))
    print("ratio=0.2, test Accuracy:   %.2f " % (100. * test_acc[0][1]))
    print("ratio=0.3, test Accuracy:   %.2f " % (100. * test_acc[0][2]))
    print("ratio=0.4, test Accuracy:   %.2f " % (100. * test_acc[0][3]))
    print("ratio=0.5, test Accuracy:   %.2f " % (100. * test_acc[0][4]))
    print("ratio=0.6, test Accuracy:   %.2f " % (100. * test_acc[0][5]))
    print("ratio=0.7, test Accuracy:   %.2f " % (100. * test_acc[0][6]))
    print("ratio=0.8, test Accuracy:   %.2f " % (100. * test_acc[0][7]))
    print("ratio=0.9, test Accuracy:   %.2f " % (100. * test_acc[0][8]))
    print("ratio=1.0, test Accuracy:   %.2f " % (100. * test_acc[0][9]))
    print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
Example #3
def main(opt):
    if not os.path.exists(opt.resume):
        os.makedirs(opt.resume)
    if not os.path.exists(opt.logroot):
        os.makedirs(opt.logroot)

  
    paths = cad120_config.Paths()
    #print (paths.tmp_root)
    subject_ids = pickle.load(open(os.path.join(paths.tmp_root, 'cad120_data_list.p'), 'rb'))

    data_path = os.path.join(paths.tmp_root, 'cad120_data_pred.p')
    #data_path =  '/media/mcislab/wrq/CAD120/pred-feature/cad120_data_pred.p'
    test_acc_final = np.zeros([4, opt.seq_size])
    sub_index = 0
    
    resume_root = os.path.join(opt.resume,str(opt.manualSeed))
    
    for sub, seqs in subject_ids.items():  # cross-validation for each subject
        #sub='Subject'+str(opt.subject)
        log_file_name = opt.logroot+'cad120_log_sub'+sub+'.txt'
        with open(log_file_name,'a+') as file:
            file.write('manualSeed is %d \n' % opt.manualSeed)
        opt.resume = resume_root+ sub + '/'

        training_subject = pickle.load(open(os.path.join(paths.tmp_root, 'cad120_data_list.p'), 'rb'))  # if not reload it will delete both in subject_ids and training_sub
        testing_subject = dict()
        testing_subject[sub] = seqs
        
        del training_subject[sub]

        
        #print training_subject
        #print testing_subject

        training_set = CAD120(data_path, training_subject)

        testing_set = CAD120(data_path, testing_subject)

        #testing_set = CAD120(data_path, sequence_ids[-test_num:])


        train_loader = torch.utils.data.DataLoader(training_set, collate_fn=utils.collate_fn_cad,batch_size=opt.batch_size,
                                                   num_workers=opt.workers, shuffle=True, pin_memory=True)
        #valid_loader = torch.utils.data.DataLoader(valid_set, collate_fn=utils.collate_fn_cad,
        #                                           batch_size=opt.batch_size,
        #                                           num_workers=opt.workers, shuffle=False, pin_memory=True)
        test_loader = torch.utils.data.DataLoader(testing_set,collate_fn=utils.collate_fn_cad,batch_size=opt.batch_size,
                                                  num_workers=opt.workers, shuffle=False, pin_memory=True)

        model = models.Model(opt) 
        optimizer = optim.Adam(model.parameters(), lr=opt.lr, weight_decay=opt.weight_decay)
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.9)
        criterion1 = nn.CrossEntropyLoss()
        criterion2 = nn.NLLLoss()
        if opt.cuda:
            model.cuda()
            #criterion.cuda(opt.device_id)
            criterion1.cuda()
            criterion2.cuda()


        loaded_checkpoint = utils.load_best_checkpoint(opt, model, optimizer)
        if loaded_checkpoint:
            opt, model, optimizer = loaded_checkpoint
        '''
        if opt.epoch != 0:
            if os.path.exists('./models/hmdb_split1/'+checkpoint_model_name):
                model.load_state_dict(torch.load('./models/hmdb_split1/' + checkpoint_model_name))
            else:
                print('model not found')
                exit()
        '''
        #model.double()


        writer = SummaryWriter(log_dir=opt.logroot+'runs/'+sub+'/')
        # For training
        sum_test_acc = []
        best_acc = 0.
        epoch_errors = list()
        avg_epoch_error = np.inf
        best_epoch_error = np.inf
        
        for epoch_i in range(opt.epoch, opt.niter):
            scheduler.step()
            train(epoch_i, train_loader, model, criterion1,criterion2,  optimizer, opt, writer, log_file_name)
            #val_acc, val_out, val_error =test(valid_loader, model, criterion1,criterion2, opt, log_file_name, is_test=False)
            test_acc,output = test(epoch_i,test_loader, model, criterion1, criterion2, opt,writer, log_file_name)
            
            tmp_test_acc = np.mean(test_acc)
            sum_test_acc.append(test_acc)
            
            if tmp_test_acc > best_acc:
                is_best = True
                best_acc = tmp_test_acc


            else:
                is_best = False

            utils.save_checkpoint({'epoch': epoch_i + 1, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()},
                                  is_best=is_best, directory=opt.resume)
        
        # For testing
        loaded_checkpoint = utils.load_best_checkpoint(opt, model, optimizer)
        if loaded_checkpoint:
            opt, model, optimizer = loaded_checkpoint
        test_acc,output = test(epoch_i,test_loader, model, criterion1, criterion2, opt,writer, log_file_name)
        print ("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
        print ("ratio=0.1, test Accuracy:   %.2f " % (100. * test_acc[0][0]))
        print ("ratio=0.2, test Accuracy:   %.2f " % (100. * test_acc[0][1]))
        print ("ratio=0.3, test Accuracy:   %.2f " % (100. * test_acc[0][2]))
        print ("ratio=0.4, test Accuracy:   %.2f " % (100. * test_acc[0][3]))
        print ("ratio=0.5, test Accuracy:   %.2f " % (100. * test_acc[0][4]))
        print ("ratio=0.6, test Accuracy:   %.2f " % (100. * test_acc[0][5]))
        print ("ratio=0.7, test Accuracy:   %.2f " % (100. * test_acc[0][6]))
        print ("ratio=0.8, test Accuracy:   %.2f " % (100. * test_acc[0][7]))
        print ("ratio=0.9, test Accuracy:   %.2f " % (100. * test_acc[0][8]))
        print ("ratio=1.0, test Accuracy:   %.2f " % (100. * test_acc[0][9]))
        print ("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
        sum_test_acc = np.array(sum_test_acc)
        sum_test_acc = sum_test_acc.reshape(opt.niter, opt.seq_size)
        scio.savemat(opt.logroot + sub + '_result.mat', {'test_acc': sum_test_acc})
        scio.savemat(opt.logroot + sub + '_output.mat', {'test_out': output})

        test_acc_final[sub_index, :] = test_acc
        
        with open(log_file_name, 'a+') as file:
            file.write("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n")
            for i in range(10):
                file.write("ratio=%.1f, test Accuracy: %.2f \n" % ((i + 1) / 10.0, 100. * test_acc[0][i]))
        sub_index = sub_index + 1
        writer.close()


    test_final = np.mean(test_acc_final,0)
    #print type(test_final)
    #print test_final
    print ("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
    print ("ratio=0.1, test Accuracy:   %.2f " % (100. * test_final[0]))
    print ("ratio=0.2, test Accuracy:   %.2f " % (100. * test_final[1]))
    print ("ratio=0.3, test Accuracy:   %.2f " % (100. * test_final[2]))
    print ("ratio=0.4, test Accuracy:   %.2f " % (100. * test_final[3]))
    print ("ratio=0.5, test Accuracy:   %.2f " % (100. * test_final[4]))
    print ("ratio=0.6, test Accuracy:   %.2f " % (100. * test_final[5]))
    print ("ratio=0.7, test Accuracy:   %.2f " % (100. * test_final[6]))
    print ("ratio=0.8, test Accuracy:   %.2f " % (100. * test_final[7]))
    print ("ratio=0.9, test Accuracy:   %.2f " % (100. * test_final[8]))
    print ("ratio=1.0, test Accuracy:   %.2f " % (100. * test_final[9]))
    print ("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
    with open(log_file_name, 'a+') as file:
        file.write("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n")
        file.write("Cross-subject performance is:\n")
        file.write("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n")
        for i in range(10):
            file.write("ratio=%.1f, test Accuracy: %.2f \n" % ((i + 1) / 10.0, 100. * test_final[i]))