Example #1
def softmax_test(target):
    x = np.array([[9, 2, 5, 0, 0], [7, 5, 0, 0, 0]])
    expected_output = np.array(
        [[9.80897665e-01, 8.94462891e-04, 1.79657674e-02, 1.21052389e-04, 1.21052389e-04],
         [8.78679856e-01, 1.18916387e-01, 8.01252314e-04, 8.01252314e-04, 8.01252314e-04]])
    test_cases = [{
        "name": "datatype_check",
        "input": [x],
        "expected": np.ndarray,
        "error": "The function should return a numpy array."
    }, {
        "name": "shape_check",
        "input": [x],
        "expected": expected_output,
        "error": "Wrong shape"
    }, {
        "name": "equation_output_check",
        "input": [x],
        "expected": expected_output,
        "error": "Wrong output"
    }]

    test(test_cases, target)
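The NumPy exercises on this page all delegate to a `test(test_cases, target)` helper that is not shown here. A minimal runner compatible with these test-case dictionaries might look like the sketch below; the dispatch on the three `name` values is inferred from the dictionaries themselves, not taken from the real helper.

import numpy as np

def test(test_cases, target):
    # Hypothetical runner: dispatch each case on its "name" field.
    for case in test_cases:
        got = target(*case["input"])
        if case["name"] == "datatype_check":
            assert isinstance(got, case["expected"]), case["error"]
        elif case["name"] == "shape_check":
            assert got.shape == case["expected"].shape, case["error"]
        elif case["name"] == "equation_output_check":
            assert np.allclose(got, case["expected"]), case["error"]
    print("All tests passed.")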
Example #2
def image2vector_test(target):
    image = np.array([[[0.67826139, 0.29380381], [0.90714982, 0.52835647],
                       [0.4215251, 0.45017551]],
                      [[0.92814219, 0.96677647], [0.85304703, 0.52351845],
                       [0.19981397, 0.27417313]],
                      [[0.60659855, 0.00533165], [0.10820313, 0.49978937],
                       [0.34144279, 0.94630077]]])

    expected_output = np.array([[0.67826139], [0.29380381], [0.90714982],
                                [0.52835647], [0.4215251], [0.45017551],
                                [0.92814219], [0.96677647], [0.85304703],
                                [0.52351845], [0.19981397], [0.27417313],
                                [0.60659855], [0.00533165], [0.10820313],
                                [0.49978937], [0.34144279], [0.94630077]])
    test_cases = [{
        "name": "datatype_check",
        "input": [image],
        "expected": np.ndarray,
        "error": "The function should return a numpy array."
    }, {
        "name": "shape_check",
        "input": [image],
        "expected": expected_output,
        "error": "Wrong shape"
    }, {
        "name": "equation_output_check",
        "input": [image],
        "expected": expected_output,
        "error": "Wrong output"
    }]

    test(test_cases, target)
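A one-line reshape satisfies all three checks, since the expected column vector lists the pixels in row-major order (an illustrative sketch, not the graded solution):

import numpy as np

def image2vector(image):
    # Flatten a (height, width, channels) array into an (h*w*c, 1) column vector.
    return image.reshape(image.shape[0] * image.shape[1] * image.shape[2], 1)

Example #3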
def test():
    "Please, try to update that everytime it is a change in the tests."
    ## Init tests
    t0 = time.time()
    print("")
    print(message_init_tests)
    print(ref_computer_stats)
    print(message_ref_computer % time_ref_computer)
    ## Performing tests
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        #warnings.simplefilter("error")
        test_pythonUtils.test()
        test_io.test()
        test_utils.test()
        test_neighsinfo.test()
        test_preprocess.test()
        test_retriever.test()
        test_spatial_discretizer.test()
        test_descriptormodels.test()
        test_features_objects.test()
        test_features_retriever.test()
        test_perturbation.test()
        test_spatial_relations.test()
        test_spdescriptormodels.test()
        test_interpolation.test()
        test_sampling.test()
        test_api.test()
    ## Closing tests
    time_own_computer = str(np.round(time.time()-t0, 2))
    print(message_own_computer % time_own_computer)
Example #4
def basic_sigmoid_test(target):
    x = 1
    expected_output = 0.7310585786300049
    test_cases = [{
        "name": "datatype_check",
        "input": [x],
        "expected": float,
        "error": "Datatype mismatch."
    }, {
        "name": "equation_output_check",
        "input": [x],
        "expected": expected_output,
        "error": "Wrong output."
    }]

    test(test_cases, target)
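A candidate implementation (a sketch; the test only pins down the value at x = 1, where 1 / (1 + e^-1) ≈ 0.7310585786300049):

import math

def basic_sigmoid(x):
    # Scalar sigmoid; math.exp keeps the return type a plain float.
    return 1 / (1 + math.exp(-x))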
Example #5
def L1_test(target):
    yhat = np.array([.9, 0.2, 0.1, .4, .9])
    y = np.array([1, 0, 0, 1, 1])
    expected_output = 1.1
    test_cases = [{
        "name": "datatype_check",
        "input": [yhat, y],
        "expected": float,
        "error": "The function should return a float."
    }, {
        "name": "equation_output_check",
        "input": [yhat, y],
        "expected": expected_output,
        "error": "Wrong output"
    }]

    test(test_cases, target)
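For these inputs the expected value checks out by hand: |1-0.9| + |0-0.2| + |0-0.1| + |1-0.4| + |1-0.9| = 0.1 + 0.2 + 0.1 + 0.6 + 0.1 = 1.1. A matching implementation (an illustrative sketch):

import numpy as np

def L1(yhat, y):
    # Sum of absolute errors, cast so the datatype_check sees a plain float.
    return float(np.sum(np.abs(y - yhat)))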
Example #6
def main(args):
    train_loader, val_loader = custom_data_loader.customDataloader(args)

    model = custom_model.buildModel(args)
    optimizer, scheduler, records = solver_utils.configOptimizer(args, model)
    criterion = solver_utils.Criterion(args)
    recorder  = recorders.Records(args.log_dir, records)

    tf_train_writer, tf_test_writer = tfboard.tensorboard_init()

    for epoch in range(args.start_epoch, args.epochs+1):
        scheduler.step()
        recorder.insertRecord('train', 'lr', epoch, scheduler.get_lr()[0])

        train_utils.train(args, train_loader, model, criterion, optimizer, log, epoch, recorder, tf_train_writer)
        if epoch % args.save_intv == 0: 
            model_utils.saveCheckpoint(args.cp_dir, epoch, model, optimizer, recorder.records, args)

        if epoch % args.val_intv == 0:
            test_utils.test(args, 'val', val_loader, model, log, epoch, recorder, tf_test_writer)
Example #7
def sigmoid_derivative_test(target):
    x = np.array([1, 2, 3])
    expected_output = np.array([0.19661193, 0.10499359, 0.04517666])
    test_cases = [{
        "name": "datatype_check",
        "input": [x],
        "expected": np.ndarray,
        "error": "The function should return a numpy array."
    }, {
        "name": "shape_check",
        "input": [x],
        "expected": expected_output,
        "error": "Wrong shape."
    }, {
        "name": "equation_output_check",
        "input": [x],
        "expected": expected_output,
        "error": "Wrong output."
    }]

    test(test_cases, target)
Example #8
def sigmoid_test(target):
    x = np.array([1, 2, 3])
    expected_output = np.array([0.73105858, 0.88079708, 0.95257413])
    test_cases = [{
        "name": "datatype_check",
        "input": [x],
        "expected": np.ndarray,
        "error": "Datatype mismatch."
    }, {
        "name": "shape_check",
        "input": [x],
        "expected": expected_output,
        "error": "Wrong shape."
    }, {
        "name": "equation_output_check",
        "input": [x],
        "expected": expected_output,
        "error": "Wrong output."
    }]

    test(test_cases, target)
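Sketches that would satisfy this test and the sigmoid_derivative_test above, using the identity s'(x) = s(x)(1 - s(x)):

import numpy as np

def sigmoid(x):
    # Vectorized sigmoid over a numpy array.
    return 1 / (1 + np.exp(-x))

def sigmoid_derivative(x):
    s = sigmoid(x)
    return s * (1 - s)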
Example #9
def final_test(model, CFG, final_test_loaders):
    test_loaders_src, test_loaders_tar = final_test_loaders  # test loaders for the source and target domains
    # Run the tests
    acc_test_srcs, acc_test_tars = [], []  # accuracy containers
    for i in range(len(test_loaders_tar)):  # iterate over all test sets
        # test_loader_src, test_loader_tar = test_loaders_src[i], test_loaders_tar[i]  # one name has an "s", the other does not
        acc_collection_test, loss_collection_test, _ = test(
            model, test_loaders_src[i], test_loaders_tar[i], 0,
            CFG)  # test each test set in turn
        acc_test_srcs.append(acc_collection_test[0])  # keep the accuracies at entries 0 and 6 as the final voted results
        acc_test_tars.append(acc_collection_test[6])
    return acc_test_srcs, acc_test_tars
Example #10
def normalizeRows_test(target):
    x = np.array([[0, 3, 4], [1, 6, 4]])
    expected_output = np.array([[0., 0.6, 0.8],
                                [0.13736056, 0.82416338, 0.54944226]])

    test_cases = [{
        "name": "datatype_check",
        "input": [x],
        "expected": np.ndarray,
        "error": "The function should return a numpy array."
    }, {
        "name": "shape_check",
        "input": [x],
        "expected": expected_output,
        "error": "Wrong shape"
    }, {
        "name": "equation_output_check",
        "input": [x],
        "expected": expected_output,
        "error": "Wrong output"
    }]

    test(test_cases, target)
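A sketch that matches the expected values (each row is divided by its Euclidean norm; for the second row that norm is sqrt(1 + 36 + 16) = sqrt(53)):

import numpy as np

def normalizeRows(x):
    # Divide each row by its L2 norm; keepdims makes the division broadcast.
    x_norm = np.linalg.norm(x, ord=2, axis=1, keepdims=True)
    return x / x_norm

Example #11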
### Test - stage 6
### Part of: Blivet test collection
### Author: [email protected]
### This program is under GPL licence.
import classes
import test_utils

loginst_test = test_utils.init_logging(0, None, True)
test_utils.create_new_alloc_table("vdb")
test_utils.create_new_partition("vdb", "extended", 1, -1)

tsep = classes.SystemExtended_Scan('vdb', 1)
tbep = classes.BlivetInitialization('vdb', 1).child
loginst_test.debug("Comparing object attributes")
ia = test_utils.test(tsep, tbep)
test_utils.write_issues(ia, "Extended partition test", 6)
start = 1  # assumed: this snippet is truncated and the original file defines `start` earlier
finish = 512 + start


## Test partitions
list_of_tests = []
list_of_blivet = []
list_of_ia = []

loginst_test.debug("Setting SystemPartitionFormatted_Scan")
for inc in range(4):
    loginst_test.debug("Setting partition {}".format(inc + 1))
    if inc == 3:
        test_utils.create_new_partition("vdb", "primary", start, -1)
    else:
        test_utils.create_new_partition("vdb", "primary", start, finish)
        start = finish + 1
        finish = start + 512
    loginst_test.debug("Setting partition {} - formatting to {}".format(inc + 1, "ext4"))
    test_utils.format_new_partition("{}{}".format("vdb", inc + 1), "ext4")

    ## init objects
    list_of_tests.append(classes.SystemPartitionFormatted_Scan('vdb', inc + 1))
    list_of_blivet.append(classes.BlivetInitialization('vdb', inc + 1).child)

    ## Store in arrays
    loginst_test.debug("Comparing SystemPartitionFormatted_Scan - partition {} with Blivet instance.".format(inc + 1))
    list_of_ia.append(test_utils.test(list_of_tests[inc], list_of_blivet[inc]))

    ## Store in file.
    test_utils.write_issues(list_of_ia[inc], "Multi partition - part {}".format(inc + 1), 5)
Example #13
                                      presort=False)
    # (snippet truncated: the DecisionTreeClassifier construction is cut off above)

    # Train this round and record the results
    clf_tree = clf_tree.fit(carSet.data,
                            carSet.target,
                            sample_weight=weightArrays)
    trs.append(clf_tree)
    preds = clf_tree.predict(carSet.data)
    #print(preds)
    pbs = clf_tree.predict_proba(carSet.data)

    # Prepare the next round: compute the error rate r and the weight alpha
    r = 1 - clf_tree.score(
        carSet.data, carSet.target, sample_weight=weightArrays)
    alpha = 0.5 * math.log((1 - r) / r)
    alphas.append(alpha)
    for i in range(len(carSet.data)):
        for l0 in range(labels):
            for l1 in range(labels):
                D[i, l0, l1] *= math.exp(0.5 * alpha * (pbs[i][l0] - pbs[i][l1]))
    Z = sum(sum(sum(D)))
    D /= Z

print("Decision tree training finished!\n")
print("Running predictions\n")
# Test data and expected targets
test_utils.test(test_data, test_target, trs, alphas, numRound, labels)
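Example #14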
### Test - stage 2
### Part of: Blivet test collection
### Author: [email protected]
### This program is under GPL licence.
import classes
import test_utils

loginst_test = test_utils.init_logging(0, None, True)
test_utils.create_new_alloc_table("vdb")
loginst_test.debug("Setting up classes.SystemDiskFormatted_Scan")
test_system_formatted = classes.SystemDiskFormatted_Scan('vdb')
test_blivet_formatted = classes.BlivetInitialization('vdb').disk
loginst_test.debug("Comparing object attributes")
ia = test_utils.test(test_system_formatted, test_blivet_formatted)
test_utils.write_issues(ia, "Formatted disk", 2)
Example #15
def main(args):
    test_loader = custom_data_loader.benchmarkLoader(args)
    model = custom_model.buildModel(args)
    recorder = recorders.Records(args.log_dir)
    test_utils.test(args, 'test', test_loader, model, log, 1, recorder)
    print('\ncreating top-5 nodule dataset...')
    data, label = create_test_dataset(args.sorted_slices_dir, 1)
    testds = prep_dataset(data, label)
    test_loader = data_utils.DataLoader(testds, batch_size=args.batch_size, shuffle=False)
    print('  dataset ready!')

    model = Predictor()
    if args.cuda:
        model.cuda()

    if args.cuda:
        model.load_state_dict(torch.load(args.resume))
    else:
        model.load_state_dict(torch.load(args.resume, map_location=torch.device('cpu')))

    output1 = np.load('src/preds3D_val.npy') #subtraction of year 2000 and 1999 masks + sum + sigmoid scores
    output, label = test(model, test_loader, args)
    output = output.data.cpu().numpy()
    PP = 0.68
    outputx = output[:,1]*PP + output1[:,0]*(1-PP)

    AUC = roc_auc_score(label, outputx)
    print('\n%s\nAUC: %.4f\n%s'%(28*'*',AUC,28*'*'))

    print('\ntotal elapsed time: %0.2f min\n' % ((time.time() - ini_t_time)/60.0))




### Test - stage 3
### Part of: Blivet test collection
### Author: [email protected]
### This program is under GPL licence.
import classes
import test_utils

loginst_test = test_utils.init_logging(0, None, True)
test_utils.create_new_alloc_table("vdb")
test_utils.create_new_partition("vdb", "primary", 1, -1)

loginst_test.debug("Setting up classes.SystemPartition_Scan")
test_blivet_partition = classes.BlivetInitialization('vdb', 1).child
test_system_partition = classes.SystemPartition_Scan('vdb', 1)
loginst_test.debug("Comparing object attributes")
ia = test_utils.test(test_system_partition, test_blivet_partition)
test_utils.write_issues(ia, "Partitioned disk", 3)
### Test - stage 1
### Part of: Blivet test collection
### Author: [email protected]
### This program is under GPL licence.
import classes
import test_utils

loginst_test = test_utils.init_logging(0, None, True)
loginst_test.debug("Setting up classes.SystemDisk_Scan")
test_system = classes.SystemDisk_Scan('vdb')
test_blivet = classes.BlivetInitialization('vdb').disk
loginst_test.debug("Comparing object attributes")
ia = test_utils.test(test_system, test_blivet)
test_utils.write_issues(ia, "Basic disk", 1)
Example #19
# Basic automated test_utils for the search API
#
from test_utils import test


def check_photo(animals):
    for animal in animals:
        try:
            if animal['MainPhoto']['default'].find('jpg/320-') > 0:
                return True
        except (KeyError, TypeError):
            pass  # animal has no usable photo entry; skip it
    return False


test1 = test('/search?AnimalType=ALL&Location=ALL&StatusCategory=available',
             lambda check: check_photo(check['response']))

details = [
    test('/animal?Id=%d' % animal['AnimalId'], lambda ok: True)
    for animal in test1['response'][0:9]
]
sortable = [a['response']['Id'] for a in details]

if sortable != sorted(sortable):
    raise RuntimeError("the sortable items are not in order: " + str(sortable))

test(
    '/search?AnimalType=Cat&Location=ALL&StatusCategory=rescue',
    lambda check: set(['Cat']) == set(
        [animal['AnimalType'] for animal in check['response']]))
Example #20
def train(loaders, model, optimizer, scheduler, CFG):
    acc_test_tar_max = 0  # best target-domain test accuracy so far; a .pt checkpoint is saved whenever it improves
    train_loader_src, test_loader_src, train_loader_tar, test_loader_tar = loaders  # data loaders
    len_src_loader, len_tar_loader = len(train_loader_src), len(train_loader_tar)  # number of minibatches per loader
    # running-average meters for the losses
    train_loss_clf_src, train_loss_clf_tar, train_loss_tran, train_loss_total = (
        utils.AverageMeter(), utils.AverageMeter(),
        utils.AverageMeter(), utils.AverageMeter())
    # collectors for the accuracies and losses over the whole run
    acc_collection, loss_collection, acc_collection_test, loss_collection_test, assistant_collection = [], [], [], [], []
    loss_24, loss_24_epoch, loss_24_collection, loss_24_collection_test = [], [], [], []  # containers for the 24 tracked losses

    # for ep in range(CFG['epoch']):  # iterate over the epochs
    for ep in range(50):  # iterate over the epochs
        # lambda is adjusted with the epoch (the usual 2 / (1 + exp(-10 * p)) - 1 ramp-up)
        labda1 = 2 / (1 + math.exp(-10 * ep / CFG['epoch'])) - 1
        CFG['lambda'] = 1 * labda1  # dynamic balance between classification loss and transfer loss
        # learning-rate decay
        if ep > 50:  # decay the learning rate after epoch 50 (never reached with the 50-epoch loop above)
            if 'RevGrad' in CFG['tranmodel']:  # RevGrad models are handled separately
                scheduler[0].step()  # learning rate of FE+TC
                scheduler[1].step()  # learning rate of DomClf
            else:
                scheduler.step()  # learning rate of FE+TC

        model.train()  # switch the model to training mode
        # storage for the training metrics
        corr_src, corr_tar = 0, 0  # correct-sample counts, for models without voting
        corr_multi_src, corr_multi_tar = [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]  # correct-sample counts, for multi-classifier voting networks
        corr_voted_src, corr_voted_tar = 0, 0  # correct-sample counts after voting
        loss_tran_multi, loss_clf_multi_src, loss_sum = [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]  # per-branch loss records
        # batch iterators over the source and target training sets
        iter_src, iter_tar = iter(train_loader_src), iter(train_loader_tar)
        n_batch = min(len_src_loader, len_tar_loader)  # batch count, in case source and target have different numbers of batches
        # loss functions
        loss_clf_fun = torch.nn.CrossEntropyLoss()  # cross-entropy loss
        #loss_clf_fun = F.nll_loss                  # CrossEntropyLoss() = log_softmax() + NLLLoss()
        loss_mse_fun = torch.nn.MSELoss()  # MSE loss
        time_cost = 0  # cumulative training time

        for ib in range(n_batch):  # iterate over the batches
            data_src, label_src = next(iter_src)  # fetch the next batch (next(it), not it.next(), on Python 3)
            data_tar, label_tar = next(iter_tar)
            data_src, label_src = data_src.to(DEVICE), label_src.to(DEVICE)  # move the current batch to the compute device
            data_tar, label_tar = data_tar.to(DEVICE), label_tar.to(DEVICE)  # copy one batch at a time to the GPU to limit memory use

            if 'RevGrad' in CFG['tranmodel']:
                optimizer[0].zero_grad()  # clear the gradients for this batch
                optimizer[1].zero_grad()
            else:
                optimizer.zero_grad()  # clear the gradients for this batch

            clfout_src, feamap_src = model(data_src)  # forward pass on the source and target samples
            clfout_tar, feamap_tar = model(data_tar)
            model.train()
            ###############################################
            start_time = time.perf_counter()  # batch start time (time.clock() was removed in Python 3.8)

            ###############################################
            ''' MSTVM voting network '''
            ###############################################

            if CFG['tranmodel'] == 'DDC_MSTVM':
                loss_tran = 0
                loss_multi_tran = []
                for ic in range(len(feamap_src)):  # iterate over the branches
                    loss_tran_temp = loss_adapt_fun(feamap_src[ic], feamap_tar[ic], 'mmd')
                    loss_tran += loss_tran_temp
                    loss_multi_tran.append(loss_tran_temp)

                return_packt = voter_multi_classifier(
                    clfout_src[1:], clfout_tar[1:], label_src, label_tar,
                    corr_multi_src, corr_multi_tar, corr_voted_src, corr_voted_tar)
                (corr_multi_src, corr_multi_tar, corr_voted_src, corr_voted_tar,
                 loss_multi_clsclf_src, loss_multi_clsclf_tar,
                 loss_clsclf_src, loss_clsclf_tar) = return_packt

                loss = loss_clsclf_src + CFG['lambda'] * loss_tran

                loss_24 = ([loss, 0, 0, 0, 0, 0, loss_clsclf_src] + loss_multi_clsclf_src
                           + [loss_clsclf_tar] + loss_multi_clsclf_tar
                           + [loss_tran] + loss_multi_tran)

            ###############################################
            if CFG['tranmodel'] == 'RevGrad_MSTVM':
                loss_domclf_src, loss_multi_domclf_src = RevGrad_assistant(
                    model.DomClf_net, feamap_src, label_src, 0)  # gradient-reversal helper
                loss_domclf_tar, loss_multi_domclf_tar = RevGrad_assistant(
                    model.DomClf_net, feamap_tar, label_tar, 1)
                loss_tran = loss_domclf_src + loss_domclf_tar

                return_packt = voter_multi_classifier(
                    clfout_src[1:], clfout_tar[1:], label_src, label_tar,
                    corr_multi_src, corr_multi_tar, corr_voted_src, corr_voted_tar)
                (corr_multi_src, corr_multi_tar, corr_voted_src, corr_voted_tar,
                 loss_multi_clsclf_src, loss_multi_clsclf_tar,
                 loss_clsclf_src, loss_clsclf_tar) = return_packt
                loss_domclf = loss_tran  # used for the separate DomClf parameter update
                loss = loss_clsclf_src + CFG['lambda'] * loss_tran

                loss_multi_tran = []
                for il in range(len(loss_multi_domclf_src)):
                    loss_multi_tran.append(loss_multi_domclf_src[il] + loss_multi_domclf_tar[il])
                loss_24 = ([loss, 0, 0, 0, 0, 0, loss_clsclf_src] + loss_multi_clsclf_src
                           + [loss_clsclf_tar] + loss_multi_clsclf_tar
                           + [loss_tran] + loss_multi_tran)

            ###############################################
            ''' Aggregation '''
            ###############################################
            # backpropagate the losses
            if 'RevGrad' in CFG['tranmodel']:
                loss_domclf.backward(retain_graph=True)  # domain discriminator: backpropagate to compute the gradients
                optimizer[1].step()  # domain discriminator: apply the updates to the weights and biases
                loss.backward()  # backbone: backpropagate to compute the gradients
                optimizer[0].step()  # backbone: apply the updates to the weights and biases
            else:
                loss.backward()  # backpropagate to compute the gradients
                optimizer.step()  # apply the updates to the weights and biases

            # timing
            end_time = time.perf_counter()  # batch end time
            time_cost += end_time - start_time  # accumulate the per-batch training time

            train_loss_clf_src.update(loss_clsclf_src.item())  # add this batch's loss to the running average
            train_loss_clf_tar.update(loss_clsclf_tar.item())
            train_loss_tran.update(loss_tran.item())
            train_loss_total.update(loss.item())
            if ib % CFG['log_interval'] == 0:  # log every CFG['log_interval'] batches
                print(
                    'Train Epoch: [{}/{} ({:02d}%)], cls_Loss: {:.4f}, loss_tran: {:.4f}, total_Loss: {:.4f}'
                    .format(ep + 1, CFG['epoch'], int(100. * ib / n_batch),
                            train_loss_clf_src.avg, train_loss_tran.avg,
                            train_loss_total.avg))

            # loss bookkeeping
            for i24 in range(24):
                # a non-zero entry is a torch.Tensor, so .item() converts it to a plain float;
                # a zero entry is just a placeholder and is kept as-is
                loss_24[i24] = loss_24[i24].item() if loss_24[i24] != 0 else loss_24[i24]
            loss_24_epoch.append(loss_24)  # one row per batch: the 24 losses computed in that batch
        loss_24_epoch_avg = []  # container for the average losses
        num_batch = len(loss_24_epoch)  # number of batches
        for i1 in range(24):  # iterate over the 24 loss values
            loss_temp = 0
            for i2 in range(num_batch):  # iterate over the batches
                loss_temp += loss_24_epoch[i2][i1]  # sum each loss along the batch dimension
            loss_24_epoch_avg.append(loss_temp / num_batch)  # average loss over the batches
        loss_24_collection.append(loss_24_epoch_avg)  # collect the 24 average losses of every epoch

        multi_acc_src, multi_acc_tar = [], []  # accuracy containers for the branch classifiers
        for ic in range(5):  # iterate over the 5 branch classifiers
            # accuracy computed from the number of correctly classified samples
            acc_src_temp = 100. * corr_multi_src[ic].cpu().numpy() / len(train_loader_src.dataset)
            acc_tar_temp = 100. * corr_multi_tar[ic].cpu().numpy() / len(train_loader_tar.dataset)
            multi_acc_src.append(acc_src_temp)  # multi_acc_src holds the accuracies of the five branches
            multi_acc_tar.append(acc_tar_temp)
        acc_voted_src = 100. * corr_voted_src.cpu().numpy() / len(train_loader_src.dataset)  # accuracy after voting
        acc_voted_tar = 100. * corr_voted_tar.cpu().numpy() / len(train_loader_tar.dataset)
        # results to return
        acc_all = [acc_voted_src] + multi_acc_src + [acc_voted_tar] + multi_acc_tar  # gather all training accuracies
        acc_collection.append(acc_all)  # left to right: 1 source voted accuracy, 5 source branch accuracies, 1 target voted accuracy, 5 target branch accuracies
        loss_all = [
            train_loss_clf_src.avg, train_loss_clf_tar.avg,
            train_loss_tran.avg, train_loss_total.avg
        ]
        loss_collection.append(loss_all)  # left to right: source classification loss, target classification loss, transfer loss, total loss
        # console output
        print(multi_acc_src)  # the 5 source-branch accuracies for this epoch
        print(multi_acc_tar)  # the 5 target-branch accuracies for this epoch
        print([acc_voted_src, acc_voted_tar])  # the voted source and target accuracies
        print('Train: source_acc:{:.2f}%({}/{}),  target_acc:{:.2f}%({}/{})\n'.
              format(acc_voted_src, corr_voted_src,
                     len(train_loader_src.dataset), acc_voted_tar,
                     corr_voted_tar, len(train_loader_tar.dataset)))
        print('########\n')

        # evaluate on the test sets
        acc_collection_test_temp, loss_collection_test_temp, loss_24_epoch_avg_temp = test(
            model, test_loader_src, test_loader_tar, ep, CFG)
        acc_collection_test.append(acc_collection_test_temp)  # append the test accuracies
        loss_collection_test.append(loss_collection_test_temp)  # append the test losses
        loss_24_collection_test.append(loss_24_epoch_avg_temp)
        if ('RevGrad' in CFG['tranmodel']) or ('DCTLN' in CFG['tranmodel']):
            # scheduler[0] and scheduler[1] decay their learning rates at the same pace
            assistant_collection.append([scheduler[0].get_lr()[0], CFG['lambda'], time_cost])
        else:
            assistant_collection.append([scheduler.get_lr()[0], CFG['lambda'], time_cost])  # append the auxiliary records

        # decide whether to save a .pt checkpoint
        result_trace = [
            acc_collection, loss_collection, acc_collection_test,
            loss_collection_test, assistant_collection
        ]  # all records merged together
        acc_test_tar_current = acc_collection_test_temp[6]  # the current test accuracy is entry 6; the six entries before it are training accuracies
        if acc_test_tar_current > acc_test_tar_max:  # if the current accuracy beats the best so far
            acc_test_tar_max = acc_test_tar_current  # update the best accuracy
            if 'RevGrad' in CFG['tranmodel']:
                pt = {
                    'model': model.state_dict(),
                    'optimizer0': optimizer[0].state_dict(),
                    'optimizer1': optimizer[1].state_dict(),
                    'scheduler0': scheduler[0].state_dict(),
                    'scheduler1': scheduler[1].state_dict(),
                    'CFG': CFG,
                    'result_trace': result_trace
                }
            else:
                pt = {
                    'model': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'scheduler': scheduler.state_dict(),
                    'CFG': CFG,
                    'result_trace': result_trace
                }
            # and save the model that achieves this accuracy
            torch.save(pt, CFG['dir_now'] + '/' + CFG['tranmodel'] + '_maxacc.pt')

    trace = [
        acc_collection, loss_collection, loss_24_collection,
        acc_collection_test, loss_collection_test, loss_24_collection_test,
        assistant_collection
    ]  # full record
    return trace
Example #21
load_model = False

if args.resume != '':
    model_dict = model.state_dict()
    pretrained_dict = torch.load(args.resume)
    pretrained_dict = {
        k: v
        for k, v in pretrained_dict.items()
        if k in model_dict and v.size() == model_dict[k].size()
    }
    model_dict.update(pretrained_dict)
    model.load_state_dict(model_dict)
    res_flag = 1

if args.cuda:
    model.cuda()

if __name__ == '__main__':
    best_loss = None
    if load_model:
        best_loss = test(0)

epoch = 0
output, label = tu.test(epoch, model, test_loader, args)
output = output.data.cpu().numpy()

print('score(s): \n')
print(output[:, 1])

print('total elapsed time: %0.2f min\n' % ((time.time() - ini_t_time) / 60.0))
### Test - stage 4
### Part of: Blivet test collection
### Author: [email protected]
### This program is under GPL licence.
import classes
import test_utils

loginst_test = test_utils.init_logging(0, None, True)

test_utils.create_new_alloc_table("vdb")
test_utils.create_new_partition("vdb", "primary", 1, -1)
test_utils.format_new_partition("{}{}".format("vdb", 1), "ext4")

loginst_test.debug("Setting up SystemPartitionFormatted_Scan")
tspf = classes.SystemPartitionFormatted_Scan("vdb", 1)
tbpf = classes.BlivetInitialization("vdb", 1).child
loginst_test.debug("Comparing object attributes")
ia = test_utils.test(tspf, tbpf)
test_utils.write_issues(ia, "Single partition formatted", 4)