Example #1
def GeResult():
    # Priors
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
    torch.cuda.set_device(0)

    # Dataset
    Dataset_validation = H5DatasetSia(root='data', mode='validation')
    Dataloader_validation = data.DataLoader(Dataset_validation,
                                            batch_size=1,
                                            num_workers=1,
                                            shuffle=True,
                                            pin_memory=True)

    # Network
    Network = SimpleNet(17)
    #net = torch.nn.DataParallel(Network, device_ids=[0])
    cudnn.benchmark = True
    Network.load_state_dict(
        torch.load(
            'weights/CEL_Tiers12_bs8_8cat10channel_SimpleNet/LCZ42_SGD_11_2999.pth'
        ))
    Network.eval()

    results = []
    results_anno = []

    for i, (Input_sen1, Input_sen2, Anno) in enumerate(Dataloader_validation):

        features = Network.features(Input_sen1.cuda(), Input_sen2.cuda())
        features256 = Network.features_256(features)

        results.append(features256[0].detach().cpu().numpy().tolist())

        results_anno.append(Anno.item())
        if (i + 1) % 1000 == 0:
            print(i + 1)

    np.save('data/features/845_features256_validation_results.npy', results)
    np.save('data/features/845_features256_validation_results_anno.npy',
            results_anno)
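These snippets lean on a shared set of imports and project-local helpers (`H5DatasetSia`, `SimpleNet`, and friends). A minimal sketch of the imports they assume; the project-local module paths are guesses, not taken from the repository:

import csv
import h5py
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.backends.cudnn as cudnn
import torch.utils.data as data
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, confusion_matrix
import joblib  # older scikit-learn: from sklearn.externals import joblib

# project-local helpers referenced below; the module names are hypothetical
#from datasets import H5Dataset, H5DatasetSia, H5DatasetTensorAnno
#from models import (SimpleNet, SimpleNetGN, DenseNet, ShallowResNeXt,
#                    se_resnet50_shallow)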
Example #2
def ConstructNetwork(filepath, NetworkType, returnweight=True):
    if NetworkType == 'SimpleNet':
        Network = SimpleNet(17)
    elif NetworkType == 'SimpleNetGN':
        Network = SimpleNetGN(17)
    elif NetworkType == 'DenseNet':
        Network = DenseNet(num_classes=17)
    elif NetworkType == 'ShallowResNeXt':
        Network = ShallowResNeXt(num_classes=17, depth=11, cardinality=16)
    else:
        raise ValueError('unknown NetworkType: ' + NetworkType)
    net = torch.nn.DataParallel(Network, device_ids=[0])
    cudnn.benchmark = True
    Network.load_state_dict(torch.load(filepath + '.pth'))
    net.eval()
    if returnweight:
        weight = np.load(filepath + '.npy')
        #weight = weight.astype('float') / weight.sum(axis=0)[np.newaxis, :]    #per-class precision
        weight = weight.astype('float') / weight.sum(
            axis=1)[:, np.newaxis]  #per-class recall
        PreciWeight = torch.diag(
            torch.from_numpy(weight))  #despite the name, this holds recall
        return net, PreciWeight
    else:
        return net
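A hypothetical call site for ConstructNetwork, reusing a checkpoint path that appears in Example #1 (whether a matching confusion-matrix .npy sits next to it is an assumption):

net, recall_weight = ConstructNetwork(
    'weights/CEL_Tiers12_bs8_8cat10channel_SimpleNet/LCZ42_SGD_11_2999',
    'SimpleNet')  # wrapped net plus per-class recall diagonal
net_only = ConstructNetwork(
    'weights/CEL_Tiers12_bs8_8cat10channel_SimpleNet/LCZ42_SGD_11_2999',
    'SimpleNet', returnweight=False)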
Example #3
def GeResult():
    # Priors
    cnt = 0
    # Dataset
    fid = h5py.File('data/round1_test_a_20181109.h5', 'r')
    Sen1_dataset = fid['sen1']
    Sen2_dataset = fid['sen2']
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
    torch.cuda.set_device(0)

    filepath_1 = 'weights/CEL_MIX_ValBalanceResam_bs8_CosineShedual_SimpleNet/LCZ42_SGD_11_7213'
    filepath_2 = 'weights/CEL_Tiers13_bs8_8cat10channel_SimpleNet/LCZ42_SGD_11_1999'
    filepath_3 = 'weights/CEL_Tiers12_bs8_8cat10channel_SimpleNet/LCZ42_SGD_11_2999'
    filepath_4 = 'weights/CEL_symme_Tiers12_bs32_8cat10channel_SimpleNetGN/LCZ42_SGD_15_753'

    # load Network 1
    Network_1 = SimpleNet(17)  #Network_1 = se_resnet50_shallow(17, None)
    net_1 = torch.nn.DataParallel(Network_1, device_ids=[0])
    cudnn.benchmark = True
    Network_1.load_state_dict(torch.load(filepath_1 + '.pth'))
    net_1.eval()

    Network_2 = SimpleNet(17)
    net_2 = torch.nn.DataParallel(Network_2, device_ids=[0])
    cudnn.benchmark = True
    Network_2.load_state_dict(torch.load(filepath_2 + '.pth'))
    net_2.eval()

    Network_3 = SimpleNet(17)  #Network_3 = se_resnet50_shallow(17, None)
    net_3 = torch.nn.DataParallel(Network_3, device_ids=[0])
    cudnn.benchmark = True
    Network_3.load_state_dict(torch.load(filepath_3 + '.pth'))
    net_3.eval()  #eval on the DataParallel wrapper also switches Network_3

    Network_4 = SimpleNetGN(17)
    net_4 = torch.nn.DataParallel(Network_4, device_ids=[0])
    cudnn.benchmark = True
    Network_4.load_state_dict(torch.load(filepath_4 + '.pth'))
    net_4.eval()

    # initialise the random forest and load its weights (disabled here)
    #clf = RandomForestClassifier(n_estimators = 100, max_features = 'log2')
    #clf = joblib.load('100_estimator_max_features_log2_RandomForest.pkl')

    #per-class weight for each model
    weight_1 = np.load(filepath_1 + '.npy')
    #weight_1 = weight_1.astype('float') / weight_1.sum(axis=0)[np.newaxis, :]    #calculate precision
    weight_1 = weight_1.astype('float') / weight_1.sum(
        axis=1)[:, np.newaxis]  #calculate recall
    PreciWeight_1 = torch.diag(torch.from_numpy(weight_1))

    weight_2 = np.load(filepath_2 + '.npy')
    #weight_2 = weight_2.astype('float') / weight_2.sum(axis=0)[np.newaxis, :]    #calculate precision
    weight_2 = weight_2.astype('float') / weight_2.sum(
        axis=1)[:, np.newaxis]  #calculate recall
    PreciWeight_2 = torch.diag(torch.from_numpy(weight_2))

    weight_3 = np.load(filepath_3 + '.npy')
    #weight_3 = weight_3.astype('float') / weight_3.sum(axis=0)[np.newaxis, :]    #calculate precision
    weight_3 = weight_3.astype('float') / weight_3.sum(
        axis=1)[:, np.newaxis]  #calculate recall
    PreciWeight_3 = torch.diag(torch.from_numpy(weight_3))

    PW = torch.nn.functional.normalize(
        torch.cat((PreciWeight_1.view(17, -1), PreciWeight_2.view(
            17, -1), PreciWeight_3.view(17, -1)), 1),
        1)  #concatenate the three per-class weights and L1-normalise across models
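    # PW has shape (17, 3): row c holds the three models' recall on class c,
    # L1-normalised across models (normalize with p=1 along dim=1). Note that
    # this example ends up using the unweighted sum further down; the
    # PW-weighted combination is left commented out after it.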

    # load mean and std
    sen1mean = np.load('data/mean_et_std/round1_test_a_20181109_mean_sen1.npy')
    sen1std = np.load('data/mean_et_std/round1_test_a_20181109_std_sen1.npy')

    sen2mean = np.load('data/mean_et_std/round1_test_a_20181109_mean_sen2.npy')
    sen2std = np.load('data/mean_et_std/round1_test_a_20181109_std_sen2.npy')
    NN851 = []
    #open the csv file and write the result
    with open('1223.csv', 'w',
              newline='') as csvfile:  # 'w' + newline='' replaces Python 2's 'wb'; with closes the file automatically
        f = csv.writer(csvfile, delimiter=',')

        for index in range(len(Sen1_dataset)):
            Input_sen1 = (Sen1_dataset[index] - sen1mean) / sen1std
            Input_sen2 = (Sen2_dataset[index] - sen2mean) / sen2std

            Input_sen1 = torch.from_numpy(Input_sen1).permute(2, 0, 1).type(
                torch.FloatTensor)
            Input_sen1 = Input_sen1.unsqueeze(0)

            Input_sen2 = torch.from_numpy(Input_sen2).permute(2, 0, 1).type(
                torch.FloatTensor)
            Input_sen2 = Input_sen2.unsqueeze(0)

            preds_1 = net_1(Input_sen1.cuda(), Input_sen2.cuda())
            #_, pred_1 = preds_1.data.topk(1, 1, True, True)
            #result_1.append(pred_1.item())

            preds_2 = net_2(Input_sen1.cuda(), Input_sen2.cuda())
            #_, pred_2 = preds_2.data.topk(1, 1, True, True)
            #result_2.append(pred_2.item())
            preds_3 = net_3(Input_sen1.cuda(), Input_sen2.cuda())

            preds_4 = net_4(Input_sen1.cuda(), Input_sen2.cuda())
            preds = (torch.nn.functional.normalize(preds_1) +
                     torch.nn.functional.normalize(preds_2) +
                     torch.nn.functional.normalize(preds_3) +
                     torch.nn.functional.normalize(preds_4))
            #preds = torch.nn.functional.normalize(preds_1)*PW[:, 0].float().cuda() + torch.nn.functional.normalize(preds_2)*PW[:, 1].float().cuda() + torch.nn.functional.normalize(preds_3)*PW[:, 2].float().cuda()
            conf, pred = preds.data.topk(1, 1, True, True)

            #RF_Pred = clf.predict(preds[0].detach().cpu().numpy().reshape(1,-1)).tolist()
            #class_label = 18
            #RF851.append(RF_Pred[0])
            NN851.append(pred.item())
            csvrow = ['1' if i == pred.item() else '0' for i in range(17)]
            f.writerow(csvrow)
    np.save('NN851byBatch.npy', NN851)
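The ensembling move used above, L2-normalising each model's logits before summing, can be pulled into a small helper. A sketch under the same shapes as this example (the helper name is mine, not the repository's):

def ensemble_logits(logit_list, class_weights=None):
    #sum L2-normalised logits; optionally scale model m by class_weights[:, m]
    total = torch.zeros_like(logit_list[0])
    for m, logits in enumerate(logit_list):
        normed = torch.nn.functional.normalize(logits)  #L2 along dim=1
        if class_weights is not None:
            normed = normed * class_weights[:, m].float().cuda()
        total = total + normed
    return total

#preds = ensemble_logits([preds_1, preds_2, preds_3, preds_4])            #unweighted, as above
#preds = ensemble_logits([preds_1, preds_2, preds_3], class_weights=PW)   #recall-weighted variant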
Example #4
def GeResult():
    # Priors

    # Dataset
    Dataset_validation = H5DatasetSia(root='data', mode='validation')
    Dataloader_validation = data.DataLoader(Dataset_validation,
                                            batch_size=1,
                                            num_workers=1,
                                            shuffle=True,
                                            pin_memory=True)

    torch.set_default_tensor_type('torch.cuda.FloatTensor')
    torch.cuda.set_device(0)
    class_names = [
        'Compact high-rise', 'Compact midrise', 'Compact lowrise',
        'Open high-rise', 'Open midrise', 'Open lowrise',
        'Lightweight low-rise', 'Large low-rise', 'Sparsely built',
        'Heavy industry', 'Dense trees', 'Scattered trees', 'Bush and scrub',
        'Low plants', 'Bare rock or paved', 'Bare soil or sand', 'Water'
    ]

    # Network
    filepath_1 = 'weights/CEL_bs8_8cat10channel_ShallowResNeXt/LCZ42_SGD_2_1999'
    filepath_2 = 'weights/8cat10channel_SimpleNet/LCZ42_SGD_1_41999'
    filepath_3 = 'weights/WithDropOut_bs8_sen2_se_resnet50_shallow/LCZ42_SGD_1_15999'

    Network_1 = ShallowResNeXt(num_classes=17, depth=11, cardinality=16)
    net_1 = torch.nn.DataParallel(Network_1, device_ids=[0])
    cudnn.benchmark = True
    Network_1.load_state_dict(torch.load(filepath_1 + '.pth'))
    net_1.eval()
    """
    Network_1 = SimpleNet(17)
    net_1 = torch.nn.DataParallel(Network_1, device_ids=[0])
    cudnn.benchmark = True    
    Network_1.load_state_dict(torch.load(filepath_1 + '.pth'))
    net_1.eval()
    """

    Network_2 = SimpleNet(17)
    net_2 = torch.nn.DataParallel(Network_2, device_ids=[0])
    cudnn.benchmark = True
    Network_2.load_state_dict(torch.load(filepath_2 + '.pth'))
    net_2.eval()

    Network_3 = se_resnet50_shallow(17, None)
    net_3 = torch.nn.DataParallel(Network_3, device_ids=[0])
    cudnn.benchmark = True
    Network_3.load_state_dict(torch.load(filepath_3 + '.pth'))
    net_3.eval()

    # initialise the random forest, then replace it with the pickled, fitted model
    clf = RandomForestClassifier(n_estimators=40, max_features='log2')
    clf = joblib.load('40_estimator_max_features_log2_RandomForest.pkl')

    #load per-class weights (recall is used below; the precision variant is commented out)
    weight_1 = np.load(filepath_1 + '.npy')
    #weight_1 = weight_1.astype('float') / weight_1.sum(axis=0)[np.newaxis, :]    #calculate precision
    weight_1 = weight_1.astype('float') / weight_1.sum(
        axis=1)[:, np.newaxis]  #calculate recall
    PreciWeight_1 = torch.diag(torch.from_numpy(weight_1))

    weight_2 = np.load(filepath_2 + '.npy')
    #weight_2 = weight_2.astype('float') / weight_2.sum(axis=0)[np.newaxis, :]    #calculate precision
    weight_2 = weight_2.astype('float') / weight_2.sum(
        axis=1)[:, np.newaxis]  #calculate recall
    PreciWeight_2 = torch.diag(torch.from_numpy(weight_2))

    weight_3 = np.load(filepath_3 + '.npy')
    #weight_3 = weight_3.astype('float') / weight_3.sum(axis=0)[np.newaxis, :]    #calculate precision
    weight_3 = weight_3.astype('float') / weight_3.sum(
        axis=1)[:, np.newaxis]  #calculate recall
    PreciWeight_3 = torch.diag(torch.from_numpy(weight_3))

    PW = torch.nn.functional.normalize(
        torch.cat((PreciWeight_1.view(17, -1), PreciWeight_2.view(
            17, -1), PreciWeight_3.view(17, -1)), 1),
        1)  #concatenate the three per-class weights and L1-normalise across models
    result_tensor = []
    result_permodel = [[], [], [], []]
    results = {'Pred': [], 'Conf': [], 'Anno': []}
    results_underThresh = {'Pred_RF': [], 'Pred': [], 'Anno': []}
    results_anno = []
    for i, (Input_sen1, Input_sen2, sen2nonstd,
            Anno) in enumerate(Dataloader_validation):
        Input_sen1 = Input_sen1.cuda()
        Input_sen2 = Input_sen2.cuda()
        sen2nonstd = sen2nonstd.cuda()

        preds_1 = net_1(Input_sen1, Input_sen2)
        #preds_1normal = torch.nn.functional.normalize(preds_1)
        conf_1, pred_1 = preds_1.data.topk(1, 1, True, True)
        result_permodel[1].append(pred_1.item())

        preds_2 = net_2(Input_sen1, Input_sen2)
        #preds_2normal = torch.nn.functional.normalize(preds_2)
        conf_2, pred_2 = preds_2.data.topk(1, 1, True, True)
        result_permodel[2].append(pred_2.item())

        preds_3 = net_3(sen2nonstd)
        #preds_3normal = torch.nn.functional.normalize(preds_3)
        conf_3, pred_3 = preds_3.data.topk(1, 1, True, True)
        result_permodel[3].append(pred_3.item())

        preds = torch.nn.functional.normalize(preds_1) * PW[:, 0].float().cuda(
        ) + torch.nn.functional.normalize(preds_2) * PW[:, 1].float().cuda(
        ) + torch.nn.functional.normalize(preds_3) * PW[:, 2].float().cuda()
        RF_Pred = clf.predict(preds[0].detach().cpu().numpy().reshape(
            1, -1)).tolist()
        #preds = preds_1normal*PW[:, 0].float().cuda() + preds_1normal*PW[:, 1].float().cuda() + preds_3normal*PW[:, 2].float().cuda()
        #result_tensor.append(preds[0].detach().cpu().numpy().tolist())
        conf, pred = preds.data.topk(1, 1, True, True)

        results_anno.append(Anno)
        #append prediction results
        results['Pred'].append(pred.item())  #alternatively RF_Pred[0]
        results['Conf'].append(conf.item())
        results['Anno'].append(Anno.item())
        '''
        if conf_select.item() > 0.5:
            results['Pred'].append(pred_select.item())
        else :
            results['Pred'].append(pred.item())
        '''
        if i % 10000 == 0:
            print(i)
        #append annotation results
        #print(conf.item())
        if conf.item() > 0.7:
            results_underThresh['Pred'].append(pred.item())
            results_underThresh['Pred_RF'].append(RF_Pred[0])
            #append annotation results
            results_underThresh['Anno'].append(Anno.item())

    #np.save('Realresult_tensor.npy',result_tensor)
    #np.save('Real_training_results_anno.npy', results_anno)
    #np.save('results.npy', results)
    print(
        accuracy_score(results_underThresh['Pred'],
                       results_underThresh['Anno'],
                       normalize=True))
    print(
        accuracy_score(results_underThresh['Pred_RF'],
                       results_underThresh['Anno'],
                       normalize=True))
    print(
        accuracy_score(result_permodel[1], result_permodel[2],
                       normalize=True))  #agreement between models 1 and 2
    print(len(results_underThresh['Pred']), len(Dataloader_validation))

    print('Accuracy under threshold: %0.6f' %
          (accuracy_score(results_underThresh['Pred'],
                          results_underThresh['Anno'],
                          normalize=True)))

    print('Accuracy: %0.6f' %
          (accuracy_score(results['Pred'], results['Anno'], normalize=True)))
    cnf_matrix = confusion_matrix(results['Anno'], results['Pred'])
    #np.save('results/confmat_Merge.npy',cnf_matrix)
    #np.set_printoptions(precision=2)
    # Plot non-normalized confusion matrix
    cnf_tr = np.trace(cnf_matrix)
    cnf_tr = cnf_tr.astype('float')

    print(cnf_tr / len(Dataset_validation))
    plt.figure()
    plot_confusion_matrix(cnf_matrix,
                          classes=class_names,
                          title='Confusion matrix, without normalization')
    plt.figure()
    plot_confusion_matrix(cnf_matrix,
                          classes=class_names,
                          normalize=True,
                          title='Normalized confusion matrix')
    plt.show()
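plot_confusion_matrix is not defined in these snippets; it behaves like the classic helper from the scikit-learn documentation. A minimal sketch under that assumption:

import itertools

def plot_confusion_matrix(cm, classes, normalize=False,
                          title='Confusion matrix', cmap=plt.cm.Blues):
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    ticks = np.arange(len(classes))
    plt.xticks(ticks, classes, rotation=90)
    plt.yticks(ticks, classes)
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], '.2f' if normalize else 'd'),
                 horizontalalignment='center',
                 color='white' if cm[i, j] > thresh else 'black')
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()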
Example #5
def GeResult():
    # Priors
    cnt = 0
    mode = 'round1_test_a_20181109'
    '''
    826
    filepath_1 = 'weights/MIX_CEL_lr4e-3_bs8_8cat10channel_SimpleNet/LCZ42_SGD_7_1999'
    filepath_2 = 'weights/ReMIX_CEL_lr4e-3_bs8_8cat10channel_SimpleNet/LCZ42_SGD_3_1999'
    '''
    filepath_1 = 'weights/CEL_Tiers23_bs8_8cat10channel_SimpleNet/LCZ42_SGD_4_11999'
    filepath_2 = 'weights/CEL_Tiers13_bs8_8cat10channel_SimpleNet/LCZ42_SGD_4_15999'
    # Dataset
    fid = h5py.File('data/' + mode + '.h5', 'r')
    Sen1_dataset = fid['sen1']
    Sen2_dataset = fid['sen2']
    #Target_tensor = fid['label']
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
    torch.cuda.set_device(0)

    #filepath_2 = 'weights/likeval826_CEL_bs8_8cat10channel_SimpleNet/LCZ42_SGD_9_1999'

    # load Network 1
    Network_1 = SimpleNet(17)
    net_1 = torch.nn.DataParallel(Network_1, device_ids=[0])
    cudnn.benchmark = True
    Network_1.load_state_dict(torch.load(filepath_1 + '.pth'))
    net_1.eval()
    # load Network 2
    Network_2 = SimpleNet(17)
    net_2 = torch.nn.DataParallel(Network_2, device_ids=[0])
    cudnn.benchmark = True
    Network_2.load_state_dict(torch.load(filepath_2 + '.pth'))
    net_2.eval()

    # initialise the random forest and load its weights (disabled here)
    #clf = RandomForestClassifier(n_estimators = 40, max_features = 'log2')
    #clf = joblib.load('100_estimator_max_features_log2_RandomForest.pkl')

    #weight for each model
    weight_1 = np.load(filepath_1 + '.npy')
    weight_1 = weight_1.astype('float') / weight_1.sum(
        axis=0)[np.newaxis, :]  #calculate precision
    #weight_1 = weight_1.astype('float') / weight_1.sum(axis=1)[:, np.newaxis]     #calculate recall
    PreciWeight_1 = torch.diag(torch.from_numpy(weight_1))

    weight_2 = np.load(filepath_2 + '.npy')
    weight_2 = weight_2.astype('float') / weight_2.sum(
        axis=0)[np.newaxis, :]  #calculate precision
    #weight_2 = weight_2.astype('float') / weight_2.sum(axis=1)[:, np.newaxis]     #calculate recall
    PreciWeight_2 = torch.diag(torch.from_numpy(weight_2))

    concated_weight = torch.cat(
        (PreciWeight_1.view(17, -1), PreciWeight_2.view(17, -1)), 1)
    PW = torch.nn.functional.normalize(
        concated_weight, 1)  #L1-normalise the two concatenated per-class weights
    PW_1 = PreciWeight_1.gt(PreciWeight_2)  #classes where model 1 scores higher (unused below)
    PW_2 = PreciWeight_2.gt(PreciWeight_1)  #classes where model 2 scores higher (unused below)

    # load mean and std
    sen1mean = np.load('data/mean_et_std/' + mode + '_mean_sen1.npy')
    sen1std = np.load('data/mean_et_std/' + mode + '_std_sen1.npy')

    sen2mean = np.load('data/mean_et_std/' + mode + '_mean_sen2.npy')
    sen2std = np.load('data/mean_et_std/' + mode + '_std_sen2.npy')
    indices = []

    #open the csv file and write the result
    with open('MergeTiers_23_13.csv', 'w',
              newline='') as csvfile:  # 'w' + newline='' replaces Python 2's 'wb'; with closes the file automatically
        f = csv.writer(csvfile, delimiter=',')

        for index in range(len(Sen1_dataset)):
            Input_sen1 = (Sen1_dataset[index] - sen1mean) / sen1std
            Input_sen2 = (Sen2_dataset[index] - sen2mean) / sen2std

            #AnnoTensor = torch.from_numpy(Target_tensor[index])
            #Anno = torch.squeeze(torch.nonzero(AnnoTensor)).item()

            sen2nonstd = torch.from_numpy(Input_sen2 * sen2std).permute(
                2, 0, 1).type(torch.FloatTensor)  #undoes the std scaling only; unused below
            sen2nonstd = sen2nonstd.unsqueeze(0)

            Input_sen1 = torch.from_numpy(Input_sen1).permute(2, 0, 1).type(
                torch.FloatTensor)
            Input_sen1 = Input_sen1.unsqueeze(0)

            Input_sen2 = torch.from_numpy(Input_sen2).permute(2, 0, 1).type(
                torch.FloatTensor)
            Input_sen2 = Input_sen2.unsqueeze(0)

            preds_1 = net_1(Input_sen1.cuda(), Input_sen2.cuda())
            #_, pred_1 = preds_1.data.topk(1, 1, True, True)
            #result_1.append(pred_1.item())

            preds_2 = net_2(Input_sen1.cuda(), Input_sen2.cuda())
            #_, pred_2 = preds_2.data.topk(1, 1, True, True)
            #result_2.append(pred_2.item())

            preds = torch.nn.functional.normalize(preds_1) * PW[:, 0].float(
            ).cuda() + torch.nn.functional.normalize(
                preds_2) * PW[:, 1].float().cuda()
            conf, pred = preds.data.topk(1, 1, True, True)

            #RF_Pred = clf.predict(preds[0].detach().cpu().numpy().reshape(1,-1)).tolist()
            '''
            if (pred.item() == Anno):
                indices.append(index)
            '''
            if index % 1000 == 0:
                print(index)
            csvrow = ['1' if i == pred.item() else '0' for i in range(17)]
            f.writerow(csvrow)
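Unlike the earlier examples, this one weights models by per-class precision (confusion matrix normalised along axis 0) rather than recall (axis 1). Both reductions side by side, assuming cm is a 17x17 confusion matrix with true classes along the rows (the path is hypothetical):

cm = np.load('weights/some_checkpoint.npy').astype('float')  #hypothetical path
precision_per_class = np.diag(cm / cm.sum(axis=0)[np.newaxis, :])  #column-normalised
recall_per_class = np.diag(cm / cm.sum(axis=1)[:, np.newaxis])     #row-normalised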
Example #6
def main():
    #create model
    idxs = np.load('AnalysisTools/MulResampleIndex.npy')
    if args.MultiLabel is None:
        Dataset_train = H5Dataset(root=args.dataset_root, mode='training')
    else:
        Dataset_train = H5DatasetTensorAnno(root=args.dataset_root,
                                            mode='training',
                                            indices=idxs)
    Dataloader_train = data.DataLoader(Dataset_train,
                                       args.batch_size,
                                       num_workers=args.num_workers,
                                       shuffle=True,
                                       pin_memory=True)

    Dataset_validation = H5DatasetSia(root=args.dataset_root, mode='validation')
    Dataloader_validation = data.DataLoader(Dataset_validation,
                                            batch_size=1,
                                            num_workers=args.num_workers,
                                            shuffle=True,
                                            pin_memory=True)

    torch.set_default_tensor_type('torch.cuda.FloatTensor')
    torch.cuda.set_device(0)

    if args.basenet == 'ResNeXt':
        model = CifarResNeXt(num_classes=17, depth=29, cardinality=8)
    elif args.basenet == 'ShallowResNeXt':
        model = ShallowResNeXt(num_classes=17, depth=11, cardinality=8)
    elif args.basenet == 'pnasnet':
        model = pnasnet5large(args.class_num, None)
        if args.resume:
            model.load_state_dict(torch.load(args.resume))
        else:
            state_dict = torch.load('pnasnet5large-bf079911.pth')
            state_dict.pop('last_linear.bias')
            state_dict.pop('last_linear.weight')
            model.load_state_dict(state_dict, strict=False)
            init.xavier_uniform_(model.last_linear.weight.data)
            model.last_linear.bias.data.zero_()

    elif args.basenet == 'se_resnet101':
        model = se_resnet101(args.class_num, None)
        if args.resume:
            model.load_state_dict(torch.load(args.resume))
        else:
            state_dict = torch.load('se_resnet101-7e38fcc6.pth')
            state_dict.pop('last_linear.bias')
            state_dict.pop('last_linear.weight')
            model.load_state_dict(state_dict, strict=False)
            init.xavier_uniform_(model.last_linear.weight.data)
            model.last_linear.bias.data.zero_()

    elif args.basenet == 'se_resnet50':
        model = se_resnet50(args.class_num, None)

    elif args.basenet == 'se_resnet50_shallow':
        model = se_resnet50_shallow(args.class_num, None)

    elif args.basenet == 'se_resnet50_shallow_sia':
        model = se_resnet50_shallow_sia(args.class_num, None)

    elif args.basenet == 'SimpleNet':
        model = SimpleNet(args.class_num)
    elif args.basenet == 'SimpleNetLeaky':
        model = SimpleNetLeaky(args.class_num)
    elif args.basenet == 'se_resnext101_32x4d':
        model = se_resnext101_32x4d(args.class_num, None)
        if args.resume:
            model.load_state_dict(torch.load(args.resume))
        else:
            state_dict = torch.load('se_resnext101_32x4d-3b2fe3d8.pth')
            state_dict.pop('last_linear.bias')
            state_dict.pop('last_linear.weight')
            model.load_state_dict(state_dict, strict=False)
            init.xavier_uniform_(model.last_linear.weight.data)
            model.last_linear.bias.data.zero_()

    model = model.cuda()
    cudnn.benchmark = True

    #weights = torch.FloatTensor(weights)
    if args.MultiLabel is None:
        criterion = nn.CrossEntropyLoss().cuda()
    else:
        criterion = nn.SoftMarginLoss().cuda()

    Optimizer = optim.SGD(filter(lambda p: p.requires_grad,
                                 model.parameters()),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay,
                          nesterov=True)
    #torch.save(model.state_dict(), 'weights/SML_8cat10channel_'+ args.basenet +'/'+ 'LCZ42_SGD' + '.pth')

    for epoch in range(args.start_epoch, args.epochs):
        #adjust_learning_rate(Optimizer, epoch)
        # train for one epoch
        train(Dataloader_train, model, criterion, Optimizer, epoch,
              Dataloader_validation)
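main() reads a module-level args namespace that is defined elsewhere in the repository. A hypothetical argparse block covering the flags it touches; every default here is a guess, not the project's value:

import argparse

parser = argparse.ArgumentParser(description='LCZ42 training')
parser.add_argument('--dataset_root', default='data')
parser.add_argument('--basenet', default='SimpleNet')
parser.add_argument('--class_num', default=17, type=int)
parser.add_argument('--batch_size', default=8, type=int)
parser.add_argument('--num_workers', default=1, type=int)
parser.add_argument('--lr', default=4e-3, type=float)
parser.add_argument('--momentum', default=0.9, type=float)
parser.add_argument('--weight_decay', default=5e-4, type=float)
parser.add_argument('--start_epoch', default=0, type=int)
parser.add_argument('--epochs', default=12, type=int)
parser.add_argument('--resume', default=None)
parser.add_argument('--MultiLabel', default=None)
args = parser.parse_args()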
Example #7
def GeResult():
    # Priors
    test = False
    # Dataset
    mode = 'training'
    fid = h5py.File('data/' + mode + '.h5', 'r')
    Sen1_dataset = fid['sen1']
    Sen2_dataset = fid['sen2']

    if not test:
        label = fid['label']

    torch.set_default_tensor_type('torch.cuda.FloatTensor')
    torch.cuda.set_device(0)
    class_names = [
        'Compact high-rise', 'Compact midrise', 'Compact lowrise',
        'Open high-rise', 'Open midrise', 'Open lowrise',
        'Lightweight low-rise', 'Large low-rise', 'Sparsely built',
        'Heavy industry', 'Dense trees', 'Scattered trees', 'Bush and scrub',
        'Low plants', 'Bare rock or paved', 'Bare soil or sand', 'Water'
    ]

    # Network
    filepath_1 = 'weights/CEL_MIX_ValBalanceResam_bs8_CosineShedual_SimpleNet/LCZ42_SGD_11_7213'
    filepath_2 = 'weights/CEL_Tiers13_bs8_8cat10channel_SimpleNet/LCZ42_SGD_11_1999'
    filepath_3 = 'weights/CEL_Tiers12_bs8_8cat10channel_SimpleNet/LCZ42_SGD_11_2999'
    #Network_1 = ShallowResNeXt(num_classes = 17, depth = 11, cardinality = 16)
    Network_1 = SimpleNet(17)
    net_1 = torch.nn.DataParallel(Network_1, device_ids=[0])
    cudnn.benchmark = True
    Network_1.load_state_dict(torch.load(filepath_1 + '.pth'))
    net_1.eval()

    Network_2 = SimpleNet(17)
    net_2 = torch.nn.DataParallel(Network_2, device_ids=[0])
    cudnn.benchmark = True
    Network_2.load_state_dict(torch.load(filepath_2 + '.pth'))
    net_2.eval()

    Network_3 = SimpleNet(17)  #Network_3 = se_resnet50_shallow(17, None)
    net_3 = torch.nn.DataParallel(Network_3, device_ids=[0])
    cudnn.benchmark = True
    Network_3.load_state_dict(torch.load(filepath_3 + '.pth'))
    net_3.eval()  #eval on the DataParallel wrapper also switches Network_3

    # initialise the random forest, then replace it with the pickled, fitted model
    clf = RandomForestClassifier(n_estimators=100, max_features='log2')
    clf = joblib.load('100_estimator_max_features_log2_RandomForest.pkl')

    #load per-class weights (recall is used below; the precision variant is commented out)
    weight_1 = np.load(filepath_1 + '.npy')
    #weight_1 = weight_1.astype('float') / weight_1.sum(axis=0)[np.newaxis, :]    #calculate precision
    weight_1 = weight_1.astype('float') / weight_1.sum(
        axis=1)[:, np.newaxis]  #calculate recall
    PreciWeight_1 = torch.diag(torch.from_numpy(weight_1))

    weight_2 = np.load(filepath_2 + '.npy')
    #weight_2 = weight_2.astype('float') / weight_2.sum(axis=0)[np.newaxis, :]    #calculate precision
    weight_2 = weight_2.astype('float') / weight_2.sum(
        axis=1)[:, np.newaxis]  #calculate recall
    PreciWeight_2 = torch.diag(torch.from_numpy(weight_2))

    weight_3 = np.load(filepath_3 + '.npy')
    #weight_3 = weight_3.astype('float') / weight_3.sum(axis=0)[np.newaxis, :]    #calculate precision
    weight_3 = weight_3.astype('float') / weight_3.sum(
        axis=1)[:, np.newaxis]  #calculate recall
    PreciWeight_3 = torch.diag(torch.from_numpy(weight_3))

    #PW = torch.nn.functional.normalize(torch.cat((PreciWeight_1.view(17,-1), PreciWeight_3.view(17,-1)), 1), 1)
    PW = torch.nn.functional.normalize(
        torch.cat((PreciWeight_1.view(17, -1), PreciWeight_2.view(
            17, -1), PreciWeight_3.view(17, -1)), 1), 1)
    #concatenate the three per-class weights and L1-normalise across models

    sen1mean = np.load('data/mean_et_std/' + mode + '_mean_sen1.npy')
    sen1std = np.load('data/mean_et_std/' + mode + '_std_sen1.npy')

    sen2mean = np.load('data/mean_et_std/' + mode + '_mean_sen2.npy')
    sen2std = np.load('data/mean_et_std/' + mode + '_std_sen2.npy')

    correct_index = []
    result_tensor = []
    result_permodel = [[], [], [], [], []]
    results = {'Pred': [], 'Conf': [], 'Anno': []}
    results_underThresh = {'Pred': [], 'Anno': []}
    results_anno = []
    soft_labels = []
    for index in range(len(Sen1_dataset)):

        Input_sen1 = (Sen1_dataset[index] - sen1mean) / sen1std
        Input_sen2 = (Sen2_dataset[index] - sen2mean) / sen2std

        Input_sen1 = torch.from_numpy(Input_sen1).permute(2, 0, 1).type(
            torch.FloatTensor)
        Input_sen1 = Input_sen1.unsqueeze(0)

        Input_sen2 = torch.from_numpy(Input_sen2).permute(2, 0, 1).type(
            torch.FloatTensor)
        Input_sen2 = Input_sen2.unsqueeze(0)

        Input_sen1 = Input_sen1.cuda()
        Input_sen2 = Input_sen2.cuda()

        if not test:
            AnnoTensor = torch.from_numpy(label[index])
            Anno = torch.squeeze(torch.nonzero(AnnoTensor)).item()
        #sen2nonstd = sen2nonstd.cuda()

        preds_1 = net_1(Input_sen1, Input_sen2)
        #preds_1normal = torch.nn.functional.normalize(preds_1)
        conf_1, pred_1 = preds_1.data.topk(1, 1, True, True)
        result_permodel[1].append(pred_1.item())

        preds_2 = net_2(Input_sen1, Input_sen2)
        #preds_2normal = torch.nn.functional.normalize(preds_2)
        conf_2, pred_2 = preds_2.data.topk(1, 1, True, True)
        result_permodel[2].append(pred_2.item())

        preds_3 = net_3(Input_sen1, Input_sen2)
        #preds_3normal = torch.nn.functional.normalize(preds_3)
        conf_3, pred_3 = preds_3.data.topk(1, 1, True, True)
        result_permodel[3].append(pred_3.item())

        #preds = preds_1 + preds_2
        #preds = torch.nn.functional.normalize(preds_1)*PW[:, 0].float().cuda() + torch.nn.functional.normalize(preds_3)*PW[:, 1].float().cuda()
        preds = torch.nn.functional.normalize(preds_1) * PW[:, 0].float().cuda(
        ) + torch.nn.functional.normalize(preds_2) * PW[:, 1].float().cuda(
        ) + torch.nn.functional.normalize(preds_3) * PW[:, 2].float().cuda()

        conf, pred = preds.data.topk(1, 1, True, True)
        #if(pred.item() == np.nonzero(label[index])[0][0]):
        #print(pred.item(), np.nonzero(label[index])[0][0], index)
        soft_labels.append(preds[0].detach().cpu().numpy().tolist())
        #correct_index.append(index)
        #results_anno.append(Anno)
        #append prediction results
        results['Pred'].append(pred.item())
        results['Conf'].append(conf.item())
        results['Anno'].append(np.nonzero(label[index])[0][0])

        if index % 1000 == 0:
            print(index)
        #append annotation results
        #print(conf.item())

    np.save(mode + '_soft_labels_851.npy', soft_labels)
    #np.save(mode + '_correct_index_851.npy',correct_index)
    #np.save('round1_test_a_20181109_results_anno.npy', results['Pred'])
    #np.save('results.npy', results)
    print('Accuracy of merged Models: %0.6f' %
          (accuracy_score(results['Pred'], results['Anno'], normalize=True)))
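Example #8 below evaluates a checkpoint whose path says 'Distillation', presumably trained on soft labels like the ones saved here. A sketch of a standard distillation loss over the saved logits; the temperature, weighting, and formulation are conventional choices of mine, not taken from the repository:

import torch.nn.functional as F

teacher_logits = torch.from_numpy(
    np.load('training_soft_labels_851.npy')).float()  #(N, 17) ensemble logits

def distillation_loss(student_logits, teacher_logits, hard_targets,
                      T=4.0, alpha=0.7):
    #KL between softened teacher and student distributions plus plain CE
    soft = F.kl_div(F.log_softmax(student_logits / T, dim=1),
                    F.softmax(teacher_logits / T, dim=1),
                    reduction='batchmean') * (T * T)
    hard = F.cross_entropy(student_logits, hard_targets)
    return alpha * soft + (1 - alpha) * hard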
Example #8
def GeResult():
    # Priors

    # Dataset
    fid = h5py.File('data/round1_test_a_20181109.h5', 'r')
    Sen1_dataset = fid['sen1']
    Sen2_dataset = fid['sen2']
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
    torch.cuda.set_device(0)

    # Network

    #Network = pnasnet5large(6, None)
    #Network = ResNeXt101_64x4d(6)
    Network = SimpleNet(17)  #Network_1 = se_resnet50_shallow(17, None)
    #Network = se_resnet50(17, None)
    net = torch.nn.DataParallel(Network, device_ids=[0])
    cudnn.benchmark = True

    Network.load_state_dict(
        torch.load(
            'weights/Distillation_MIX_ValBalanceResam_bs8_SimpleNet/LCZ42_SGD_11_7213.pth'
        ))
    net.eval()

    sen1mean = np.load('data/mean_et_std/round1_test_a_20181109_mean_sen1.npy')
    sen1std = np.load('data/mean_et_std/round1_test_a_20181109_std_sen1.npy')

    sen2mean = np.load('data/mean_et_std/round1_test_a_20181109_mean_sen2.npy')
    sen2std = np.load('data/mean_et_std/round1_test_a_20181109_std_sen2.npy')

    results = []
    with open('Distillation_11_7213.csv', 'w',
              newline='') as csvfile:  # 'w' + newline='' replaces Python 2's 'wb'; with closes the file automatically
        f = csv.writer(csvfile, delimiter=',')

        for index in range(len(Sen1_dataset)):
            Input_sen1 = (Sen1_dataset[index] - sen1mean) / sen1std
            Input_sen2 = (Sen2_dataset[index] - sen2mean) / sen2std

            Input_sen1 = torch.from_numpy(Input_sen1).permute(2, 0, 1).type(
                torch.FloatTensor)
            Input_sen1 = Input_sen1.unsqueeze(0)
            Input_sen1 = Input_sen1.cuda()

            Input_sen2 = torch.from_numpy(Input_sen2).permute(2, 0, 1).type(
                torch.FloatTensor)
            Input_sen2 = Input_sen2.unsqueeze(0)
            Input_sen2 = Input_sen2.cuda()

            preds = net(Input_sen1, Input_sen2)
            _, pred = preds.data.topk(1, 1, True, True)

            results.append(pred.item())
            csvrow = ['1' if i == pred.item() else '0' for i in range(17)]
            f.writerow(csvrow)
        np.save('Single_model_result.npy', results)
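To sanity-check the submission, the one-hot rows can be collapsed back into class indices and a class histogram; a short sketch against the CSV written above:

onehot = np.loadtxt('Distillation_11_7213.csv', delimiter=',')
pred_indices = onehot.argmax(axis=1).astype(int)  #class index per test sample
print(pred_indices.shape[0], np.bincount(pred_indices, minlength=17))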