Example #1
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.utils.data as data
# H5DatasetSia and SimpleNet are project-local modules (not shown here)


def GeResult():
    # Device and default-tensor-type setup
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
    torch.cuda.set_device(0)

    # Dataset
    Dataset_validation = H5DatasetSia(root='data', mode='validation')
    Dataloader_validation = data.DataLoader(Dataset_validation,
                                            batch_size=1,
                                            num_workers=1,
                                            shuffle=True,
                                            pin_memory=True)

    # Network
    Network = SimpleNet(17)
    #net = torch.nn.DataParallel(Network, device_ids=[0])
    cudnn.benchmark = True
    Network.load_state_dict(
        torch.load(
            'weights/CEL_Tiers12_bs8_8cat10channel_SimpleNet/LCZ42_SGD_11_2999.pth'
        ))
    Network.eval()

    results = []
    results_anno = []

    # extract a 256-d embedding for every validation sample (no gradients needed)
    with torch.no_grad():
        for i, (Input_sen1, Input_sen2, Anno) in enumerate(Dataloader_validation):
            features = Network.features(Input_sen1.cuda(), Input_sen2.cuda())
            features256 = Network.features_256(features)

            results.append(features256[0].detach().cpu().numpy().tolist())
            results_anno.append(Anno)  # Anno stays a tensor; np.save stores an object array

            if (i + 1) % 1000 == 0:
                print(i + 1)

    np.save('data/features/845_features256_validation_results.npy', results)
    # note: the filename says 'training' but these are the validation annotations
    np.save('data/features/845_features256_training_results_anno.npy',
            results_anno)
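
The 256-d feature vectors and annotations saved above can feed a separate classifier. Below is a minimal sketch, assuming each saved annotation reduces to a single integer class index per sample; the RandomForestClassifier settings and the .pkl filename mirror the ones referenced in Examples #2 and #3.

import joblib
import numpy as np
from sklearn.ensemble import RandomForestClassifier

# Load the 256-d feature vectors and annotations written by Example #1.
features = np.load('data/features/845_features256_validation_results.npy')
annos = np.load('data/features/845_features256_training_results_anno.npy',
                allow_pickle=True)
labels = np.array([int(a) for a in annos])  # assumes one class index per entry

# Fit a random forest on the extracted features and persist it under the
# filename that Examples #2 and #3 load.
clf = RandomForestClassifier(n_estimators=100, max_features='log2')
clf.fit(features, labels)
joblib.dump(clf, '100_estimator_max_features_log2_RandomForest.pkl')
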
Example #2
import csv

import h5py
import numpy as np
import torch
import torch.backends.cudnn as cudnn
# SimpleNet and SimpleNetGN are project-local modules (not shown here)


def GeResult():
    cnt = 0  # unused
    # Dataset
    fid = h5py.File('data/round1_test_a_20181109.h5', 'r')  # open read-only
    Sen1_dataset = fid['sen1']
    Sen2_dataset = fid['sen2']
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
    torch.cuda.set_device(0)

    filepath_1 = 'weights/CEL_MIX_ValBalanceResam_bs8_CosineShedual_SimpleNet/LCZ42_SGD_11_7213'
    filepath_2 = 'weights/CEL_Tiers13_bs8_8cat10channel_SimpleNet/LCZ42_SGD_11_1999'
    filepath_3 = 'weights/CEL_Tiers12_bs8_8cat10channel_SimpleNet/LCZ42_SGD_11_2999'
    filepath_4 = 'weights/CEL_symme_Tiers12_bs32_8cat10channel_SimpleNetGN/LCZ42_SGD_15_753'

    # load Network 1
    Network_1 = SimpleNet(17)  #Network_1 = se_resnet50_shallow(17, None)
    net_1 = torch.nn.DataParallel(Network_1, device_ids=[0])
    cudnn.benchmark = True
    Network_1.load_state_dict(torch.load(filepath_1 + '.pth'))
    net_1.eval()

    Network_2 = SimpleNet(17)
    net_2 = torch.nn.DataParallel(Network_2, device_ids=[0])
    cudnn.benchmark = True
    Network_2.load_state_dict(torch.load(filepath_2 + '.pth'))
    net_2.eval()

    Network_3 = SimpleNet(17)  #Network_3 = se_resnet50_shallow(17, None)
    net_3 = torch.nn.DataParallel(Network_3, device_ids=[0])
    cudnn.benchmark = True
    Network_3.load_state_dict(torch.load(filepath_3 + '.pth'))
    net_3.eval()  # one eval() call suffices; net_3 wraps Network_3

    Network_4 = SimpleNetGN(17)
    net_4 = torch.nn.DataParallel(Network_4, device_ids=[0])
    cudnn.benchmark = True
    Network_4.load_state_dict(torch.load(filepath_4 + '.pth'))
    net_4.eval()

    # initialise the random forest and load its weights (disabled here)
    #clf = RandomForestClassifier(n_estimators = 100, max_features = 'log2')
    #clf = joblib.load('100_estimator_max_features_log2_RandomForest.pkl')

    # per-model weights derived from each checkpoint's confusion matrix
    weight_1 = np.load(filepath_1 + '.npy')
    #weight_1 = weight_1.astype('float') / weight_1.sum(axis=0)[np.newaxis, :]    #calculate precision
    weight_1 = weight_1.astype('float') / weight_1.sum(
        axis=1)[:, np.newaxis]  #calculate recall
    PreciWeight_1 = torch.diag(torch.from_numpy(weight_1))

    weight_2 = np.load(filepath_2 + '.npy')
    #weight_2 = weight_2.astype('float') / weight_2.sum(axis=0)[np.newaxis, :]    #calculate precision
    weight_2 = weight_2.astype('float') / weight_2.sum(
        axis=1)[:, np.newaxis]  #calculate recall
    PreciWeight_2 = torch.diag(torch.from_numpy(weight_2))

    weight_3 = np.load(filepath_3 + '.npy')
    #weight_3 = weight_3.astype('float') / weight_3.sum(axis=0)[np.newaxis, :]    #calculate precision
    weight_3 = weight_3.astype('float') / weight_3.sum(
        axis=1)[:, np.newaxis]  #calculate recall
    PreciWeight_3 = torch.diag(torch.from_numpy(weight_3))

    # concatenate the three per-class recall vectors and L1-normalise each
    # row, so the three model weights sum to 1 for every class
    # (PW is computed here but the weighted fusion below is commented out)
    PW = torch.nn.functional.normalize(
        torch.cat((PreciWeight_1.view(17, -1), PreciWeight_2.view(17, -1),
                   PreciWeight_3.view(17, -1)), 1), 1)

    # load mean and std
    sen1mean = np.load('data/mean_et_std/round1_test_a_20181109_mean_sen1.npy')
    sen1std = np.load('data/mean_et_std/round1_test_a_20181109_std_sen1.npy')

    sen2mean = np.load('data/mean_et_std/round1_test_a_20181109_mean_sen2.npy')
    sen2std = np.load('data/mean_et_std/round1_test_a_20181109_std_sen2.npy')
    NN851 = []
    # open the CSV file and write one one-hot row per test sample
    with open('1223.csv', 'w', newline='') as csvfile:  # closed automatically on exit
        f = csv.writer(csvfile, delimiter=',')

        for index in range(len(Sen1_dataset)):
            Input_sen1 = (Sen1_dataset[index] - sen1mean) / sen1std
            Input_sen2 = (Sen2_dataset[index] - sen2mean) / sen2std

            Input_sen1 = torch.from_numpy(Input_sen1).permute(2, 0, 1).type(
                torch.FloatTensor)
            Input_sen1 = Input_sen1.unsqueeze(0)

            Input_sen2 = torch.from_numpy(Input_sen2).permute(2, 0, 1).type(
                torch.FloatTensor)
            Input_sen2 = Input_sen2.unsqueeze(0)

            preds_1 = net_1(Input_sen1.cuda(), Input_sen2.cuda())
            #_, pred_1 = preds_1.data.topk(1, 1, True, True)
            #result_1.append(pred_1.item())

            preds_2 = net_2(Input_sen1.cuda(), Input_sen2.cuda())
            #_, pred_2 = preds_2.data.topk(1, 1, True, True)
            #result_2.append(pred_2.item())
            preds_3 = net_3(Input_sen1.cuda(), Input_sen2.cuda())

            preds_4 = net_4(Input_sen1.cuda(), Input_sen2.cuda())
            # sum of L2-normalised logits: each model contributes equally
            preds = (torch.nn.functional.normalize(preds_1) +
                     torch.nn.functional.normalize(preds_2) +
                     torch.nn.functional.normalize(preds_3) +
                     torch.nn.functional.normalize(preds_4))
            #preds = torch.nn.functional.normalize(preds_1)*PW[:, 0].float().cuda() + torch.nn.functional.normalize(preds_2)*PW[:, 1].float().cuda() + torch.nn.functional.normalize(preds_3)*PW[:, 2].float().cuda()
            conf, pred = preds.data.topk(1, 1, True, True)

            #RF_Pred = clf.predict(preds[0].detach().cpu().numpy().reshape(1,-1)).tolist()
            #class_label = 18
            csvrow = []
            #RF851.append(RF_Pred[0])
            NN851.append(pred.item())
            for i in range(17):
                if i == pred.item():
                    csvrow.append('1')
                else:
                    csvrow.append('0')
            f.writerow(csvrow)
    np.save('NN851byBacth.npy', NN851)
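
The .npy file stored next to each checkpoint holds that model's confusion matrix: row-normalising it puts per-class recall on the diagonal, and L1-normalising the stacked recalls turns them into per-class fusion weights. A minimal sketch with toy 3-class matrices (hypothetical numbers; the 17-class case is identical):

import numpy as np
import torch

# Toy 3-class confusion matrices for two models: rows are true classes,
# columns are predicted classes.
cm_a = np.array([[8., 1., 1.], [2., 6., 2.], [0., 1., 9.]])
cm_b = np.array([[7., 2., 1.], [1., 8., 1.], [2., 2., 6.]])

def per_class_recall(cm):
    # Row-normalising makes row i the prediction distribution for true
    # class i, so the diagonal holds the per-class recall.
    recall = cm / cm.sum(axis=1)[:, np.newaxis]
    return torch.diag(torch.from_numpy(recall))

w_a = per_class_recall(cm_a).view(3, -1)  # recalls 0.8, 0.6, 0.9
w_b = per_class_recall(cm_b).view(3, -1)  # recalls 0.7, 0.8, 0.6

# L1-normalise each class row so the two model weights sum to 1 per class.
PW = torch.nn.functional.normalize(torch.cat((w_a, w_b), 1), p=1, dim=1)
print(PW)  # e.g. class 0 -> [0.533, 0.467]
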
Example #3
import h5py
import joblib
import numpy as np
import torch
import torch.backends.cudnn as cudnn
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
# SimpleNet is a project-local module (not shown here)


def GeResult():
    test = False  # set True when ground-truth labels are unavailable
    # Dataset
    mode = 'training'
    fid = h5py.File('data/' + mode + '.h5', 'r')  # open read-only
    Sen1_dataset = fid['sen1']
    Sen2_dataset = fid['sen2']

    if not test:
        label = fid['label']

    torch.set_default_tensor_type('torch.cuda.FloatTensor')
    torch.cuda.set_device(0)
    class_names = [
        'Compact high-rise', 'Compact midrise', 'Compact lowrise',
        'Open high-rise', 'Open midrise', 'Open lowrise',
        'Lightweight low-rise', 'Large low-rise', 'Sparsely built',
        'Heavy industry', 'Dense trees', 'Scattered trees', 'Bush and scrub',
        'Low plants', 'Bare rock or paved', 'Bare soil or sand', 'Water'
    ]

    # Network
    filepath_1 = 'weights/CEL_MIX_ValBalanceResam_bs8_CosineShedual_SimpleNet/LCZ42_SGD_11_7213'
    filepath_2 = 'weights/CEL_Tiers13_bs8_8cat10channel_SimpleNet/LCZ42_SGD_11_1999'
    filepath_3 = 'weights/CEL_Tiers12_bs8_8cat10channel_SimpleNet/LCZ42_SGD_11_2999'
    #Network_1 = ShallowResNeXt(num_classes = 17, depth = 11, cardinality = 16)
    Network_1 = SimpleNet(17)
    net_1 = torch.nn.DataParallel(Network_1, device_ids=[0])
    cudnn.benchmark = True
    Network_1.load_state_dict(torch.load(filepath_1 + '.pth'))
    net_1.eval()

    Network_2 = SimpleNet(17)
    net_2 = torch.nn.DataParallel(Network_2, device_ids=[0])
    cudnn.benchmark = True
    Network_2.load_state_dict(torch.load(filepath_2 + '.pth'))
    net_2.eval()

    Network_3 = SimpleNet(17)  #Network_3 = se_resnet50_shallow(17, None)
    net_3 = torch.nn.DataParallel(Network_3, device_ids=[0])
    cudnn.benchmark = True
    Network_3.load_state_dict(torch.load(filepath_3 + '.pth'))
    net_3.eval()  # one eval() call suffices; net_3 wraps Network_3

    # initialise the random forest and load its persisted weights
    # (the freshly constructed clf is immediately replaced by the loaded one,
    # and clf goes unused below since the RF prediction is commented out)
    clf = RandomForestClassifier(n_estimators=100, max_features='log2')
    clf = joblib.load('100_estimator_max_features_log2_RandomForest.pkl')

    # load each model's confusion matrix and derive per-class recall weights
    weight_1 = np.load(filepath_1 + '.npy')
    #weight_1 = weight_1.astype('float') / weight_1.sum(axis=0)[np.newaxis, :]    #calculate precision
    weight_1 = weight_1.astype('float') / weight_1.sum(
        axis=1)[:, np.newaxis]  #calculate recall
    PreciWeight_1 = torch.diag(torch.from_numpy(weight_1))

    weight_2 = np.load(filepath_2 + '.npy')
    #weight_2 = weight_2.astype('float') / weight_2.sum(axis=0)[np.newaxis, :]    #calculate precision
    weight_2 = weight_2.astype('float') / weight_2.sum(
        axis=1)[:, np.newaxis]  #calculate recall
    PreciWeight_2 = torch.diag(torch.from_numpy(weight_2))

    weight_3 = np.load(filepath_3 + '.npy')
    #weight_3 = weight_3.astype('float') / weight_3.sum(axis=0)[np.newaxis, :]    #calculate precision
    weight_3 = weight_3.astype('float') / weight_3.sum(
        axis=1)[:, np.newaxis]  #calculate recall
    PreciWeight_3 = torch.diag(torch.from_numpy(weight_3))

    #PW = torch.nn.functional.normalize(torch.cat((PreciWeight_1.view(17,-1), PreciWeight_3.view(17,-1)), 1), 1)
    # concatenate the three per-class recall vectors and L1-normalise each
    # row, so the three model weights sum to 1 for every class
    PW = torch.nn.functional.normalize(
        torch.cat((PreciWeight_1.view(17, -1), PreciWeight_2.view(17, -1),
                   PreciWeight_3.view(17, -1)), 1), 1)

    sen1mean = np.load('data/mean_et_std/' + mode + '_mean_sen1.npy')
    sen1std = np.load('data/mean_et_std/' + mode + '_std_sen1.npy')

    sen2mean = np.load('data/mean_et_std/' + mode + '_mean_sen2.npy')
    sen2std = np.load('data/mean_et_std/' + mode + '_std_sen2.npy')

    correct_index = []
    result_tensor = []
    result_permodel = [[], [], [], [], []]  # slots 1-3 hold per-model predictions; 0 and 4 are unused
    results = {'Pred': [], 'Conf': [], 'Anno': []}
    results_underThresh = {'Pred': [], 'Anno': []}
    results_anno = []
    soft_labels = []
    for index in range(len(Sen1_dataset)):

        Input_sen1 = (Sen1_dataset[index] - sen1mean) / sen1std
        Input_sen2 = (Sen2_dataset[index] - sen2mean) / sen2std

        Input_sen1 = torch.from_numpy(Input_sen1).permute(2, 0, 1).type(
            torch.FloatTensor)
        Input_sen1 = Input_sen1.unsqueeze(0)

        Input_sen2 = torch.from_numpy(Input_sen2).permute(2, 0, 1).type(
            torch.FloatTensor)
        Input_sen2 = Input_sen2.unsqueeze(0)

        Input_sen1 = Input_sen1.cuda()
        Input_sen2 = Input_sen2.cuda()

        if not test:
            AnnoTensor = torch.from_numpy(label[index])
            Anno = torch.squeeze(torch.nonzero(AnnoTensor)).item()
        #sen2nonstd = sen2nonstd.cuda()

        preds_1 = net_1(Input_sen1, Input_sen2)
        #preds_1normal = torch.nn.functional.normalize(preds_1)
        conf_1, pred_1 = preds_1.data.topk(1, 1, True, True)
        result_permodel[1].append(pred_1.item())

        preds_2 = net_2(Input_sen1, Input_sen2)
        #preds_2normal = torch.nn.functional.normalize(preds_2)
        conf_2, pred_2 = preds_2.data.topk(1, 1, True, True)
        result_permodel[2].append(pred_2.item())

        preds_3 = net_3(Input_sen1, Input_sen2)
        #preds_3normal = torch.nn.functional.normalize(preds_3)
        conf_3, pred_3 = preds_3.data.topk(1, 1, True, True)
        result_permodel[3].append(pred_3.item())

        #preds = preds_1 + preds_2
        #preds = torch.nn.functional.normalize(preds_1)*PW[:, 0].float().cuda() + torch.nn.functional.normalize(preds_3)*PW[:, 1].float().cuda()
        # L2-normalise each model's logits and weight them by per-class recall
        preds = (torch.nn.functional.normalize(preds_1) * PW[:, 0].float().cuda() +
                 torch.nn.functional.normalize(preds_2) * PW[:, 1].float().cuda() +
                 torch.nn.functional.normalize(preds_3) * PW[:, 2].float().cuda())

        conf, pred = preds.data.topk(1, 1, True, True)
        #if(pred.item() == np.nonzero(label[index])[0][0]):
        #print(pred.item(), np.nonzero(label[index])[0][0], index)
        soft_labels.append(preds[0].detach().cpu().numpy().tolist())
        #correct_index.append(index)
        #results_anno.append(Anno)
        #append prediction results
        results['Pred'].append(pred.item())  # (the RF variant would append RF_Pred[0] instead)
        results['Conf'].append(conf.item())
        results['Anno'].append(np.nonzero(label[index])[0][0])

        if index % 1000 == 0:
            print(index)  # progress indicator

    np.save(mode + '_soft_labels_851.npy', soft_labels)
    #np.save(mode + '_correct_index_851.npy',correct_index)
    #np.save('round1_test_a_20181109_results_anno.npy', results['Pred'])
    #np.save('results.npy', results)
    print('Accuracy of merged models: %0.6f' %
          accuracy_score(results['Anno'], results['Pred'], normalize=True))
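
For reference, the recall-weighted fusion in Example #3 reads as: L2-normalise each model's logit row, scale it by that model's weight column from PW, and sum. A standalone sketch with a hypothetical fuse helper, random stand-in logits and weights, run on CPU (so the .cuda() calls are dropped):

import torch

def fuse(logit_list, PW):
    """Recall-weighted fusion as in Example #3: L2-normalise each model's
    logits, scale by its per-class weight column, and sum."""
    fused = torch.zeros_like(logit_list[0])
    for k, logits in enumerate(logit_list):
        fused = fused + torch.nn.functional.normalize(logits) * PW[:, k].float()
    return fused

# Hypothetical logits from three models for one sample with 17 classes.
logits = [torch.randn(1, 17) for _ in range(3)]
PW = torch.nn.functional.normalize(torch.rand(17, 3), p=1, dim=1)  # stand-in weights
preds = fuse(logits, PW)
conf, pred = preds.topk(1, 1, True, True)
print(pred.item(), conf.item())
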