Code example #1
0
    def __init__(self, csv_path, dataset_path, bag_refer_list, val_refer_list, logUtil=None, cuda_device=2, description=""):
        """Prepare train/validation datasets and loaders for one bagging run.

        Args:
            csv_path: Path to the label CSV file.
            dataset_path: Path to the dataset ``.npy`` file.
            bag_refer_list: Path to a ``.npy`` index list selecting the training subset.
            val_refer_list: Path to a ``.npy`` index list selecting the validation subset.
            logUtil: Optional logging helper stored on ``self.log``.
            cuda_device: CUDA device index this run should use.
            description: Free-form description of the run.
        """
        self.log = logUtil
        self.printlog("Current PID: " + str(os.getpid()))
        self.device = cuda_device
        self.description = description

        # Both dataset views read the same CSV/npy pair; they differ only in
        # the index list and the mode flag.
        train_set = DataUtils.DatasetLoader(
            csv_path,
            dataset_path,
            refer_list=np.load(bag_refer_list),
            mode="Train",
            up_size=self.UP_SIZE,
        )
        valid_set = DataUtils.DatasetLoader(
            csv_path,
            dataset_path,
            refer_list=np.load(val_refer_list),
            mode="Valid",
            up_size=self.UP_SIZE,
        )

        self.trainloader = torch.utils.data.DataLoader(
            train_set, batch_size=self.BATCH_SIZE, num_workers=2, shuffle=True)
        self.validloader = torch.utils.data.DataLoader(
            valid_set, batch_size=self.BATCH_SIZE, num_workers=2, shuffle=True)

        # Best validation accuracy observed so far.
        self.max_accu = 0
Code example #2
0
File: ValidBagging.py  Project: jarrycyx/cyx_resnet
import numpy as np
import torch

from bagging.MergeResults import BaggingResult
from Utils import DataUtils

# --- Configuration for validating the bagging ensemble ---
csv_path = "q1_data/train2.csv"       # label CSV
dataset_path = "q1_data/train.npy"    # image data
val_refer_list = "bagging/val.npy"    # validation index list
BATCH_SIZE = 20
CUDA_DEVICE = 2
CLASS_NUM = 100
UP_SIZE = (224, 224)                  # input resize fed to the models

# Checkpoints of the three bagged models (A/B/C).
bag_pkl_paths = [
    "./pklmodels/Class100_A_epoch_40.pkl",
    "./pklmodels/Class100_B_epoch_40.pkl",
    "./pklmodels/Class100_C_epoch_40.pkl",
]

# Validation dataset restricted to the held-out index list.
ValDataset = DataUtils.DatasetLoader(
    csv_path,
    dataset_path,
    refer_list=np.load(val_refer_list),
    mode="Valid",
    up_size=UP_SIZE,
)
validloader = torch.utils.data.DataLoader(
    ValDataset,
    batch_size=BATCH_SIZE,
    num_workers=2,
    shuffle=True,
)
# Ensemble wrapper that loads every bagged checkpoint onto the chosen device.
results = BaggingResult(CUDA_DEVICE, bag_pkl_paths=bag_pkl_paths, class_num=CLASS_NUM)


merge_accuracy = []                              # ensemble accuracy, one entry per batch
split_accuracy = [[] for _ in bag_pkl_paths]     # per-model accuracy, one list per model

for batch_idx, batch in enumerate(validloader):
    _, val_x, val_label = batch
    merge_res, split_res = results.pred(val_x)

    # Fraction of correct predictions in this batch for the merged vote.
    merge_accuracy.append((val_label == merge_res).numpy().mean())
    print(batch_idx*BATCH_SIZE, " - ", (batch_idx+1)*BATCH_SIZE)

    # Same bookkeeping for each individual bagged model.
    # NOTE(review): assumes split_res's first dimension matches
    # len(bag_pkl_paths) — confirm against BaggingResult.pred.
    for model_idx, model_res in enumerate(split_res):
        split_accuracy[model_idx].append((val_label == model_res).numpy().mean())
Code example #3
0
# --- Configuration for test-set inference / submission writing ---
save_csv_path = "q1_data/samplesummision_class100.csv"  # output submission CSV
testset_path = "q1_data/test.npy"
BATCH_SIZE = 20
CUDA_DEVICE = 2
CLASS_NUM = 100
UP_SIZE = (224, 224)

# Header row of the submission CSV.
csvheader = ["image_id", "fine_label"]

# Bagged model checkpoints.
# NOTE(review): filenames say "Class20" while CLASS_NUM is 100 — confirm these
# are the intended checkpoints for this run.
bag_pkl_paths = [
    "./pklmodels/Class20_A_epoch_40.pkl",
    "./pklmodels/Class20_B_epoch_40.pkl",
    "./pklmodels/Class20_C_epoch_40.pkl",
]

# Test dataset built from the sample-submission CSV and the test .npy file.
testDataset = DataUtils.DatasetLoader(
    "q1_data/samplesummission1.csv",
    testset_path,
    mode="Test",
    up_size=UP_SIZE,
)

setsize = len(testDataset)

# shuffle=False keeps batch order aligned with sample indices.
testloader = torch.utils.data.DataLoader(
    testDataset, batch_size=BATCH_SIZE, num_workers=2, shuffle=False)

# Ensemble wrapper that loads every bagged checkpoint.
results = BaggingResult(
    CUDA_DEVICE, bag_pkl_paths=bag_pkl_paths, class_num=CLASS_NUM)

resdata = -np.ones(setsize)  # Default label -1
for i, data in enumerate(testloader):
    index, val_x, val_label = data
    print(np.min(index.numpy()), " - ", np.max(index.numpy()))