Example #1
    def __init__(self, csv_path, dataset_path, bag_refer_list, val_refer_list, logUtil=None, cuda_device=2, description=""):
        self.log = logUtil
        self.printlog("Current PID: " + str(os.getpid()))
        self.device = cuda_device
        self.description = description
        
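        # build the training and validation datasets from the same CSV + .npy archive,
        # selecting samples through the two refer lists passed to the constructor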
        TrainDataset = DataUtils.DatasetLoader(csv_path, dataset_path, refer_list=np.load(bag_refer_list),
                                               mode="Train", up_size=self.UP_SIZE)
        ValDataset = DataUtils.DatasetLoader(csv_path, dataset_path, refer_list=np.load(val_refer_list), 
                                             mode="Valid", up_size=self.UP_SIZE)
        
        self.trainloader = torch.utils.data.DataLoader(TrainDataset, batch_size=self.BATCH_SIZE, num_workers=2, shuffle=True)
        self.validloader = torch.utils.data.DataLoader(ValDataset, batch_size=self.BATCH_SIZE, num_workers=2, shuffle=True)

        self.max_accu = 0
Example #2
# deprecation._PRINT_DEPRECATION_WARNINGS = False
# tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.WARNING)
# tf.get_logger().setLevel('INFO')
# tf.autograph.set_verbosity(1)
import sys, os
import numpy as np
# import tensorflow.contrib.eager as tfe
# Enable eager execution
# tfe.enable_eager_execution()
# import logging
# logging.disable(logging.WARNING)
import resnet
import vgg_preprocessing
import utils_imagenet
from configparser import ConfigParser
from Utils import DataUtils
utils = DataUtils()

##################################################

if len(sys.argv) != 2:  # We have to give 1 arg
    print('Arguments: config')
    sys.exit(-1)

cp = ConfigParser()
cp.read(sys.argv[1])
cp = cp[os.path.basename(__file__)]
#######################################################
_DEFAULT_IMAGE_SIZE = 224
_NUM_CHANNELS = 3
_BIAS_EPOCHS = 2
_NUM_TRAIN_FILES = 1024
Example #3
import numpy as np
import torch
from bagging.MergeResults import BaggingResult
from Utils import DataUtils

csv_path = "q1_data/train2.csv"
dataset_path = "q1_data/train.npy"
val_refer_list = "bagging/val.npy"
BATCH_SIZE = 20
CUDA_DEVICE = 2
CLASS_NUM = 100
UP_SIZE = (224,224)

bag_pkl_paths = ["./pklmodels/Class100_A_epoch_40.pkl",
                 "./pklmodels/Class100_B_epoch_40.pkl",
                 "./pklmodels/Class100_C_epoch_40.pkl"]

ValDataset = DataUtils.DatasetLoader(csv_path, dataset_path, refer_list=np.load(val_refer_list),
                                     mode="Valid", up_size=UP_SIZE)
validloader = torch.utils.data.DataLoader(ValDataset, batch_size=BATCH_SIZE, num_workers=2, shuffle=True)
results = BaggingResult(CUDA_DEVICE, bag_pkl_paths=bag_pkl_paths, class_num=CLASS_NUM)


merge_accuracy = []
split_accuracy = [[] for i in range(len(bag_pkl_paths))]
for i, data in enumerate(validloader):
    _, val_x, val_label = data
    merge_res, split_res = results.pred(val_x)
    merge_accuracy.append((val_label==merge_res).numpy().mean())
    print(i*BATCH_SIZE, " - ", (i+1)*BATCH_SIZE)
    
    for j in range(split_res.shape[0]):
        res = split_res[j]
        split_accuracy[j].append((val_label==res).numpy().mean())
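
A possible way to summarize the per-batch accuracies collected above; the np.mean reduction and the print formatting are illustrative additions, not part of the original script:

print("merged bagging accuracy: %.4f" % np.mean(merge_accuracy))
for j, acc in enumerate(split_accuracy):
    print("model %d (%s): %.4f" % (j, bag_pkl_paths[j], np.mean(acc)))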
Example #4
from utils_incremental.train_eval_MS import train_eval_MS
from utils_incremental.train_eval_LF import train_eval_LF
from utils_incremental.train_eval_MR_LF import train_eval_MR_LF

######### Modifiable Settings ##########
from configparser import ConfigParser
from Utils import DataUtils
import sys, warnings, socket, os

if len(sys.argv) != 2:  # We have to give 1 arg
    print('Arguments: config')
    sys.exit(-1)
with warnings.catch_warnings(record=True) as warn_list:

    ######### Modifiable Settings ##########
    utils = DataUtils()
    # loading configuration file
    cp = ConfigParser()
    cp.read(sys.argv[1])
    cp = cp[os.path.basename(__file__)]
    ########################################
    train_batch_size = int(cp['train_batch_size'])  # Batch size for train
    test_batch_size = int(cp['test_batch_size'])  # Batch size for test
    eval_batch_size = int(cp['eval_batch_size'])  # Batch size for eval
    base_lr = float(cp['base_lr'])  # Initial learning rate
    lr_strat = utils.from_str_to_list(
        cp['lr_strat'], 'int')  # Epochs where learning rate gets decreased
    lr_factor = float(cp['lr_factor'])  # Learning rate decrease factor
    custom_weight_decay = float(cp['custom_weight_decay'])  # Weight Decay
    custom_momentum = float(cp['custom_momentum'])  # Momentum
    normalization_dataset_name = cp['normalization_dataset_name']  # Dataset used for normalization statistics
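
A minimal sketch of the config layout this pattern expects: the section name must equal the script's file name (cp[os.path.basename(__file__)]) and the keys mirror the cp[...] reads above. The script name, the values, and the lr_strat separator are assumptions for illustration only:

from configparser import ConfigParser
example_cp = ConfigParser()
example_cp['train_incremental.py'] = {      # hypothetical script name
    'train_batch_size': '128',
    'test_batch_size': '100',
    'eval_batch_size': '100',
    'base_lr': '0.1',
    'lr_strat': '30,60',                    # consumed by utils.from_str_to_list(..., 'int')
    'lr_factor': '0.1',
    'custom_weight_decay': '0.0001',
    'custom_momentum': '0.9',
    'normalization_dataset_name': 'cifar100',
}
with open('example.cf', 'w') as f:
    example_cp.write(f)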
Example #5
from __future__ import division
import numpy as np
import torch as th
import AverageMeter as AverageMeter
import sys, os, copy
from Utils import DataUtils
utils = DataUtils()

if len(sys.argv) != 9:
    print(
        'Arguments: images_list_files_path, exemplars_files_path, scores_path, b1_scores_path, K, P, S, dataset'
    )
    sys.exit(-1)
#Parameters###############################
batch_size = 256
images_list_files_path = sys.argv[1]
exemplars_files_path = sys.argv[2]
scores_path = sys.argv[3]
b1_scores_path = sys.argv[4]
K = int(sys.argv[5])  # size of the memory for past class exemplars
P = int(sys.argv[6])  # number of classes per state
S = int(sys.argv[7])  # number of states, including the first non-incremental one
dataset = sys.argv[8]

###########################################
print('Dataset name = ' + dataset)

top1_acc_ft = []
top1_acc_mc = []
top5_acc_ft = []
Example #6

if len(sys.argv) != 7:
    print(
        'Arguments: ft_feat_scores_path, ft_weights_path, K, P, S, Dataset_name [vgg_faces|ilsvrc|google_landmarks]'
    )
    sys.exit(-1)
#Parameters###############################
batch_size = 256
scores_path = sys.argv[1]
weights_path = sys.argv[2]
memory_size = sys.argv[3]
P = int(sys.argv[4])
S = int(sys.argv[5])
dataset = sys.argv[6]

#Parameters#############################################
print('Dataset name = ' + dataset)

utils = DataUtils()

top1_accuracies = []
rectified_top1_accuracies = []
top5_accuracies = []
rectified_top5_accuracies = []

batch_initial_weight_matrix = {}
batch_initial_bias_vector = {}

#get first batch weights and bias
first_model_weights_path = os.path.join(
    weights_path,
    dataset + '/S~' + str(S) + '/K~' + memory_size + '/b1_weight_bias.tf')

with open(first_model_weights_path, 'rb') as fp:
Example #7
intermediate_models_save_dir = os.path.join(cp['intermediate_models_save_dir'],
                                            algo_name)
saving_intermediate_models = cp['saving_intermediate_models'] == 'True'
datasets_mean_std_file_path = cp['datasets_mean_std_file_path']

if saving_intermediate_models:
    if not os.path.exists(intermediate_models_save_dir):
        os.makedirs(intermediate_models_save_dir)

print('Loading train images from ' + train_file_path)
print('Loading val images from ' + val_file_path)
print('Dataset name for normalization = ' + normalization_dataset_name)

#catching warnings
with warnings.catch_warnings(record=True) as warn_list:
    utils = DataUtils()
    dataset_mean, dataset_std = utils.get_dataset_mean_std(
        normalization_dataset_name, datasets_mean_std_file_path)

    print('dataset mean = ' + str(dataset_mean))
    print('dataset std = ' + str(dataset_std))

    # Data loading code
    normalize = transforms.Normalize(mean=dataset_mean, std=dataset_std)

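    # standard ImageNet-style training augmentation: random resized crop to 224,
    # random horizontal flip, tensor conversion, then per-dataset normalization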
    train_dataset = ImagesListFileFolder(
        train_file_path,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
Example #8
import numpy as np
import os, math
import sys, socket
from configparser import ConfigParser
try:
    import cPickle
except ImportError:
    import _pickle as cPickle
# Syspath for the folder with the utils files
# sys.path.insert(0, "/media/data/srebuffi")

import utils_resnet
import utils_icarl
import utils_data
from Utils import DataUtils
utils = DataUtils()
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"

if len(sys.argv) != 2:  # We have to give 1 arg
    print('Arguments: config')
    sys.exit(-1)

# loading configuration file
cp = ConfigParser()
cp.read(sys.argv[1])
cp = cp[os.path.basename(__file__)]

######### Modifiable Settings ##########
batch_size = int(cp['batch_size'])  # Batch size
nb_groups = int(cp['nb_groups'])  # Number of groups
nb_cl = int(cp['nb_cl'])  # Number of classes per group
Example #9
#!/usr/bin/env python
# coding=utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils_pytorch import *
from Utils import DataUtils
import AverageMeter as AverageMeter
utils = DataUtils()


#code for baseline1 : FT+standard distillation
def train_eval(epochs, tg_model, ref_model, tg_optimizer, tg_lr_scheduler, \
            trainloader, testloader, \
            iteration, start_iteration, \
            T, beta, \
            fix_bn=False, weight_per_class=None, device=None):
    if device is None:
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    if iteration > start_iteration:
        ref_model.eval()
        num_old_classes = ref_model.fc.out_features

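    # top-k used for accuracy reporting, capped by the current number of classes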
    top = min(5, tg_model.fc.out_features)

    for epoch in range(epochs):
        #train
        tg_model.train()
        if fix_bn:
            for m in tg_model.modules():
Example #10

# First batch parameters
first_model_load_path = cp['first_model_load_path']

# Incremental batches dataset
normalization_dataset_name = cp['normalization_dataset_name']
dataset_files_dir = cp['dataset_files_dir']
data_output_dir = cp['data_output_dir']

# Semi-supervised labeling settings
B = float(cp['B'])
classical_AF = cp['classical_AF']
rerun = cp['rerun'] == "True"
apply_th_train = cp['apply_th_train'] == "True"
apply_th_val_al = cp['apply_th_val_al'] == "True"
train_files_dir = os.path.join(dataset_files_dir, 'separated/train')
path_val_batch1 = os.path.join(dataset_files_dir, 'batch1/val.lst')
path_train_batch1 = os.path.join(dataset_files_dir, 'batch1/train.lst')
full_paths_suffix = ''
################ Global variables
utils = DataUtils()
dataset_mean, dataset_std = utils.get_dataset_mean_std(
    normalization_dataset_name, datasets_mean_std_file_path)
normalize = transforms.Normalize(mean=dataset_mean, std=dataset_std)

print("Running on " + str(socket.gethostname()) + " | gpu " + str(gpu))
utils.print_parameters(cp)
assert (mode in ['il', 'il_al'])
# start the main program
main(I)
Example #11
#!/usr/bin/nohup python
# -*- coding: utf-8 -*-

__author__ = "Eden Belouadah & Adrian Popescu"

import sys, copy
import numpy as np
import torch as th
import AverageMeter as AverageMeter
from Utils import DataUtils

utils = DataUtils()
from sklearn import preprocessing

dataset = sys.argv[1]  # PARAM - name of the dataset
list_root_dir = sys.argv[2]  # root dir for the list of images
local_root_dir = sys.argv[3]  # root dir on node for the features and the classification masks
Z = int(sys.argv[4])  # PARAM - number of incremental states Z for the dataset
B = int(sys.argv[5])  # PARAM - size of the allowed memory budget B for past classes
P = int(sys.argv[6])  # PARAM - size of each incremental batch
last_batch_number = int(sys.argv[7])  # PARAM - max number of incremental states
top_rewinded = int(sys.argv[8])  # PARAM - number of past classes used for rewinding

batch_size = 256
''' USAGE

######### CIFAR-100 ######### S=10 : K={1000,500,250} | S=20, S=50 : K=250
nohup python ./codes/scail.py cifar100 ./data/images_list_files /path/to/scail/data 10 1000 10 10 10   2>&1 | tee /path/to/your/logs/scail/cifar100/s10/k1000/scail_cifar100_s10_1k.log &
nohup python ./codes/scail.py cifar100 ./data/images_list_files /path/to/scail/data 10 500 10 10 10    2>&1 | tee /path/to/your/logs/scail/cifar100/s10/k500/scail_cifar100_s10_0,5k.log &
Example #12
if len(sys.argv) != 8:
    print(
        'Arguments: fine_tuning_scores_path, K, P, S, Dataset_name [vgg_faces|ilsvrc|google_landmarks], first_batch_number [2-, last_batch_number'
    )
    sys.exit(-1)
#Parameters###############################
batch_size = 256
scores_path = sys.argv[1]
memory_size = sys.argv[2]
P = int(sys.argv[3])
S = int(sys.argv[4])
dataset = sys.argv[5]
first_batch = int(sys.argv[6])
last_batch = int(sys.argv[7])

utils = DataUtils()
#Parameters###############################
images_list_files_path = '/home/eden/images_list_files/'
###########################################
print('Dataset name = ' + dataset)
print('scores_path = ' + scores_path)
print('K = ' + memory_size)
print('P = ' + str(P))
print('S = ' + str(S))
print('first_batch = ' + str(first_batch))
print('last_batch = ' + str(last_batch))


def softmax(x):
    e_x = np.exp(x - np.max(x))
    return e_x / e_x.sum()
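
A small usage sketch of the softmax above (values are illustrative only). As written it normalizes over the flattened input, so a matrix of per-image scores would be processed row by row:

scores = np.array([2.0, 1.0, 0.1])
print(softmax(scores))  # ~[0.659, 0.242, 0.099], sums to 1
score_matrix = np.random.randn(4, 10)
probs = np.apply_along_axis(softmax, 1, score_matrix)  # one distribution per row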
Example #13
save_csv_path = "q1_data/samplesummision_class100.csv"
testset_path = "q1_data/test.npy"
BATCH_SIZE = 20
CUDA_DEVICE = 2
CLASS_NUM = 100
UP_SIZE = (224, 224)

csvheader = ["image_id", "fine_label"]

bag_pkl_paths = [
    "./pklmodels/Class20_A_epoch_40.pkl", "./pklmodels/Class20_B_epoch_40.pkl",
    "./pklmodels/Class20_C_epoch_40.pkl"
]

testDataset = DataUtils.DatasetLoader("q1_data/samplesummission1.csv",
                                      testset_path,
                                      mode="Test",
                                      up_size=UP_SIZE)

setsize = len(testDataset)
testloader = torch.utils.data.DataLoader(testDataset,
                                         batch_size=BATCH_SIZE,
                                         num_workers=2,
                                         shuffle=False)
results = BaggingResult(CUDA_DEVICE,
                        bag_pkl_paths=bag_pkl_paths,
                        class_num=CLASS_NUM)

resdata = -np.ones(setsize)  # Default label -1
for i, data in enumerate(testloader):
    index, val_x, val_label = data
    print(np.min(index.numpy()), " - ", np.max(index.numpy()))
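
# A minimal sketch of writing the submission file once resdata has been filled with the
# merged predictions (one integer label per test image); the csv.writer usage below is an
# illustration reusing save_csv_path and csvheader from above, not the original code.
import csv
with open(save_csv_path, "w", newline="") as f:
    writer = csv.writer(f)
    writer.writerow(csvheader)  # ["image_id", "fine_label"]
    for image_id, label in enumerate(resdata.astype(int)):
        writer.writerow([image_id, label])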
Example #14
                           kernel_initializer=init)(d)
        # define model
        model = Model(in_image, patch_out)
        return model


if __name__ == '__main__':
    path2TR1 = os.path.join(os.getcwd(), 'Tr1', 'TrainT1')
    path2TR2 = os.path.join(os.getcwd(), 'Tr2', 'TrainT2')
    checkpoint_path = os.path.join(os.getcwd(), 'Trained_Model4')

    if not os.path.isdir(checkpoint_path):
        os.mkdir(checkpoint_path)

    batch_size = 4
    epochs = 100
    # load image batches from the TrainT1 / TrainT2 training folders
    images_x = DataUtils(path2TR1, (220, 184)).get_data(batch_size)
    images_y = DataUtils(path2TR2, (220, 184)).get_data(batch_size)

    sample_x_data = next(iter(images_x))
    sample_y_data = next(iter(images_y))

    cycleGan = CycleGAN_TR1_TR2((220, 184, 1), checkpoint_path)

    cycleGan.train(images_x,
                   images_y,
                   epochs,
                   plot_results=True,
                   sample_data=(sample_x_data, sample_y_data))