def setUp(self):
        self.train_set, _ = dta.get_client_data(
            'emnist',
            'example',
            {
                'supervised': 0.0,
                'unsupervised': 0.0
            },
        )

        self.train_set = dta.get_sample_client_data(self.train_set, 2, 8)

        classifier_ph = autoencoder_ph = {
            'dataset': 'emnist',
            'optimizer': 'SGD',
            'learning_rate': 10.0
        }

        self.classifier = mdl.DenseSupervisedModel(classifier_ph)
        self.autoencoder = mdl.DenseAutoencoderModel(autoencoder_ph)

        self.dataloader_classifier = dta.DataLoader(
            self.classifier.preprocess_emnist,
            num_epochs=1,
            shuffle_buffer=500,
            batch_size=2,
            learning_env='federated')

        self.dataloader_autoencoder = dta.DataLoader(
            self.autoencoder.preprocess_emnist,
            num_epochs=1,
            shuffle_buffer=500,
            batch_size=2,
            learning_env='federated')
Example no. 2
def create_dataloaders(LABEL_MODE, TRAIN_FOLDS, TRAIN_SCENES, BATCHSIZE,
                       TIMESTEPS, EPOCHS, NFEATURES, NCLASSES, VAL_FOLDS,
                       VAL_STATEFUL):
    train_loader = dataloader.DataLoader('train',
                                         LABEL_MODE,
                                         TRAIN_FOLDS,
                                         TRAIN_SCENES,
                                         batchsize=BATCHSIZE,
                                         timesteps=TIMESTEPS,
                                         epochs=EPOCHS,
                                         features=NFEATURES,
                                         classes=NCLASSES)
    train_loader_len = train_loader.len()
    print('Number of batches per epoch (training): ' + str(train_loader_len))
    val_loader = dataloader.DataLoader('val',
                                       LABEL_MODE,
                                       VAL_FOLDS,
                                       TRAIN_SCENES,
                                       epochs=EPOCHS,
                                       batchsize=BATCHSIZE,
                                       timesteps=TIMESTEPS,
                                       features=NFEATURES,
                                       classes=NCLASSES,
                                       val_stateful=VAL_STATEFUL)

    val_loader_len = val_loader.len()
    print('Number of batches per epoch (validation): ' + str(val_loader_len))

    return train_loader, val_loader
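# Hypothetical usage sketch of create_dataloaders(); every value below is a
# placeholder assumption, not taken from the original project's configuration.
train_loader, val_loader = create_dataloaders(
    LABEL_MODE='blockbased', TRAIN_FOLDS=[1, 2, 3, 4], TRAIN_SCENES=[1],
    BATCHSIZE=32, TIMESTEPS=100, EPOCHS=10, NFEATURES=160, NCLASSES=13,
    VAL_FOLDS=[5], VAL_STATEFUL=True)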
Example no. 3
def train(config):
    cuda = torch.cuda.is_available()
    device = torch.device("cuda" if cuda else "cpu")
    dehaze_net = net.AODNet().to(device)
    dehaze_net.apply(weights_init)

    train_dataset = dataloader.DataLoader(config.path_clearimg,
                                          config.path_hazyimg)
    val_dataset = dataloader.DataLoader(config.path_clearimg,
                                        config.path_hazyimg,
                                        mode="val")
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=config.train_batch_size,
        shuffle=True,
        num_workers=config.num_workers,
        pin_memory=True)
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=config.val_batch_size,
                                             shuffle=True,
                                             num_workers=config.num_workers,
                                             pin_memory=True)
    # criterion = nn.MSELoss().to(device)
    criterion = pytorch_ssim.SSIM(window_size=11)
    optimizer = torch.optim.Adam(dehaze_net.parameters(),
                                 lr=config.lr,
                                 weight_decay=config.weight_decay)
    # optimizer = torch.optim.SGD(dehaze_net.parameters(
    # ), lr=config.lr, weight_decay=config.weight_decay)
    dehaze_net.train()
    for epoch in range(config.num_epochs):
        for iteration, (clear_img, hazy_img) in enumerate(train_loader):
            clear_img = clear_img.to(device)
            hazy_img = hazy_img.to(device)
            clean_image = dehaze_net(hazy_img)
            loss = criterion(clean_image, clear_img)
            optimizer.zero_grad()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(dehaze_net.parameters(),
                                           config.grad_clip_norm)
            optimizer.step()

            if ((iteration + 1) % config.display_iter) == 0:
                print("Epoch", epoch + 1, ": Loss at iteration", iteration + 1,
                      ":", loss.item())

        # Validation Stage
        for iter_val, (clear_img, hazy_img) in enumerate(val_loader):
            clear_img = clear_img.to(device)
            hazy_img = hazy_img.to(device)
            clean_image = dehaze_net(hazy_img)
            torchvision.utils.save_image(
                torch.cat((hazy_img, clean_image, clear_img), 0),
                config.sample_output_folder + str(iter_val + 1) + ".jpg")
        torch.save(dehaze_net.state_dict(),
                   config.snapshots_folder + "Epoch" + str(epoch + 1) + '.pt')
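# A minimal sketch of the `config` namespace this training snippet expects; only
# attributes actually read above are listed, and every value here is an assumption.
from types import SimpleNamespace

config = SimpleNamespace(
    path_clearimg="data/clear/",      # folder of clear (ground-truth) images
    path_hazyimg="data/hazy/",        # folder of hazy input images
    train_batch_size=8,
    val_batch_size=8,
    num_workers=4,
    lr=1e-4,
    weight_decay=1e-4,
    grad_clip_norm=0.1,
    num_epochs=10,
    display_iter=10,
    sample_output_folder="samples/",
    snapshots_folder="snapshots/",
)
# train(config)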
Example no. 4
    def renew_everything(self):
        # renew dataloader.
        self.identity_loader = DL.DataLoader(config,
                                             DL.IdentityDataset(config))
        self.identity_loader.renew(min(floor(self.resl), self.max_resl))

        self.loader = DL.DataLoader(config, DL.TrainingDataset(config))
        self.loader.renew(min(floor(self.resl), self.max_resl))
        # define tensors
        self.z = torch.FloatTensor(self.loader.batch_size, self.nz)
        self.x = torch.FloatTensor(self.loader.batch_size, 3,
                                   self.loader.im_size, self.loader.im_size)
        self.x_tilde = torch.FloatTensor(self.loader.batch_size, 3,
                                         self.loader.im_size,
                                         self.loader.im_size)
        self.real_label = torch.FloatTensor(self.loader.batch_size).fill_(1)
        self.fake_label = torch.FloatTensor(self.loader.batch_size).fill_(0)

        # enable cuda
        if self.use_cuda:
            self.z = self.z.cuda()
            self.x = self.x.cuda()
            self.x_tilde = self.x_tilde.cuda()
            self.real_label = self.real_label.cuda()
            self.fake_label = self.fake_label.cuda()
            torch.cuda.manual_seed(config.random_seed)

        # wrapping autograd Variable.
        self.x = Variable(self.x)
        self.x_tilde = Variable(self.x_tilde)
        self.z = Variable(self.z)
        self.real_label = Variable(self.real_label)
        self.fake_label = Variable(self.fake_label)

        # ship new model to cuda.
        if self.use_cuda:
            self.G = self.G.cuda()
            self.D = self.D.cuda()

        # optimizer
        betas = (self.config.beta1, self.config.beta2)
        if self.optimizer == 'adam':
            self.opt_g = Adam(filter(lambda p: p.requires_grad,
                                     self.G.parameters()),
                              lr=self.lr,
                              betas=betas,
                              weight_decay=0.0)
            self.opt_d = Adam(filter(lambda p: p.requires_grad,
                                     self.D.parameters()),
                              lr=self.lr,
                              betas=betas,
                              weight_decay=0.0)
Example no. 5
    def __init__(self, ph):
        Algorithm.__init__(self, ph)
        self.dataloader = dta.DataLoader(self.preprocess_fn,
                                         self.ph['num_epochs'],
                                         self.ph['shuffle_buffer'],
                                         self.ph['batch_size'])
        self.num_epochs = self.ph['num_epochs']

    def create_random(self):

        # load data for training
        v = loader.DataLoader()
        start_training = v.load_data(self.data_set, self.class_names)

        # shuffling the array
        random.shuffle(start_training)

        (xtrain, ytrain) = ([], [])

        # creating arrays for both the features and the labels
        for d, c in start_training:
            xtrain.append(d)
            ytrain.append(c)

        (xtrain, ytrain) = [np.array(lis) for lis in [xtrain, ytrain]]

        xtrain = np.asarray(xtrain)
        ytrain = np.asarray(ytrain)

        # 80% training and 20% testing
        x_train, x_test, y_train, y_test = train_test_split(
            xtrain, ytrain, test_size=self.num_split, random_state=0)

        self.sets_gets.xTest = x_test
        self.sets_gets.yTest = y_test
        xt = self.sets_gets.xTest
        yt = self.sets_gets.yTest

        # saving the testing data to pickle
        pickle.dump(xt, open(self.img, 'wb'))
        print("Random Forest X Test Saved")

        pickle.dump(yt, open(self.lbl, 'wb'))
        print("Random Forest Y Test Saved")

        # training the random forest model
        forest_classifier = RandomForestClassifier(n_estimators=2)
        model = forest_classifier.fit(x_train, y_train)

        # accuracy and loss for the model
        a = forest_classifier.score(x_train, y_train)
        print("the value of the training accuracy : " + str(a))

        l = forest_classifier.score(x_test, y_test)
        print("the value of the testing accuracy : " + str(l))

        # confusion matrics
        y_pred = forest_classifier.predict(x_test)

        print("Classification Report:")
        print(metrics.classification_report(y_test, y_pred))

        print("Confusion Matrix:")
        print(metrics.confusion_matrix(y_test, y_pred))

        # saving the random forest model to pickle
        pickle.dump(model, open(self.model_name, 'wb'))
        print("Random Forest Model Saved")
Example no. 7
    def __init__(self, ph):
        Algorithm.__init__(self, ph)
        self.num_rounds = self.ph['num_rounds']
        self.num_clients_per_round = self.ph['num_clients_per_round']
        self.dataloader = dta.DataLoader(self.preprocess_fn,
                                         self.ph['num_epochs'],
                                         self.ph['shuffle_buffer'],
                                         self.ph['batch_size'], 'federated')
Example no. 8
    def initialize(self, opt):
        BaseDataLoader.initialize(self, opt)
        root_path = "traindata/"

        self.mnistdataset = dataloader.get_training_set(root_path)
        self.mnistdataloader = dataloader.DataLoader(
            dataset=self.mnistdataset,
            num_workers=int(opt.nThreads),
            batch_size=opt.batchSize // 2,  # integer division so DataLoader gets an int batch size
            shuffle=True)
Example no. 9
    def setUp(self):
        ph = {'optimizer': 'SGD', 'learning_rate': 10.0, 'dataset': 'cifar100'}

        keras_model_fn = mdl.SimpleRotationSelfSupervisedModel(ph)
        preprocess_fn = getattr(keras_model_fn,
                                'preprocess_{}'.format(ph['dataset']))

        dataloader = dta.DataLoader(preprocess_fn,
                                    num_epochs=1,
                                    shuffle_buffer=1,
                                    batch_size=20,
                                    learning_env='federated')

        train_client_data, _ = dta.get_client_data(ph['dataset'],
                                                   'example', {
                                                       'supervised': 0.0,
                                                       'unsupervised': 0.0
                                                   },
                                                   sample_client_data=False)

        sample_batch = dataloader.get_sample_batch(train_client_data)
        model_fn = functools.partial(keras_model_fn.create_tff_model_fn,
                                     sample_batch)

        iterative_process = tff.learning.build_federated_averaging_process(
            model_fn)
        state = iterative_process.initialize()

        sample_clients = train_client_data.client_ids[:5]
        federated_train_data = dataloader.make_federated_data(
            train_client_data, sample_clients)

        state, _ = iterative_process.next(state, federated_train_data)

        self.old_model = keras_model_fn()
        self.old_model.build(sample_batch)
        tff.learning.assign_weights_to_keras_model(self.old_model, state.model)

        self.tmp_dir = 'tests/tmp/'
        if not os.path.isdir(self.tmp_dir):
            os.mkdir(self.tmp_dir)

        self.model_fp = os.path.join(self.tmp_dir, 'model.h5')
        keras_model_fn.save_model_weights(self.model_fp, state, sample_batch)

        self.new_model = keras_model_fn.load_model_weights(self.model_fp)

        ph = {
            'optimizer': 'SGD',
            'learning_rate': 10.0,
            'dataset': 'cifar100',
            'pretrained_model_fp': self.model_fp
        }

        self.transfer_model = mdl.SimpleRotationSupervisedModel(ph)()
Example no. 10
    def setUp(self):
        self.train_set, _ = dta.get_client_data('emnist',
                                                'example',
                                                {'supervised': 0.0,
                                                 'unsupervised': 0.0})

        self.train_set = dta.get_sample_client_data(self.train_set, 2, 8)

        self.dataloader_classifier = dta.DataLoader(dta.preprocess_classifier,
                                                    num_epochs=1,
                                                    shuffle_buffer=500,
                                                    batch_size=2)

        self.dataloader_autoencoder = dta.DataLoader(dta.preprocess_autoencoder,
                                                     num_epochs=1,
                                                     shuffle_buffer=500,
                                                     batch_size=2)

    def get_price(self, symbols, dates):
        """Read stock data (price) for given symbols from files."""

        df = pd.DataFrame(index=dates)

        for symbol in symbols:
            df_temp = dataloader.DataLoader().loaddata(
                self.__symbol_to_path(symbol))
            df_temp = pd.DataFrame(df_temp, columns=['price'])
            df_temp = df_temp.rename(columns={'price': symbol})
            df = df.join(df_temp)
            df = df.dropna()

        return df
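# Hypothetical usage of get_price(); `market` stands in for an instance of the
# enclosing class, and the tickers and date range are placeholders.
# dates = pd.date_range('2016-01-01', '2016-12-31')
# prices = market.get_price(['GOOG', 'AAPL'], dates)  # one price column per symbol, NaN rows dropped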
Example no. 12
def test(model_dict, using_cuda=True):
    if using_cuda:
        net = Net().cuda()
    else:
        net = Net()
    net.load_state_dict(torch.load(model_dict))
    dataset = dataloader.DataLoader("test_set.pkl",
                                    batch_size=1,
                                    using_cuda=using_cuda)
    count = 0
    for i, batch in enumerate(dataset):
        X = batch["feature"]
        y = batch["class"]
        y_pred, _ = net(X)
        p, idx = torch.max(y_pred.data, dim=1)
        count += torch.sum(torch.eq(idx.cpu(), y.data.cpu()))
    print("accuracy: %f" % (count / dataset.num))
def main():

  mscoco = dataloader.DataLoader(data_dir='./MSCOCO-IC/')

  import sys
  kind = sys.argv[1]

  # train
  NUM_CLASSES = 10 
  resnet = models.resnet18(pretrained=True)
  num_ftrs = resnet.fc.in_features
  resnet.fc = torch.nn.Linear(num_ftrs, NUM_CLASSES)
  criterion = torch.nn.CrossEntropyLoss()
  optimizer = torch.optim.SGD(resnet.parameters(), lr=0.001, momentum=0.9)
  exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)

  #resnet.load_state_dict(torch.load('./coco_'+kind+'.pth'))
  train_model(resnet, mscoco, criterion, optimizer, 20, './coco_'+kind+'.pth', kind)
Example no. 14
def run():
    df = pd.read_csv(CONFIG.INPUT_PATH, encoding="latin-1")
    df.loc[:, "Sentence #"] = df["Sentence #"].fillna(method="ffill")
    pos_lb = LabelEncoder()
    pos_lb = pos_lb.fit(df['POS'].values)
    df['POS'] = pos_lb.transform(df['POS'].values)
    sentence = df.groupby('Sentence #')['Word'].apply(list).values
    pos = df.groupby('Sentence #')["POS"].apply(list).values
    x_val = sentence[:int(0.1 * len(sentence))]
    y_val = pos[:int(0.1 * len(sentence))]

    print('--------- [INFO] TOKENIZING --------')
    train_loader = dataloader.DataLoader(sentence, pos, CONFIG.Batch_size)

    pickle.dump(pos_lb, open('input/pos_lb.pickle', 'wb'))
    pickle.dump(train_loader.vocab.word_to_idx,
                open('input/word_to_idx.pickle', 'wb'))

    x_val = train_loader.vocab.numericalize(x_val)
    x_val = keras.preprocessing.sequence.pad_sequences(
        x_val, padding='post', value=train_loader.vocab.word_to_idx["<PAD>"])
    y_val = keras.preprocessing.sequence.pad_sequences(y_val, padding='post')

    vocab_size = len(train_loader.vocab.word_to_idx)
    classes = len(list(pos_lb.classes_))

    model = NERModel.NERModel(vocab_size=vocab_size, num_classes=classes)

    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer='adam',
                  metrics=['sparse_categorical_accuracy'])

    print(f'------- [INFO] STARTING TRAINING -------')

    model.fit(train_loader,
              epochs=CONFIG.Epochs,
              batch_size=CONFIG.Batch_size,
              validation_data=(x_val, y_val))
    model.save(CONFIG.MODEL_PATH)
Example no. 15
def load_dataset(tensors):
    transform = transforms.Compose([
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    dataset = datasets.ImageFolder(root=FLAGS.data_dir, transform=transform)

    tensor_type = 'float16'

    loader = dataloader.DataLoader(
        dataset,
        batch_size=FLAGS.micro_batch_size * FLAGS.batches_per_step,
        tensor_type=tensor_type,
        shuffle=True,
        num_workers=FLAGS.num_workers,
        drop_last=True
    )  # Drop the last batch if it does not contain enough data

    return DataSet(tensors, FLAGS.micro_batch_size, FLAGS.batches_per_step,
                   loader, np.float16)
def inference():
    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    save_file = input("save model name : ")
    try:
        if torch.cuda.is_available():
            model = torch.load(save_file, map_location={"cpu": "cuda:0"})
        else:
            model = torch.load(save_file, map_location={"cuda:0": "cpu"})
        print("Success loading model")
    except IOError:
        print("Couldn't find model")
        sys.exit(0)

    print("best epoch was {}".format(model.info_dict['epoch']))

    # 1783 : length of test data set
    test_data_loader = dataloader.DataLoader(1783, test=True)
    model.eval()
    with torch.no_grad():
        X, _ = test_data_loader.get_batch()
        X = X.to(device)
        output = model(X)
    utils.generate_csv(output)
import ABot_Decoder
import QBot
import QBot_Encoder
import QBot_Decoder
import discriminator as Discriminator
torch.backends.cudnn.enabled = False
random.seed(32)
np.random.seed(32)
torch.manual_seed(7)
torch.cuda.manual_seed_all(7)
#Load Data
dialog_loc = '../../../chat_processed_data.h5'
param_loc = '../../../chat_processed_params.json'
image_loc = '../../../data_img.h5'

data = dataloader.DataLoader(dialog_loc, image_loc, param_loc)
print("Done: Data Preparation")

#CUDA
USE_CUDA = True
gpu = 0

#Parameters
params = {}
params['batch_first'] = False
params['num_layers'] = 2
params['hidden_dim'] = 512
params['embed_size'] = 300
params['vocab_size'] = len(data.ind2word.keys())
params['embedding_size'] = 300
params['vgg_out'] = 4096
                                    download=not args.no_download_dataset)
    sensor_size = dataset.sensor_size
elif args.dataset == "dvs_gesture":
    transform = tonic.transforms.Compose([
        tonic.transforms.Downsample(spatial_factor=0.25)])
    dataset = tonic.datasets.DVSGesture(save_to='./data', train=True, transform=transform, 
                                        download=not args.no_download_dataset)
    sensor_size = (32, 32)
    polarity = True
else:
    raise RuntimeError("Unknown dataset '%s'" % args.dataset)

# Create loader
start_processing_time = perf_counter()
data_loader = dataloader.DataLoader(dataset, shuffle=True, batch_size=batch_size,
                                    sensor_size=sensor_size, polarity=polarity,
                                    dataset_slice=dataset_slice)
end_process_time = perf_counter()
print("Data processing time:%f ms" % ((end_process_time - start_processing_time) * 1000.0))

# Calculate number of input neurons from sensor size
num_input_neurons = np.prod(sensor_size)
if polarity:
    num_input_neurons *= 2

# Calculate number of valid outputs from classes
num_outputs = len(dataset.classes)

# Round up to power-of-two
num_output_neurons = int(2**(np.ceil(np.log2(num_outputs))))
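# Example: 11 classes give np.ceil(np.log2(11)) == 4, hence 16 output neurons.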
Example no. 19
from mxnet import nd, autograd, gluon
import dataloader
from sklearn.model_selection import train_test_split
from matplotlib import pyplot as plt
from datetime import datetime

#Set Contexts
ctx = mx.gpu() if mx.test_utils.list_gpus() else mx.cpu()
data_ctx = ctx
model_ctx = ctx

#load the data
num_inputs = 784
batch_size = 64
num_instances = 60000
data = dataloader.DataLoader()
train_data,train_labels = data.load_data()
test_data,test_labels = data.load_data(mode = 'test')
X_train, X_val, y_train, y_val = train_test_split(train_data, train_labels, test_size=0.30, random_state=42)

train_data = []
for index,data in enumerate(X_train):
    temp = y_train[index]
    train_data.append((data,temp))
 
num_instances = len(train_data)

val_data = []
for index,data in enumerate(X_val):
    temp = y_val[index]
    val_data.append((data,temp))
Example no. 20
    name = 'data/seq/test.pickle'
    iname = 'data/seq/test_in.pickle'
    oname = 'data/seq/test_out.pickle'
    m = Mode.test
elif mode == 'valid':
    name = 'data/seq/valid.pickle'
    iname = 'data/seq/valid_in.pickle'
    oname = 'data/seq/valid_out.pickle'
    m = Mode.train
else:
    exit(1)


# load data

train = DL.DataLoader(m)
# test = DL.DataLoader(Mode.test)

# for validation data
if mode == 'train':
    for k in train.keys():
        train[k] = train[k][76:]
elif mode == 'valid':
    for k in train.keys():
        train[k] = train[k][:76]


# get time data
output_train = train.pop('output')
# output_test = test.pop('output')
Example no. 21
                if "dev.conllu" in file:
                    dev_path = treebank + "/" + file

                if "test.conllu" in file:
                    test_path = treebank + "/" + file

        if train_path is None:
            continue
        language_fullname = "_".join(os.path.basename(treebank).split("_")[1:])
        lang_full = lang
        f = train_path.strip()

        i = 0
        data = pyconll.load_from_file(f"{f}")
        data_loader = dataloader.DataLoader(args, relation_map)

        inputFiles = [train_path, dev_path, test_path]
        data_loader.readData(inputFiles)

        train_features, train_output_labels = data_loader.getBinaryFeatures(
            train_path, type="train", p=args.percent, shuffle=True)
        if dev_path:
            dev_features, dev_output_labels = data_loader.getBinaryFeatures(
                dev_path, type="dev", p=1.0, shuffle=False)
        test_features, test_output_labels = data_loader.getBinaryFeatures(
            test_path, type="test", p=1.0, shuffle=False)

        for feature in args.features:
            if feature in test_features and feature in train_features:
                try:
Example no. 22
iname = '{}/{}/in.pickle'.format(prefix, mode)
oname = '{}/{}/out.pickle'.format(prefix, mode)
mname = '{}/{}/mask.pickle'.format(prefix, mode)
sname = '{}/{}/swa.pickle'.format(prefix, mode)

if mode == 'train':
    m = Mode.train
elif mode == 'test':
    m = Mode.test
elif mode == 'valid':
    m = Mode.train
else:
    exit(1)

# load data
data = DL.DataLoader(m)
print(data.keys())
# for validation data
if mode == 'train':
    for k in data.keys():
        data[k] = data[k][76:]
elif mode == 'valid':
    for k in data.keys():
        data[k] = data[k][:76]

# get time data
output = data.pop('output')
input = data.pop('input')
swa = data.pop('SWA')
print(np.array(swa).shape)
Example no. 23
# Parse command line options
(options, args) = parser.parse_args()

# Assert if the dataset is within the predefined possible options
datasetDict = {"MNIST": 1, "CIFAR-10": 2}
if options.dataset not in datasetDict:
	print ("Error: Dataset not within the predefined possible options (%s)" % str(datasetDict.keys()))
	exit (-1)
else:
	# Add the dataset name at the end of the model
	options.logDir = options.logDir + "-" + options.dataset
	options.checkpointDir = options.checkpointDir + "-" + options.dataset

	# Import dataset
	dataloader = dataloader.DataLoader(options.dataset, one_hot=False)
	options.numTrainingInstances = dataloader.train.num_examples
	options.numValidationInstances = dataloader.validation.num_examples
	options.numTestInstances = dataloader.test.num_examples
	print ("Training Instances: %d | Validation Instances: %d | Test Instances: %d" % 
		(options.numTrainingInstances, options.numValidationInstances, options.numTestInstances))

	options.imageWidth = dataloader.image_width
	options.imageHeight = dataloader.image_height
	options.imageChannels = dataloader.image_channels

	if options.dataset == "CIFAR-10":
		options.numPrimaryCapsules = 64
		options.numDecoderFirstHiddenLayerUnits = 2048
		options.numDecoderSecondHiddenLayerUnits = 4096
Example no. 24
def main():
    args = parser.parse_args()

    #Create Train and Validation Loaders
    data_train = dataloader.DataLoader(args.dataset,
                                       args.num_classes,
                                       args,
                                       train=True)
    data_val = dataloader.DataLoader(args.dataset,
                                     args.num_classes,
                                     args,
                                     train=False)

    #Check actualy number of classes
    if args.num_classes == -1:
        args.num_classes = data_train.num_classes
    data_train = torch.utils.data.DataLoader(data_train,
                                             batch_size=args.batch_size,
                                             shuffle=True,
                                             num_workers=0)
    data_val = torch.utils.data.DataLoader(data_val,
                                           batch_size=args.batch_size,
                                           shuffle=False,
                                           num_workers=0)

    # Create Model
    model = network.Network(args.num_classes, args.projection_size,
                            args.base_model, args.pretrained, args.normalize,
                            args.img_size, args.layer_size).to(device)
    if not args.contrastive:
        model.convert()
    if args.load_model:
        load_model(model, args.start_epoch, args.contrastive)

    #Losses
    contrastive_loss = ContLoss(args.batch_size, args.temperature,
                                args.sim_type)
    cross_entropy_loss = torch.nn.CrossEntropyLoss()

    #Optimizer
    opt, lr_scheduler = start_opt(args, model)

    if args.train:
        for process in range(2):
            for e in range(args.epoch):
                losses = []
                for step, (images, labels) in enumerate(data_train):
                    opt.zero_grad()

                    #To adjust for dataloader outputting two images
                    if len(images.shape) == 5:
                        images = images.permute(1, 0, 2, 3, 4)
                        images = torch.cat([images[0], images[1]])

                    #Loss bug so skip and get in next shuffled batch
                    if images.shape[
                            0] != args.batch_size * 2 and args.contrastive and process == 0:
                        continue

                    z = model(images)

                    if args.contrastive and process == 0:
                        loss = contrastive_loss(z)
                    else:
                        loss = cross_entropy_loss(z, labels.squeeze())

                    loss.backward()
                    opt.step()

                    losses.append(loss.item())

                lr_scheduler.step()

                #Validate after epoch either all the time for normal model or during find layer tuning for contrastive
                if process == 2 and args.contrastive or not args.contrastive:
                    validate(data_val, model, e)

                print("Epoch: ", e, ", Loss: ", np.mean(losses), '\n')
                save_model(model, e, args.contrastive)

            #If just resnet second stage not needed
            if not args.contrastive:
                break
            elif process == 0:
                model.convert()
                args.epoch = args.final_layer_epoch
                opt, lr_scheduler = start_opt(args, model)
                args.contrastive = False

    else:
        if not args.load_model:
            print("Define load model for test")
        else:
            validate(data_val, model, e)
Example no. 25
def training():
    train_data_loader = dataloader.DataLoader(BATCH_SIZE, test=False)
    eval_X, eval_y = train_data_loader.get_eval_data()
    eval_data_loader = dataloader.DataLoader(BATCH_SIZE,
                                             test=False,
                                             X=eval_X,
                                             y=eval_y)

    writer = SummaryWriter(SUMMARY_WRITER_PATH)

    if SCHEDULER:
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                    step_size=STEP_SIZE,
                                                    gamma=GAMMA)

    if loading:
        best_loss = float(model.info_dict['best_loss'])
        init_epoch = int(model.info_dict['epoch']) + 1
        print("Best loss is {:4f}".format(best_loss))
    else:
        best_loss = math.inf
        init_epoch = 1

    for epoch in range(init_epoch, EPOCH_SIZE + 1):
        print("\n>>> Epoch {} / {}".format(epoch, EPOCH_SIZE))
        # print(">>> Start training")
        model.train()  # training mode

        if SCHEDULER:
            scheduler.step()

        train_losses = []
        iteration = 0
        while train_data_loader.next_is_available():
            iteration += 1
            X, y = train_data_loader.get_batch()
            X, y = X.to(DEVICE), y.to(DEVICE)
            optimizer.zero_grad()
            out = model(X)
            train_loss = nn.MSELoss()(out, y)
            train_losses.append(train_loss.item())
            train_loss.backward()
            optimizer.step()
            if iteration == 1 and epoch % 10 == 0:
                utils.save_figures(
                    X, out, "training_images/train_{}.png".format(epoch))
            # print("training loss : {:12.4f}".format(train_loss), end='\r')
        avg_train_loss = mean(train_losses)
        # print("\n >>> Average training loss: {}".format(avg_train_loss))
        writer.add_scalar('avg_train_loss', avg_train_loss, epoch)
        train_data_loader.restart(shuffle=SHUFFLE)

        # print(">>> Start Test")
        model.eval()  # evaluation mode
        test_losses = []
        val_iteration = 0
        with torch.no_grad():
            while eval_data_loader.next_is_available():
                val_iteration += 1
                X, y = eval_data_loader.get_batch()
                X, y = X.to(DEVICE), y.to(DEVICE)
                out = model(X)
                test_loss = nn.MSELoss()(out, y)
                test_losses.append(test_loss.item())
                if val_iteration == 1 and epoch % 10 == 0:
                    utils.save_figures(X, out,
                                       "test_images/test_{}.png".format(epoch))
            avg_test_loss = mean(test_losses)
            print(">>> Average test loss: {}".format(avg_test_loss))
            writer.add_scalar('avg_test_loss', avg_test_loss, epoch)
            eval_data_loader.restart()

        if avg_test_loss < best_loss:
            print(">>> Saving models...")
            best_loss = avg_test_loss
            save_dict = {
                "epoch": epoch,
                "best_loss": best_loss,
                "optimizer": optimizer.state_dict()
            }
            model.info_dict = save_dict
            torch.save(model, SAVE_NAME)
        if epoch in SAVE_EPOCH_LIST:
            if os.path.isfile(SAVE_NAME):
                shutil.copyfile(
                    SAVE_NAME, SAVE_NAME.replace(".pt",
                                                 "_{}.pt".format(epoch)))
    writer.close()
Example no. 26
def train(opt):
    # Deal with feature things before anything
    opt.use_fc, opt.use_att = utils.if_use_feat(opt.caption_model)
    if opt.use_box:
        opt.att_feat_size = opt.att_feat_size + 5

    loader = dataloader.DataLoader(opt)
    opt.vocab_size = loader.vocab_size
    opt.seq_length = loader.seq_length

    tb_summary_writer = tb and tb.SummaryWriter(opt.checkpoint_path)

    infos = {}
    histories = {}
    if opt.start_from is not None:
        # open old infos and check if models are compatible
        with open(os.path.join(opt.start_from, 'infos_'+opt.id+'.pkl'), 'rb') as f:
            infos = utils.pickle_load(f)
            saved_model_opt = infos['opt']
            need_be_same = ["caption_model",
                            "rnn_type", "rnn_size", "num_layers"]
            for checkme in need_be_same:
                assert vars(saved_model_opt)[checkme] == vars(opt)[
                    checkme], "Command line argument and saved model disagree on '%s' " % checkme

        if os.path.isfile(os.path.join(opt.start_from, 'histories_'+opt.id+'.pkl')):
            with open(os.path.join(opt.start_from, 'histories_'+opt.id+'.pkl'), 'rb') as f:
                histories = utils.pickle_load(f)
    else:
        infos['iter'] = 0
        infos['epoch'] = 0
        infos['iterators'] = loader.iterators
        infos['split_ix'] = loader.split_ix
        infos['vocab'] = loader.get_vocab()
    infos['opt'] = opt

    iteration = infos.get('iter', 0)
    epoch = infos.get('epoch', 0)

    val_result_history = histories.get('val_result_history', {})
    loss_history = histories.get('loss_history', {})
    lr_history = histories.get('lr_history', {})
    ss_prob_history = histories.get('ss_prob_history', {})

    loader.iterators = infos.get('iterators', loader.iterators)
    loader.split_ix = infos.get('split_ix', loader.split_ix)
    if opt.load_best_score == 1:
        best_val_score = infos.get('best_val_score', None)

    opt.vocab = loader.get_vocab()
    # DataParallel
    model = models.setup(opt).cuda()
    del opt.vocab
    dp_model = torch.nn.DataParallel(model)
    lw_model = LossWrapper(model, opt)
    dp_lw_model = torch.nn.DataParallel(lw_model)

    # not DataParallel
    # dp_model = models.setup(opt).cuda()
    # model = dp_model
    # del opt.vocab
    # dp_lw_model = LossWrapper(dp_model, opt)
    # lw_model = dp_lw_model
    
    epoch_done = True
    # Assure in training mode
    dp_lw_model.train()

    if opt.noamopt:
        assert opt.caption_model == 'transformer', 'noamopt can only work with transformer'
        optimizer = utils.get_std_opt(
            model, factor=opt.noamopt_factor, warmup=opt.noamopt_warmup)
        optimizer._step = iteration
    elif opt.reduce_on_plateau:
        optimizer = utils.build_optimizer(model.parameters(), opt)
        optimizer = utils.ReduceLROnPlateau(optimizer, factor=0.5, patience=3)
    else:
        optimizer = utils.build_optimizer(model.parameters(), opt)
    # Load the optimizer
    if vars(opt).get('start_from', None) is not None and os.path.isfile(os.path.join(opt.start_from, "optimizer.pth")):
        optimizer.load_state_dict(torch.load(
            os.path.join(opt.start_from, 'optimizer.pth')))

    def save_checkpoint(model, infos, optimizer, histories=None, append=''):
        if len(append) > 0:
            append = '-' + append
        # if checkpoint_path doesn't exist
        if not os.path.isdir(opt.checkpoint_path):
            os.makedirs(opt.checkpoint_path)
        checkpoint_path = os.path.join(
            opt.checkpoint_path, 'model%s.pth' % (append))
        torch.save(model.state_dict(), checkpoint_path)
        print("model saved to {}".format(checkpoint_path))
        optimizer_path = os.path.join(
            opt.checkpoint_path, 'optimizer%s.pth' % (append))
        torch.save(optimizer.state_dict(), optimizer_path)
        with open(os.path.join(opt.checkpoint_path, 'infos_'+opt.id+'%s.pkl' % (append)), 'wb') as f:
            utils.pickle_dump(infos, f)
        if histories:
            with open(os.path.join(opt.checkpoint_path, 'histories_'+opt.id+'%s.pkl' % (append)), 'wb') as f:
                utils.pickle_dump(histories, f)

    try:
        while True:
            if epoch_done:
                if not opt.noamopt and not opt.reduce_on_plateau:
                    # Assign the learning rate
                    if epoch > opt.learning_rate_decay_start and opt.learning_rate_decay_start >= 0:
                        frac = (
                            epoch - opt.learning_rate_decay_start) // opt.learning_rate_decay_every
                        decay_factor = opt.learning_rate_decay_rate ** frac
                        opt.current_lr = opt.learning_rate * decay_factor
                    else:
                        opt.current_lr = opt.learning_rate
                    # set the decayed rate
                    utils.set_lr(optimizer, opt.current_lr)
                # Assign the scheduled sampling prob
                if epoch > opt.scheduled_sampling_start and opt.scheduled_sampling_start >= 0:
                    frac = (
                        epoch - opt.scheduled_sampling_start) // opt.scheduled_sampling_increase_every
                    opt.ss_prob = min(
                        opt.scheduled_sampling_increase_prob * frac, opt.scheduled_sampling_max_prob)
                    model.ss_prob = opt.ss_prob

                # If start self critical training
                if opt.self_critical_after != -1 and epoch >= opt.self_critical_after:
                    sc_flag = True
                    init_scorer(opt.cached_tokens)
                else:
                    sc_flag = False

                epoch_done = False

            start = time.time()
            # Load data from train split (0)
            data = loader.get_batch('train')
            print('Read data:', time.time() - start)

            torch.cuda.synchronize()
            start = time.time()

            tmp = [data['fc_feats'], data['att_feats'],
                   data['labels'], data['masks'], data['att_masks']]
            tmp = [_ if _ is None else _.cuda() for _ in tmp]
            fc_feats, att_feats, labels, masks, att_masks = tmp

            optimizer.zero_grad()
            model_out = dp_lw_model(fc_feats, att_feats, labels, masks, att_masks,
                                    data['gts'], torch.arange(0, len(data['gts'])), sc_flag)

            loss = model_out['loss'].mean()

            loss.backward()
            utils.clip_gradient(optimizer, opt.grad_clip)
            optimizer.step()
            train_loss = loss.item()
            torch.cuda.synchronize()
            end = time.time()
            if not sc_flag:
                print("iter {} (epoch {}), train_loss = {:.3f}, time/batch = {:.3f}"
                      .format(iteration, epoch, train_loss, end - start))
            else:
                print("iter {} (epoch {}), avg_reward = {:.3f}, time/batch = {:.3f}"
                      .format(iteration, epoch, model_out['reward'].mean(), end - start))

            # Update the iteration and epoch
            iteration += 1
            if data['bounds']['wrapped']:
                epoch += 1
                epoch_done = True

            # Write the training loss summary
            if (iteration % opt.losses_log_every == 0):
                add_summary_value(tb_summary_writer,
                                  'train_loss', train_loss, iteration)
                if opt.noamopt:
                    opt.current_lr = optimizer.rate()
                elif opt.reduce_on_plateau:
                    opt.current_lr = optimizer.current_lr
                add_summary_value(tb_summary_writer,
                                  'learning_rate', opt.current_lr, iteration)
                add_summary_value(
                    tb_summary_writer, 'scheduled_sampling_prob', model.ss_prob, iteration)
                if sc_flag:
                    add_summary_value(
                        tb_summary_writer, 'avg_reward', model_out['reward'].mean(), iteration)

                loss_history[iteration] = (train_loss if not sc_flag
                                           else model_out['reward'].mean())
                lr_history[iteration] = opt.current_lr
                ss_prob_history[iteration] = model.ss_prob

            # update infos
            infos['iter'] = iteration
            infos['epoch'] = epoch
            infos['iterators'] = loader.iterators
            infos['split_ix'] = loader.split_ix

            # make evaluation on validation set, and save model
            if (iteration % opt.save_checkpoint_every == 0):
                # eval model
                eval_kwargs = {'split': 'val',
                               'dataset': opt.input_json}
                eval_kwargs.update(vars(opt))
                val_loss, predictions, lang_stats = eval_utils.eval_split(
                    dp_model, lw_model.crit, loader, eval_kwargs)

                if opt.reduce_on_plateau:
                    if 'CIDEr' in lang_stats:
                        optimizer.scheduler_step(-lang_stats['CIDEr'])
                    else:
                        optimizer.scheduler_step(val_loss)
                # Write validation result into summary
                add_summary_value(tb_summary_writer,
                                  'validation loss', val_loss, iteration)
                if lang_stats is not None:
                    for k, v in lang_stats.items():
                        add_summary_value(tb_summary_writer, k, v, iteration)
                val_result_history[iteration] = {
                    'loss': val_loss, 'lang_stats': lang_stats, 'predictions': predictions}

                # Save model if is improving on validation result
                if opt.language_eval == 1:
                    current_score = lang_stats['CIDEr']
                else:
                    current_score = - val_loss

                best_flag = False

                if best_val_score is None or current_score > best_val_score:
                    best_val_score = current_score
                    best_flag = True

                # Dump miscellaneous information
                infos['best_val_score'] = best_val_score
                histories['val_result_history'] = val_result_history
                histories['loss_history'] = loss_history
                histories['lr_history'] = lr_history
                histories['ss_prob_history'] = ss_prob_history

                save_checkpoint(model, infos, optimizer, histories)
                if opt.save_history_ckpt:
                    save_checkpoint(model, infos, optimizer,
                                    append=str(iteration))

                if best_flag:
                    save_checkpoint(model, infos, optimizer, append='best')

            # Stop if reaching max epochs
            if epoch >= opt.max_epochs and opt.max_epochs != -1:
                break
    except (RuntimeError, KeyboardInterrupt):
        print('Save ckpt on exception ...')
        save_checkpoint(model, infos, optimizer)
        print('Save ckpt done.')
        stack_trace = traceback.format_exc()
        print(stack_trace)
import dataloader
import torch
import numpy as np
base = r'C:\Users\shaha\Documents\DeepGit\DeepLearningSeminar\project\dataraw\train-clean-100'
batch_size = 1
T = 4
target_sr = 8e3
ds_train = dataloader.AudioDataset(path=base,
                                   batch_size=batch_size,
                                   T=T,
                                   target_sr=target_sr)
dl_train = dataloader.DataLoader(dataset=ds_train, batch_size=batch_size)
len_t = len(ds_train)
for idx, (x, y) in enumerate(dl_train):
    data = (x, y)
    torch.save(
        data,
        r'C:\Users\shaha\Documents\DeepGit\DeepLearningSeminar\project\data\{}.pt'
        .format(idx))
    precentile_done = round(100 * (idx + 1) / len_t)
    progress_symbols = int(np.floor(precentile_done * 80 / 100))
    print('\r[' + ('#') * progress_symbols + (' ') * (80 - progress_symbols) +
          ']' + 'progress {}/100%'.format(precentile_done),
          end='')
print('*' * 80)
print('finished !')
Example no. 28
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1"

import sys

from models import attgan
from tensorflow.keras import losses, optimizers, metrics
import numpy as np
import tensorflow as tf
import dataloader
import utils

from settings import *

train_dloader = dataloader.DataLoader("train", BATCH_SIZE)
valid_dloader = dataloader.DataLoader("valid", BATCH_SIZE)
test_dloader = dataloader.DataLoader("test", BATCH_SIZE)

model = attgan.AttGAN()

# criterion_reconstruction = losses.MeanAbsoluteError()
# criterion_adversarial = losses.BinaryCrossentropy()
# criterion_attribute = losses.BinaryCrossentropy()


def criterion_MAE(y_true, y_pred):
    n = y_true.shape[0]

    loss = tf.math.abs(y_true - y_pred)
    loss = tf.reshape(loss, shape=(n, -1))
    loss = tf.reduce_sum(loss, axis=-1)
Example no. 29
def train(model_dict=None, using_cuda=True, learning_rate=0.06,\
    momentum=0.3, batch_size=32, epochs=5, coef=1.0, interval=10):
    """
    training procedure

    Args: 
    If model_dict is given (a file address), it will continue training on the given model.
    Otherwise, it would train a new model from scratch.
    If using_cuda is true, the training would be conducted on GPU.
    Learning_rate and momentum is for SGD optimizer.
    coef is the coefficent between the cross-entropy loss and the penalization term.
    interval is the frequncy of reporting.

    the result will be saved with a form "model_dict_+current time", which could be used for further training
    """

    if using_cuda:
        net = Net().cuda()
    else:
        net = Net()

    if model_dict is not None:
        net.load_state_dict(torch.load(model_dict))

    optimizer = optim.SGD(net.parameters(),
                          lr=learning_rate,
                          momentum=momentum)
    criterion = nn.CrossEntropyLoss()
    dataset = dataloader.DataLoader("train_set.pkl",
                                    batch_size,
                                    using_cuda=using_cuda)

    #statistics
    loss_count = 0
    prepare_time = 0
    run_time = 0
    count = 0

    for epoch in range(epochs):
        print("epoch: %d" % (epoch))
        for i, batch in enumerate(dataset):
            t1 = time.time()
            X = batch["feature"]
            y = batch["class"]

            t2 = time.time()
            y_pred, y_penl = net(X)
            loss = criterion(y_pred, y) + torch.sum(y_penl) / batch_size * coef
            optimizer.zero_grad()
            loss.backward()
            nn.utils.clip_grad_norm_(net.parameters(), 0.5)
            optimizer.step()
            t3 = time.time()

            loss_count += torch.sum(y_penl).item()
            prepare_time += (t2 - t1)
            run_time += (t3 - t2)
            p, idx = torch.max(y_pred.data, dim=1)
            count += torch.sum(torch.eq(idx.cpu(), y.data.cpu()))

            if (i + 1) % interval == 0:
                print("epoch : %d, iters: %d" % (epoch, i + 1))
                print("loss count:" + str(loss_count /
                                          (interval * batch_size)))
                print("acuracy:" + str(count / (interval * batch_size)))
                print("penalty:" + str(torch.sum(y_penl).data[0] / batch_size))
                print("prepare time:" + str(prepare_time))
                print("run time:" + str(run_time))
                prepare_time = 0
                run_time = 0
                loss_count = 0
                count = 0
        string = time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime())
        torch.save(net.state_dict(), "model_dict_%s.dict" % (string))
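# Hypothetical invocations following the docstring above; the .dict path is a placeholder.
# train()                                            # new model from scratch, GPU, default hyperparameters
# train("model_dict_2020-01-01-12:00:00.dict",       # continue training from a saved state dict
#       using_cuda=False, learning_rate=0.01, epochs=10)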
Example no. 30
def pipeline(args):

    ### Experiment setting
    update_feature = args.update_feature
    resume_epoch = args.resume_epoch
    lr = args.lr

    exp_name = 'RNN'
    num_epoch = 15

    save_period = 200
    log_period = 50
    lr_scheduler_period = 5

    lr_decay = 0.5
    batchsize = 256
    num_tf_thread = 8
    clip_grad = 5.0

    ### step1 : prepare data
    data = feature.PreprocessingRawdata(update_feature=update_feature)
    label = feature.GetLabels(data)
    data_standardized = feature.Standardize(data)

    data_train, data_validation, data_test, label_train, label_validation, label_test = \
        feature.SplitData(data_standardized, label)

    # Get data iterator
    training_loader = dataloader.DataLoader(data=data_train,
                                            label=label_train,
                                            batchsize=batchsize,
                                            time=cfg.time.train_timeslots,
                                            mode='train')

    validation_loader = dataloader.DataLoader(
        data=data_validation,
        label=label_validation,
        batchsize=1,
        time=cfg.time.validation_timeslots,
        mode='validation')

    testing_loader = dataloader.DataLoader(data=data_test,
                                           label=label_test,
                                           batchsize=1,
                                           time=cfg.time.test_timeslots,
                                           mode='test')

    ### step2 : training

    # files
    exp_dir = os.path.join(exp_name)
    if os.path.exists(exp_dir) is False:
        os.makedirs(exp_dir)

    # Get Graph
    logging.info('Building Computational Graph...')

    # Get input shapes
    feature_name = data.minor_axis
    shapes = {}
    input_nodes = list(cfg.model.link.keys()) + ['weather']
    for key in input_nodes:
        shapes[key] = len(
            [item for item in feature_name if item.startswith(key)])
    # Build Graph
    prediction, loss, metric, label_sym = model.Build(shapes)

    # Optimizer
    learning_rate = tf.placeholder(shape=[],
                                   dtype=tf.float32,
                                   name='learning_rate')
    # optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    # Clip gradient
    optimizer = tf.train.AdamOptimizer(
        learning_rate=learning_rate).minimize(loss)
    # gvs = optimizer.compute_gradients(loss)
    # capped_gvs = [(tf.clip_by_value(grad, -clip_grad, clip_grad), var) for grad, var in gvs]
    # optimizer = optimizer.apply_gradients(capped_gvs)
    # create session ans saver
    sess = tf.Session(config=tf.ConfigProto(
        intra_op_parallelism_threads=num_tf_thread))
    saver = tf.train.Saver()  # needed below when resuming from a checkpoint

    # Build the summary operation and summary writer
    # Training_MAPE = tf.placeholder(shape=[], dtype=tf.float32, name='Training_MAPE')
    # Validation_MAPE = tf.placeholder(shape=[], dtype=tf.float32, name='Validation_MAPE')
    # training_summary = tf.summary.scalar("Training_MAPE", Training_MAPE)
    # validation_summary = tf.summary.scalar("Validation_MAPE", Validation_MAPE)
    # learning_rate_summary = tf.summary.scalar("Learning_rate", learning_rate)
    # summary_writer = tf.summary.FileWriter(exp_dir, sess.graph)

    # Model params
    if resume_epoch == 0:
        # initializing
        init = tf.global_variables_initializer()
        logging.info('Initializing params...')
        sess.run(init)
    else:
        ## Loading
        logging.info('Loading the model of epoch[{}]...'.format(resume_epoch))
        saver.restore(sess, exp_dir + '/model-{}'.format(resume_epoch))

    #training
    logging.info("Starting training...")
    for epoch in range(resume_epoch + 1, num_epoch + 1):
        # Reset loader and metric
        training_loader.reset()
        validation_loader.reset()

        error_training = np.zeros((36))
        count_training = np.zeros((36))

        error_validation = np.zeros((36))
        count_validation = np.zeros((36))

        tic = time.time()
        # Training
        for batch in training_loader:
            # concat data and label
            data = batch.data
            data.update(batch.label)
            # print (data['loss_scale:0'].shape, data['loss_scale:0'].dtype)
            data['learning_rate:0'] = lr
            data['is_training:0'] = True
            # Feed data into graph
            _, error, label_batch = sess.run([optimizer, metric, label_sym],
                                             feed_dict=data)
            mask = (label_batch == label_batch)
            # Update metric
            error_training = error_training + error.sum(0)
            count_training += mask.sum(0)

        toc = time.time()

        # validation
        for batch in validation_loader:
            # concat data and label
            data = batch.data
            data.update(batch.label)
            data['is_training:0'] = False
            # Feed data into graph
            error, label_batch = sess.run([metric, label_sym], feed_dict=data)
            mask = (label_batch == label_batch)
            # Update metric
            error_validation = error_validation + error.sum(0)
            count_validation += mask.sum(0)

        # Speend and Error
        logging.info(
            "Epoch[{}] Speed:{:.2f} samples/sec Training MAPE={:.5f} Validation_MAPE={:.5f}"
            .format(epoch, training_loader.data_num / (toc - tic),
                    error_training.sum() / count_training.sum(),
                    error_validation.sum() / count_validation.sum()))
        print('training', (error_training / count_training).reshape(6, 6))
        print('validation',
              (error_validation / count_validation).reshape(6, 6))

        # Summary
        # if (epoch % log_period == 0):
        #     train_summ, validation_summ, lr_summ = sess.run([training_summary,
        #                                                      validation_summary,
        #                                                      learning_rate_summary],
        #                                             feed_dict={'Training_MAPE:0' : error_training.mean(),
        #                                                        'Validation_MAPE:0' : error_validation.mean(),
        #                                                        'learning_rate:0' : lr})
        #     summary_writer.add_summary(train_summ, epoch)
        #     summary_writer.add_summary(validation_summ, epoch)
        #     summary_writer.add_summary(lr_summ, epoch)

        # Save checkpoint
        # if (epoch % save_period == 0):
        #     logging.info("Saving model of Epoch[{}]...".format(epoch))
        #     saver.save(sess, exp_dir + '/model', global_step=epoch)

        # Learning rate schedule
        if (epoch % lr_scheduler_period == 0):
            lr *= lr_decay

    logging.info("Optimization Finished!")

    # Prediction
    keys = list(prediction.keys())
    keys.sort()
    prediction = [prediction[key] for key in keys]
    traveltime_result = []
    for batch in testing_loader:
        data = batch.data
        data['is_training:0'] = False
        aux = batch.aux
        # Feed data into graph
        pred = sess.run(prediction, feed_dict=data)
        for index, key in enumerate(keys):
            intersection, tollgate = key.split('_')
            tollgate = tollgate[-1]
            time_now = cfg.time.test_timeslots[aux + 6:aux + 12]
            for i in range(6):
                avg_time = pred[index][0][i]
                left = datetime.strptime(time_now[i], "%Y-%m-%d %H:%M:%S")
                right = left + timedelta(minutes=cfg.time.time_interval)

                item = dict(intersection_id=intersection,
                            tollgate_id=tollgate,
                            time_window='[{},{})'.format(left, right),
                            avg_travel_time=avg_time)
                traveltime_result.append(item)

    # save prediction
    traveltime_result = pd.DataFrame(traveltime_result,
                                     columns=[
                                         'intersection_id', 'tollgate_id',
                                         'time_window', 'avg_travel_time'
                                     ])
    traveltime_result.to_csv(os.path.join(
        cfg.data.prediction_dir, '{}_travelTime.csv'.format(exp_name)),
                             sep=',',
                             header=True,
                             index=False)
    logging.info('Prediction Finished!')
    sess.close()