Example #1
0
	args.save_epoch_model = int(args.save_epoch_model*args.epdl)


# Mirror all stdout/stderr output into the requested log file. The handle is
# deliberately left open for the lifetime of the process since every
# subsequent print must go to it. (Idiom fix: `x is not None`, not `not x is None`.)
if args.log_file is not None:
	sys.stdout = open(args.log_file, 'w')
	sys.stderr = sys.stdout

# Seed torch's RNG so runs are reproducible for a given --seed.
torch.manual_seed(args.seed)


# Training bookkeeping: epochs are 1-indexed; best_acc tracks the best
# accuracy observed so far (updated later during evaluation).
start_epoch, num_epochs = 1, args.epochs
batch_size = args.batch_size
best_acc = 0.

print('\n[Phase 1] : Data Preparation')
# Load train/test splits plus dataset metadata for the configured dataset.
trainset, testset, num_classes, series_length = datasets.get_data(args)

# Flush now so the phase banner appears before any slow DataLoader warm-up,
# especially when stdout has been redirected to a log file.
sys.stdout.flush()
#abstain class id is the last class
abstain_class_id = num_classes
#simulate label noise if needed (currently disabled)
#trainset = label_noise.label_noise(args, trainset, num_classes)
#set data loaders: shuffle only the training stream
trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=2)
testloader = torch.utils.data.DataLoader(testset, batch_size=args.test_batch_size, shuffle=False, num_workers=2)

# Extra non-shuffled loader over the training set, used only to score
# training-set performance deterministically when --save_train_scores is set.
if args.save_train_scores:
	train_perf_loader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=False, num_workers=2)

def getNetwork(args):
	if args.loss_fn is None:
Example #2
0
# Mirror all stdout/stderr output into the requested log file; the handle
# stays open for the rest of the run. (Idiom fix: `x is not None` per PEP 8.)
if args.log_file is not None:
	sys.stdout = open(args.log_file, 'w')
	sys.stderr = sys.stdout

# Seed torch's RNG so runs are reproducible for a given --seed.
torch.manual_seed(args.seed)


# Training bookkeeping: epochs are 1-indexed; best_acc tracks the best
# accuracy observed so far (updated later during evaluation).
start_epoch, num_epochs = 1, args.epochs
batch_size = args.batch_size
best_acc = 0.

# Prefer GPU when available; all tensors/models should be moved to `device`.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

print('\n[Phase 1] : Data Preparation')
# Load splits, metadata, and train/valid samplers for the configured dataset.
trainset, testset, num_classes, series_length, train_sampler, valid_sampler = datasets.get_data(args)

# For k-fold cross-validation, the original train/test split is discarded:
# both halves are merged into one dataset that will be re-split per fold.
if args.kfold != 0:
    x_train, y_train, x_test, y_test  = datasets.getdatasetDict(args)
    # Merge inputs and targets into a single pool for fold generation.
    combinedInputs = np.concatenate((x_train, x_test), axis=0)
    combinedTargets = np.concatenate((y_train, y_test), axis=0)
    combinedDataset = kfold_torch_dataset.KfoldTorchDataset(combinedInputs, combinedTargets)
    

# The abstain (reject) option is modeled as an extra class appended after the
# real ones, so its id equals num_classes (real class ids are 0..num_classes-1).
abstain_class_id = num_classes
# Placeholders; the generate* helpers below rebind these module-level names to
# real DataLoader instances at runtime.
trainloader = testloader = train_perf_loader = {}
# (Removed a stray bare `abstain_class_id` expression statement — it evaluated
# the name and discarded the result, a no-op.)
def generateDataNormal():
    global trainloader
import sys
import time
import datetime

import numpy as np

from utils import gpu_utils, datasets, label_noise, kfold_torch_dataset

from networks import lstm, inceptionNet
from networks import config as cf

# Prefer GPU when available; all tensors/models should be moved to `device`.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

print('\n[Phase 1] : Data Preparation')
# get data for the provided noise percentage
trainset, testset, num_classes, series_length, train_sampler, valid_sampler = datasets.get_data(
    args)

# get data for simple dataset
# NOTE(review): with the two overrides below commented out, this second call
# receives the exact same `args` as the first, so the *_no_noise variants are
# presumably identical to the noisy ones — confirm whether the overrides
# should be re-enabled.
#args.noise_percentage = 0
#args.iteration = 0
trainset_no_noise, testset_no_noise, num_classes_no_noise, series_length_no_noise, train_sampler_no_noise, valid_sampler_no_noise = datasets.get_data(
    args)

# TODO: have to generate val and training for ai_crop dataset
# Training loader is intentionally unshuffled here (ordering handled by the
# samplers returned above — TODO confirm they are actually passed somewhere).
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=args.batch_size,
                                          shuffle=False,
                                          num_workers=2)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=args.test_batch_size,
                                         shuffle=False,
Example #4
0
# Scale the model-save interval by args.epdl (epochs-per-dataloader factor)
# when a save interval was requested. (Idiom fix: `x is not None`, not
# `not x is None`, per PEP 8 / E714.)
if args.save_epoch_model is not None:
    args.save_epoch_model = int(args.save_epoch_model * args.epdl)

# Mirror all stdout/stderr output into the requested log file; the handle
# stays open for the rest of the run.
if args.log_file is not None:
    sys.stdout = open(args.log_file, 'w')
    sys.stderr = sys.stdout

# Seed torch's RNG so runs are reproducible for a given --seed.
torch.manual_seed(args.seed)

# Training bookkeeping: epochs are 1-indexed; best_acc tracks the best
# accuracy observed so far (updated later during evaluation).
start_epoch, num_epochs = 1, args.epochs
batch_size = args.batch_size
best_acc = 0.

print('\n[Phase 1] : Data Preparation')
# Load train/test splits and the number of real classes for this dataset.
trainset, testset, num_classes = datasets.get_data(args)
# Flush so the phase banner appears before any slow loader warm-up,
# especially when stdout has been redirected to a log file.
sys.stdout.flush()
#abstain class id is the last class
abstain_class_id = num_classes
#simulate label noise if needed
trainset = label_noise.label_noise(args, trainset, num_classes)
#set data loaders: shuffle only the training stream
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=args.batch_size,
                                          shuffle=True,
                                          num_workers=2)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=args.test_batch_size,
                                         shuffle=False,
                                         num_workers=2)
Example #5
0
def _str2bool(s):
    """Parse a CLI boolean flag value ('true'/'1'/'yes' → True, else False)."""
    return str(s).lower() in ('1', 'true', 'yes', 'y')

def _int_list(s):
    """Parse a comma-separated list of ints, e.g. '400,200,50' → [400, 200, 50]."""
    return [int(tok) for tok in str(s).split(',')]

parse.add_argument('--dataset', type=str, default='mnist')
parse.add_argument('--batch_size', type=int, default=256)

parse.add_argument('--learning_rate', type=float, default=1e-3)
parse.add_argument('--epochs', type=int, default=200)
# BUG FIX: type=bool treats ANY non-empty string as True ('--vis False' was
# truthy); parse the string explicitly. Default is unchanged.
parse.add_argument('--vis', type=_str2bool, default=True)

parse.add_argument('--model', type=str, default='beta_VAE')
parse.add_argument('--input_dim', type=int, default=784)
parse.add_argument('--latent_dim', type=int, default=2)
# BUG FIX: type=list split a CLI value into individual characters
# (list('400') == ['4','0','0']); accept '400,200,50'. Default is unchanged.
parse.add_argument('--hid_dims', type=_int_list, default=[400, 200, 50])
parse.add_argument('--beta', type=float, default=1)

args = parse.parse_args()

# Load the dataset splits configured by the CLI args.
train_data, test_data = get_data(args)

# Dynamically import models/<args.model>.py and build the model from it;
# the module is expected to expose make_model(args).
model_module = import_module('models.' + args.model)
model = model_module.make_model(args)

opti = Adam(model.parameters(), lr=args.learning_rate)

# TensorBoard writer, only when visualization is enabled.
if args.vis:
    writer = SummaryWriter(log_dir='./runs')

# tqdm-wrapped epoch counter so training progress is visible on the console.
epoch_bar = tqdm(range(args.epochs))

for epoch in epoch_bar:
    epoch_loss = 0

    batch_bar = tqdm(train_data[1])