Example #1
def net_fn(inputs, theta=None, is_training=True):
  if args.model == 'mlp':
    mlp = models.MLP(
        nlayers=args.nlayers, nhid=args.nhid, with_bias=True, batch_norm=False)
    return mlp(inputs, theta, is_training)
  elif args.model == 'resnet':
    network = models.Net(args.model_size)
    return network(inputs, is_training)
Example #2
 def __init__(self, ex_name=experiment_name, m_name=model_name):
     net = models.Net(name=ex_name)
     net.load_state_dict(
         torch.load(os.path.join(root, ex_name, m_name)))
     use_cuda = torch.cuda.is_available()
     if use_cuda:
         net.cuda()
     self.net = net
     self.transforms = transforms.Compose([transforms.ToTensor()])
     self.cuda = use_cuda
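
A hedged usage sketch for the wrapper above: Predictor stands in for the (unnamed) enclosing class, and the image path is illustrative.

import torch
from PIL import Image

predictor = Predictor()                      # hypothetical name for the class above
img = Image.open('sample.png')               # any test image
x = predictor.transforms(img).unsqueeze(0)   # PIL image -> 1xCxHxW tensor
if predictor.cuda:
    x = x.cuda()
with torch.no_grad():
    pred = predictor.net(x).argmax(dim=1).item()
print('predicted class:', pred)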
Example #3
def run_experiments(finetune, kernel_sizes, filters, lr, pooling, weight_decay,
                    other_params):
    global embeddings_matrix, training_set, validation_set

    other_params['commit_hash'] = commit_hash

    (vocab_size, dimensions) = embeddings_matrix.shape
    net = models.Net(dimensions=dimensions,
                     finetune=finetune,
                     vocab_size=vocab_size,
                     kernel_sizes=kernel_sizes,
                     filters=filters,
                     dropout_rate=0.5,
                     pooling=pooling,
                     lr=lr,
                     weight_decay=weight_decay,
                     embeddings_matrix=embeddings_matrix)

    hyperparams = util.fill_dict(net.hyperparameters, other_params)
    logger.info('experiment with hyperparameters: {}'.format(
        json.dumps(hyperparams, sort_keys=True, indent=None)))

    with get_archiver(datadir='data/models',
                      suffix="_" + commit_hash[:6]) as a1, get_archiver(
                          datadir='data/results',
                          suffix="_" + commit_hash[:6]) as a:

        save_model(hyperparams, net, a.getFilePath)

        early_stopping = train.EarlyStopping(c.monitor, c.patience,
                                             c.monitor_objective)
        model_checkpoint = train.ModelCheckpoint(a1.getFilePath('checkpoint'))
        csv_logger = train.CSVLogger(a.getFilePath('logger.csv'))

        adam_config = train.AdamConfig(
            lr=net.hyperparameters['lr'],
            beta_1=net.hyperparameters['beta_1'],
            beta_2=net.hyperparameters['beta_2'],
            epsilon=net.hyperparameters['epsilon'],
            weight_decay=net.hyperparameters['weight_decay'])

        history = train.fit(
            net,
            training_set,
            validation_set,
            batch_size=c.batch_size,
            epochs=c.epochs,
            validation_split=0.2,
            callbacks=[early_stopping, model_checkpoint, csv_logger],
            optimizer=adam_config)

        save_history(history, a.getDirPath())

    return
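
get_archiver is project-specific and not shown; the snippet only relies on it being a context manager whose value exposes getFilePath(name) and getDirPath(). A minimal stand-in consistent with that usage (a sketch, not the project's actual implementation):

import os
from contextlib import contextmanager

@contextmanager
def get_archiver(datadir, suffix=''):
    """Yield an object that maps artifact names to paths in a fresh run directory."""
    class _Archiver:
        def __init__(self, dirpath):
            self._dir = dirpath

        def getFilePath(self, name):
            return os.path.join(self._dir, name)

        def getDirPath(self):
            return self._dir

    run_dir = os.path.join(datadir, 'run' + suffix)
    os.makedirs(run_dir, exist_ok=True)
    yield _Archiver(run_dir)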
Example #4
def main():
    global args, best_micro, base_sum
    args = parser.parse_args()

    base_transf, embed = data_got.one_sample_base2avg(
        batch_size=args.batch_size,
        sample_num=args.num_sample,
        samp_freq=args.samp_freq,
        num_class=args.num_class)
    Ftest_loader, novel_loader, novelall_loader, _ = data_got.Nload_data(
        batch_size=args.batch_size,
        sample_num=args.num_sample,
        num_class=args.num_class)
    embed = torch.from_numpy(embed).float()
    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    model = models.Net(embed, args.lstm_hid_dim,
                       num_classes=args.num_class).cuda()

    print('==> Reading from model checkpoint..')
    assert os.path.isfile(
        args.model), 'Error: no model checkpoint directory found!'
    checkpoint = torch.load(args.model)
    model.load_state_dict(checkpoint['state_dict'])
    print("=> loaded model checkpoint '{}' (epoch {})".format(
        args.model, checkpoint['epoch']))
    cudnn.benchmark = True
    real_weight = model.classifier.fc.weight.data

    criterion = nn.MSELoss()
    trans_model = models.Transfer().cuda()
    optimizer = torch.optim.Adam(trans_model.parameters(),
                                 lr=0.012,
                                 betas=(0.9, 0.99))

    for epoch in range(args.epochs):
        train_loss, base_sum = train(base_transf, trans_model, model,
                                     criterion, optimizer, real_weight)
        print("loss", train_loss)
    imprint(novel_loader, model, trans_model, base_sum)
    model_criterion = nn.BCELoss()

    model_optimizer = torch.optim.Adam(model.parameters(),
                                       lr=args.lr,
                                       betas=(0.9, 0.99))
    for i in range(2):
        print("ft start")
        fine_tuning(novelall_loader, model, model_criterion, model_optimizer)
    validate(Ftest_loader, model)
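
imprint() is defined elsewhere in this repository; the core of weight imprinting is to overwrite each novel class's classifier row with the normalized mean embedding of its support samples. A minimal sketch of that idea, ignoring the trans_model and base_sum arguments for brevity (the model.extract feature hook is an assumption):

import torch
import torch.nn.functional as F

def imprint_sketch(novel_loader, model):
    """Set fc rows of novel classes to their normalized mean support embeddings."""
    model.eval()
    feats, labels = [], []
    with torch.no_grad():
        for x, y in novel_loader:
            feats.append(model.extract(x.cuda()))  # hypothetical feature extractor
            labels.append(y.cuda())
    feats, labels = torch.cat(feats), torch.cat(labels)
    weight = model.classifier.fc.weight.data
    for c in labels.unique():
        centroid = feats[labels == c].mean(dim=0)
        weight[c] = F.normalize(centroid, dim=0)  # assumes labels index fc rows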
Example #5
def main():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('expert_policy_file', type=str)
    parser.add_argument('envname', type=str)
    parser.add_argument('--num_its', type=int, default=10)
    parser.add_argument('--num_train_rolls', type=int, default=20)
    parser.add_argument('--num_epochs', type=int, default=5)
    args = parser.parse_args()

    with open(
            op.join(
                'expert_data', args.envname + '_' +
                '{}_eps.pkl'.format(args.num_train_rolls)),
            "rb") as input_file:
        train_data = pickle.load(input_file)

    print("Observations : ", train_data['observations'].shape)
    print("Actions : ", train_data['actions'].shape)

    data = {}
    data['X'], data['y'] = {}, {}
    data['X']['train'], data['X']['test'], data['y']['train'], data['y'][
        'test'] = train_test_split(train_data['observations'],
                                   np.squeeze(train_data['actions']),
                                   test_size=0.2)

    # Store checkpoints as well
    net = models.Net(obs_size=train_data['observations'].shape[1],
                     hidden=256,
                     act_space_size=train_data['actions'].shape[2])

    net.compile(optimizer='adam', loss='mean_squared_error')

    env = gym.make(args.envname)
    max_steps = env.spec.max_episode_steps
    policy_fn = load_policy.load_policy(args.expert_policy_file)

    train_dagger(env,
                 expert=policy_fn,
                 policy=net,
                 num_its=args.num_its,
                 num_epochs=args.num_epochs,
                 data=data,
                 cp_dir='checkpoints/Dagger',
                 cp_name='{}_train_rolls_{}_epochs_'.format(
                     args.num_train_rolls, args.num_epochs))
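
train_dagger is defined elsewhere in the repository; the DAgger loop it names alternates between rolling out the current policy, relabeling the visited states with the expert, and retraining on the aggregated dataset. A compressed sketch of one round under the snippet's data layout (the helper name and the Keras-style fit/predict calls are assumptions):

import numpy as np

def dagger_iteration(env, policy, expert, data, epochs, max_steps=1000):
    """One DAgger round: roll out `policy`, label states with `expert`, retrain."""
    obs_list, act_list = [], []
    obs, done, steps = env.reset(), False, 0
    while not done and steps < max_steps:
        obs_list.append(obs)
        act_list.append(expert(obs[None, :]))   # the expert relabels the state
        action = policy.predict(obs[None, :])   # but the learner chooses the action
        obs, _, done, _ = env.step(action)
        steps += 1
    # aggregate the relabeled data and retrain on the union
    data['X']['train'] = np.concatenate([data['X']['train'], np.array(obs_list)])
    data['y']['train'] = np.concatenate(
        [data['y']['train'], np.squeeze(np.array(act_list))])
    policy.fit(data['X']['train'], data['y']['train'], epochs=epochs)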
Example #6
def main():
    global args, best_micro
    args = parser.parse_args()

    base_transf, embed = data_got.one_sample_base2avg(
        batch_size=args.batch_size, sample_num=args.num_sample,
        samp_freq=args.Bsamp_freq, num_class=args.num_class)
    Ftest_loader, novel_loader, novelall_loader, test_y = data_got.Nload_data(
        batch_size=args.batch_size, sample_num=args.num_sample,
        samp_freq=args.Nsamp_freq, num_class=args.num_class)
    embed = torch.from_numpy(embed).float()
    model = models.Net(embed, args.lstm_hid_dim, num_classes=args.num_class).cuda()

    print('==> Reading from model checkpoint..')
    assert os.path.isfile(args.model), 'Error: no model checkpoint directory found!'
    checkpoint = torch.load(args.model)
    model.load_state_dict(checkpoint['state_dict'])
    print("=> loaded model checkpoint '{}' (epoch {})"
          .format(args.model, checkpoint['epoch']))
    cudnn.benchmark = True
    real_weight = model.classifier.fc.weight.data

    criterion = nn.MSELoss()
    trans_model = models.Transfer().cuda()
    optimizer = torch.optim.Adam(trans_model.parameters(), lr=0.013, betas=(0.9, 0.99))

    for epoch in range(args.epochs):
        train_loss = train(base_transf, trans_model, model, criterion,
                           optimizer, real_weight)
        print("loss", train_loss)
    tail_weight = imprint(novel_loader, model, trans_model)
    # model_criterion= nn.BCELoss()
    #
    # model_optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.99))
    # for i in range(3):
    #     train_loss, trn_micro, trn_macro = fine_tuning(novelall_loader, model,  model_criterion, model_optimizer)
    output_all = []
    F1 = np.zeros(54)
    for i in range(args.Nsamp_freq):
        print("ensemble start! this is classifier", i)
        model.classifier.fc.weight.data = tail_weight[i]
        output = validate(Ftest_loader, model)
        output_all.append(output)
    output_all = torch.sum(torch.tensor(output_all), 0) / args.Nsamp_freq
    output_all[output_all > 0.5] = 1
    output_all[output_all <= 0.5] = 0
    for l in range(54):
        F1[l] = f1_score(test_y[:, l], output_all[:, l], average='binary')
    print("per-class F1 results:")
    print(F1)
Example #7
File: nfq.py Project: anuhyav/nfq
def main():
    train_env = envs.make_cartpole(100)
    test_env = envs.make_cartpole(3000)
    set_random_seeds(train_env, test_env)

    net = models.Net()
    optimizer = optim.Rprop(net.parameters())
    # TODO Initialize weights randomly within [-0.5, 0.5]

    for epoch in range(500):
        rollout = generate_rollout(train_env, net)
        if epoch % 10 == 9:
            print('Epoch {:4d} | Steps: {:3d}'.format(epoch + 1, len(rollout)))
        train(net, optimizer, rollout)
        hint_to_goal(net, optimizer)
        # test(test_env, net)

    train_env.close()
    test_env.close()
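
hint_to_goal is the NFQ paper's heuristic: alongside real transitions, the Q-network is also trained on artificial goal-region states whose target value is clamped to zero, which anchors the output scale. A minimal sketch of that idea (the state dimensionality, the goal region, and the net's state-action input layout are all assumptions, not this repository's code):

import torch
import torch.nn.functional as F

def hint_to_goal_sketch(net, optimizer, num_samples=100):
    """Train on synthetic goal-region states with target Q = 0."""
    # Hypothetical goal region for CartPole: cart centered, pole nearly upright.
    goal_states = torch.rand(num_samples, 4) * 0.1 - 0.05
    for action in (0, 1):
        actions = torch.full((num_samples, 1), float(action))
        q = net(torch.cat([goal_states, actions], dim=1))
        loss = F.mse_loss(q, torch.zeros_like(q))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()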
Example #8
            n = 0
            dirs = []
            dirs.append(name)
            dirs = np.concatenate(dirs)
            for i in pred:
                result = Image.fromarray((i).astype(np.uint8))
                result.save(args.save_img + '/' + dirs[n])
                #torchvision.utils.save_image(i, 'preds/' + dirs[n])
                n += 1


if __name__ == '__main__':
    args = parser_2.arg_parse()
    ''' setup GPU '''
    torch.cuda.set_device(args.gpu)
    ''' prepare data_loader '''
    print('===> prepare data loader ...')

    test_loader2 = torch.utils.data.DataLoader(data2.DATA2(args),
                                               batch_size=args.test_batch,
                                               num_workers=args.workers,
                                               shuffle=False)
    ''' prepare model '''
    model = models.Net(args)
    model_std = torch.load(os.path.join('model_best.pth.tar'))
    model.load_state_dict(model_std)
    model.cuda()
    print("model loaded")

    save_imgs(model, test_loader2, args)
Example #9
def main():
    global args, best_prec1
    args = parser.parse_args()

    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    model = models.Net(num_classes=200, norm=False, scale=False).cuda()

    cudnn.benchmark = True

    # Data loading code
    normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

    train_dataset = loader.ImageLoader(args.data,
                                       transforms.Compose([
                                           transforms.Resize(256),
                                           transforms.RandomCrop(224),
                                           transforms.RandomHorizontalFlip(),
                                           transforms.ToTensor(),
                                           normalize,
                                       ]),
                                       train=True,
                                       num_classes=200,
                                       num_train_sample=args.num_sample)

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=args.batch_size,
        sampler=train_dataset.get_balanced_sampler(),
        num_workers=args.workers,
        pin_memory=True)

    val_loader = torch.utils.data.DataLoader(loader.ImageLoader(
        args.data,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]),
        num_classes=200,
        novel_only=args.test_novel_only),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    print('    Total params: %.2fM' %
          (sum(p.numel() for p in model.parameters()) / 1000000.0))

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    extractor_params = list(map(id, model.extractor.parameters()))
    classifier_params = filter(lambda p: id(p) not in extractor_params,
                               model.parameters())

    optimizer = torch.optim.SGD([{
        'params': model.extractor.parameters()
    }, {
        'params': classifier_params,
        'lr': args.lr * 10
    }],
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=4,
                                                gamma=0.94)

    title = 'AllJoint'
    if args.resume:
        print('==> Resuming from checkpoint..')
        assert os.path.isfile(
            args.resume), 'Error: no checkpoint directory found!'
        checkpoint = torch.load(args.resume)
        args.start_epoch = checkpoint['epoch']
        best_prec1 = checkpoint['best_prec1']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        print("=> loaded checkpoint '{}' (epoch {})".format(
            args.resume, checkpoint['epoch']))
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'),
                        title=title,
                        resume=True)
    else:
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names([
            'Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.',
            'Valid Acc.'
        ])

    if args.evaluate:
        validate(val_loader, model, criterion)
        return

    for epoch in range(args.start_epoch, args.epochs):
        scheduler.step()
        lr = optimizer.param_groups[1]['lr']
        print('\nEpoch: [%d | %d] LR: %f' % (epoch + 1, args.epochs, lr))
        # train for one epoch
        train_loss, train_acc = train(train_loader, model, criterion,
                                      optimizer, epoch)

        # evaluate on validation set
        test_loss, test_acc = validate(val_loader, model, criterion)

        # append logger file
        logger.append([lr, train_loss, test_loss, train_acc, test_acc])

        # remember best prec@1 and save checkpoint
        is_best = test_acc > best_prec1
        best_prec1 = max(test_acc, best_prec1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            },
            is_best,
            checkpoint=args.checkpoint)

    logger.close()
    logger.plot()
    savefig(os.path.join(args.checkpoint, 'log.eps'))

    print('Best acc:')
    print(best_prec1)
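
A note on the optimizer above: the SGD constructor builds two parameter groups, the extractor at args.lr (group 0) and the remaining classifier parameters at args.lr * 10 (group 1), which is why the epoch loop reads optimizer.param_groups[1]['lr'] rather than a single global rate. A quick check:

for i, group in enumerate(optimizer.param_groups):
    print('group', i, 'lr:', group['lr'])  # group 0: extractor, group 1: classifier (10x)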
Example #10
import models
import data
import torch
import os
from tqdm import tqdm
import wandb
import numpy as np
import matplotlib.pyplot as plt
from utils import compute_accuracy, visualize_predictions
from setup import (BATCH_SIZE, MODEL, DEVICE, LEARNING_RATE, NB_EPOCHS,
                   FILE_NAME, DROPOUT, WEIGHTS_INIT, ALPHA, KEEP_WEIGHTS, DENOISER)


train_loader, test_loader = data.get_loaders(BATCH_SIZE)

model = models.Net(MODEL, dropout=DROPOUT)
if WEIGHTS_INIT:
    model.apply(models.init_weights)
model.to(DEVICE)

wandb.watch(model)

if KEEP_WEIGHTS and os.path.exists(f"weights/{FILE_NAME}.pt"):
    print("Weights found")
    model.load_state_dict(torch.load(f"weights/{FILE_NAME}.pt"))
else:
    os.makedirs("weights/", exist_ok=True)
    print("Weights not found")

if DENOISER:
    denoiser = models.DeNoiser()
Example #11
    final_skill_sample.append(Flat_input)
    print("LENGTH OF ACTUAL MODEL IS", len(Flat_input))
    if len(task_samples) == 0:
        accuracies = CAE_AE_TRAIN(net_shapes,
                                  task_samples + final_skill_sample, 50)
    else:
        accuracies = CAE_AE_TRAIN(net_shapes,
                                  task_samples + final_skill_sample, 250)
    return accuracies


######################################################################################################
#                                   GLOBAL VARIABLES
######################################################################################################
#net=models.Net().to(device)
net_reset = models.Net().to(device)
Actual_Accuracy = []
criterion = nn.CrossEntropyLoss()
student_model = models.SPLIT_CIFAR_CAE_TWO_SKILLS().to(device)
teacher_model = models.SPLIT_CIFAR_CAE_TWO_SKILLS().to(device)
vae_optimizer = optim.Adam(student_model.parameters(),
                           lr=0.0001)  #, amsgrad=True)
lam = 0.0001
Actual_Accuracy = []
threshold_batchid = []
#biased training variables
nSamples = 10
nBiased = min(nSamples, 10)
trainBias = 0.5
minReps = 1
nReps = 20
Example #12
torch.backends.cudnn.benchmark = True

source_dataset, target_dataset = get_dataset(args.task)

source_loader = torch.utils.data.DataLoader(source_dataset,
                                            batch_size=args.batch_size,
                                            shuffle=True,
                                            num_workers=0)

target_loader = torch.utils.data.DataLoader(target_dataset,
                                            batch_size=args.batch_size,
                                            shuffle=True,
                                            num_workers=0)

model = models.Net(task=args.task).cuda()

if args.task == 's2m':
    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)
else:
    optimizer = torch.optim.Adam(model.parameters(), args.lr)

if args.resume:
    print("=> loading checkpoint '{}'".format(args.resume))
    checkpoint = torch.load(args.resume)
    args.start_epoch = checkpoint['epoch']
    best_prec1 = checkpoint['best_prec1']
Example #13
idx_permute = [np.random.permutation(28**2) for _ in range(10)]
#=vis.bar(X=[1,2,3])
options_task_network = dict(fillarea=True,
                            width=400,
                            height=400,
                            xlabel='Task Network',
                            ylabel='T',
                            title='TASK_NETWORK')
#win_task_network = vis.line(Y=np.array([0.001,0.001]),win='Task Net Dist',name='TASK NET DIST',opts=options_task_network)
######################################################################################################
#                                   TRAINING STARTS HERE - MNIST + CAE
######################################################################################################

allAcc = []
for permutation in range(0, 10):
    model = models.Net().to(device)
    optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
    task_samples = []
    print("########## \n Threshold id is", threshold_batchid, "\n#########")
    Train_loader, Test_loader = RELOAD_DATASET(idx_permute[permutation])
    SHOW_TEST_TRAIN_IMAGES_SAMPLE(permutation)
    accuracy = train(model, Train_loader, Test_loader, optimizer, 3,
                     'MNIST Skill ' + str(permutation))
    options = dict(fillarea=True,
                   width=400,
                   height=400,
                   xlabel='Skill',
                   ylabel='Actual_Accuracy',
                   title='Actual_Accuracy')
    Actual_Accuracy.append(int(accuracy))
    print("Actual acc", Actual_Accuracy)
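
RELOAD_DATASET above rebuilds the MNIST loaders under one of the fixed pixel permutations in idx_permute; its definition is outside this excerpt. A minimal sketch of the usual mechanism (the function name is hypothetical):

import numpy as np
import torch
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

def make_permuted_mnist(idx_perm, batch_size=64):
    """MNIST loaders whose 28x28 pixels are shuffled by one fixed permutation."""
    idx = torch.as_tensor(np.asarray(idx_perm))
    permute = transforms.Lambda(lambda x: x.view(-1)[idx].view(1, 28, 28))
    tf = transforms.Compose([transforms.ToTensor(), permute])
    train = datasets.MNIST('data', train=True, download=True, transform=tf)
    test = datasets.MNIST('data', train=False, transform=tf)
    return (DataLoader(train, batch_size=batch_size, shuffle=True),
            DataLoader(test, batch_size=batch_size))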
Example #14
                    help='LR is multiplied by gamma on schedule.')
parser.add_argument(
    '--log-interval',
    type=int,
    default=10,
    metavar='N',
    help='how many batches to wait before logging training status')
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()

torch.manual_seed(args.seed)

device = torch.device("cuda" if use_cuda else "cpu")

#model = Resnet.ResNet18()
net = models.Net()  # alternatives tried: dense_net2.DenseNet(growthRate=12), Resnet.ResNet18(), Resnet.Net()
print(net)
criterion = nn.CrossEntropyLoss()
# if Flags['CIFAR_100'] :
#     optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9,weight_decay=1e-4)
# else:
#     optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9)

transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])

transform_test = transforms.Compose([
Example #15
def main():
    global args
    args = parser.parse_args()

    debug = 0  # 0: normal mode 1: debug mode

    # Data loading code
    # args.data: path to the dataset
    traindir = os.path.join(args.data, 'RealChallengeFree/train')
    testdir = os.path.join(args.data, 'RealChallengeFree/Test')

    train_dataset = utils.CURETSRDataset(
        traindir,
        transforms.Compose([
            transforms.Resize([28, 28]),
            transforms.ToTensor(), utils.l2normalize, utils.standardization
        ]))
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)

    test_dataset = utils.CURETSRDataset(
        testdir,
        transforms.Compose([
            transforms.Resize([28, 28]),
            transforms.ToTensor(), utils.l2normalize, utils.standardization
        ]))
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              num_workers=args.workers,
                                              pin_memory=True)

    model = models.Net()
    model = torch.nn.DataParallel(model).cuda()
    print("=> creating model %s " % model.__class__.__name__)

    savedir = 'CNN_iter'
    checkpointdir = os.path.join('./checkpoints', savedir)

    if not debug:
        os.mkdir(checkpointdir)
        print('log directory: %s' % os.path.join('./logs', savedir))
        print('checkpoints directory: %s' % checkpointdir)

    # Set the logger
    if not debug:
        logger = Logger(os.path.join('./logs/', savedir))

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print(
                "=> loaded checkpoint '{}' (epoch {}, best_prec1 @ Source {})".
                format(args.resume, checkpoint['epoch'], best_prec1))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    if args.evaluate:
        evaluate(test_loader, model, criterion)
        return

    cudnn.benchmark = True

    timestart = time.time()
    best_prec1 = 0

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        print('\n*** Start Training *** \n')
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        print('\n*** Start Testing *** \n')
        test_loss, test_prec1, _ = evaluate(test_loader, model, criterion)

        info = {'Testing loss': test_loss, 'Testing Accuracy': test_prec1}

        # remember best prec@1 and save checkpoint
        is_best = test_prec1 > best_prec1
        best_prec1 = max(test_prec1, best_prec1)

        if is_best:
            best_epoch = epoch + 1

        if not debug:
            for tag, value in info.items():
                logger.scalar_summary(tag, value, epoch + 1)

            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'best_prec1': best_prec1,
                    'last_prec1': test_prec1,
                    'optimizer': optimizer.state_dict()
                }, is_best, checkpointdir)

    print('Best epoch: ', best_epoch)
    print('Total processing time: %.4f' % (time.time() - timestart))
Example #16
def main():
    #The main() is used to finish all the training operation.

    #Record the training log using Tensorboard
    writer = SummaryWriter()

    #Load model
    net = models.Net(name=experiment_name)

    #Use CUDA or not
    use_cuda = torch.cuda.is_available()
    if use_cuda:
        net.cuda()

    #Define the transform for loading image
    trans = transforms.Compose([transforms.RandomHorizontalFlip(),
                                transforms.Resize(size=(64, 64)),
                                transforms.ToTensor()])

    #Set Adam as our optimizer
    optimizer = optim.Adam(net.parameters(), lr=lr, weight_decay=5e-4)

    #Load train and test data and set corresponding parameters
    TrainData = MyLoader('./train.txt', trans)
    trainloader = DataLoader(dataset=TrainData, batch_size=batch_size, num_workers=16, shuffle=True, )
    TestData = MyLoader('./test.txt', trans)
    testloader = DataLoader(dataset=TestData, batch_size=batch_size, num_workers=16, shuffle=True)

    #Set log path to save the training log.
    log_path = os.path.join(logdir, experiment_name+extra_name)
    if not os.path.exists(log_path):
        os.mkdir(log_path)

    #Patience is used to decide when to end training.
    step = 1
    duration = 0.
    precision = 0.
    patience = 0

    #Use CrossEntropyLoss
    criterion = nn.CrossEntropyLoss().cuda()

    for epoch in range(1, 501):
        #Train
        for batch_idx, (img, target) in enumerate(trainloader):
            start_time = time.time()
            if use_cuda:
                img, target = img.cuda(), target.cuda()
            img, target = Variable(img), Variable(target)

            pred = net.train()(img)

            loss = criterion(pred, target)

            learning_rate = adjust_lr(optimizer=optimizer, cur_stp=step)

            #Show the information about training
            duration += time.time() - start_time
            if step % steps_to_show == 0:
                speed = batch_size * steps_to_show / duration
                duration = 0.
                print('=>%s: epoch: %d, step: %d, loss=%f, lr=%f, (%.1f examples/sec)'
                      % (datetime.now().strftime('%Y-%m-%d %H:%M:%S'), epoch,
                         step, loss.item(), learning_rate, speed))

            #Save model
            if step % steps_to_save == 0:
                model_path = os.path.join(log_path, 'model_%d.tar' % step)
                torch.save(net.state_dict(), model_path)
                print('model saved at:', model_path)

            writer.add_scalar('loss', loss.item(), step)
            writer.add_scalar('lr', learning_rate, step)
            step += 1

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        correct_num = 0.
        count = 0

        #Evaluation
        for batch_idx, (img, target) in enumerate(testloader):
            if use_cuda:
                img, target = img.cuda(), target.cuda()
            img = Variable(img)
            pred = net.eval()(img)
            correct_num += accuracy(pred, target)
            count += target.size(0)

        #Decide whether to end training according to the precision and patience
        precision_ = float(correct_num) / float(count)
        if precision_ > precision:
            precision = precision_
            model_path = os.path.join(log_path, 'model_%.4f.tar' % precision)
            torch.save(net.state_dict(), model_path)
            print('accuracy: ', precision)
            print('model saved at:', model_path)
        else:
            patience += 1
            if patience == 100:
                print("Time to stop")
                writer.close()
                quit()
    writer.close()
    model_path = os.path.join(log_path, 'model_final.tar')
    torch.save(net.state_dict(), model_path)
Example #17
######################################################################################################
# LOADING OF VAE OUTPUT SKILL WEIGHTS BACK INTO THE MNIST NETWORK
######################################################################################################
def loadWeights_mnsit(weights_to_load, net):
    net.conv1.weight.data = torch.from_numpy(weights_to_load[0]).cuda()
    net.conv1.bias.data = torch.from_numpy(weights_to_load[1]).cuda()
    net.conv2.weight.data = torch.from_numpy(weights_to_load[2]).cuda()
    net.conv2.bias.data = torch.from_numpy(weights_to_load[3]).cuda()
    net.fc1.weight.data = torch.from_numpy(weights_to_load[4]).cuda()
    net.fc1.bias.data = torch.from_numpy(weights_to_load[5]).cuda()
    net.fc2.weight.data = torch.from_numpy(weights_to_load[6]).cuda()
    net.fc2.bias.data = torch.from_numpy(weights_to_load[7]).cuda()
    return net

print("device is ", device)
model = models.Net().to(device)
model_reset = models.Net().to(device)
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
student_model = models.CAE().to(device)  # nn.DataParallel(models.CAE().to(device))
teacher_model = models.CAE().to(device)  # nn.DataParallel(models.CAE().to(device))
vae_optimizer = optim.Adam(student_model.parameters(), lr=0.0001)
lam = 0.001
Actual_Accuracy = []
threshold_batchid = []

#####################################################################################################################
# For Viewing the test/train images-just for confirmation
#####################################################################################################################
classes = ('0','1', '2', '3', '4','5','6','7','8','9')
# functions to show an image
#plt.show()
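
loadWeights_mnsit above is the decode direction; the matching encode direction (not shown in this excerpt) flattens the same eight tensors, in the same order, into the list the CAE consumes. A sketch of that inverse (the function name is hypothetical):

def extractWeights_mnist(net):
    """Collect conv/fc weights and biases in the order loadWeights_mnsit expects."""
    layers = [net.conv1.weight, net.conv1.bias,
              net.conv2.weight, net.conv2.bias,
              net.fc1.weight, net.fc1.bias,
              net.fc2.weight, net.fc2.bias]
    return [p.data.cpu().numpy() for p in layers]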
Example #18
def main():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('expert_policy_file', type=str)
    parser.add_argument('envname', type=str)
    parser.add_argument('--num_train_rolls', type=int, default=20)
    parser.add_argument('--num_its', type=int, default=10)
    parser.add_argument('--render', action='store_true')
    parser.add_argument('--train', action='store_true')
    args = parser.parse_args()

    env = gym.make(args.envname)
    max_steps = env.spec.max_episode_steps
    policy_fn = load_policy.load_policy(args.expert_policy_file)

    policies = {}
    policies['expert'] = policy_fn
    # Store checkpoints as well
    policies['BC'] = models.Net(obs_size=env.observation_space.shape[0],
                                hidden=256,
                                act_space_size=env.action_space.shape[0])

    policies['Dagger'] = models.Net(obs_size=env.observation_space.shape[0],
                                    hidden=256,
                                    act_space_size=env.action_space.shape[0])

    policy_wts = {}

    regex = re.compile('.*{}_iters'.format(args.num_its - 1))
    for root, dirs, files in sorted(os.walk('./checkpoints/Dagger/{}'.format(args.envname))):
        for dir in dirs:
            if regex.match(dir):
                policy_wts['Dagger'] = op.join(root, dir, 'cp.ckpt')

    regex = re.compile('.*{}_train_rolls'.format(args.num_train_rolls))
    for root, dirs, files in sorted(os.walk('./checkpoints/BC/{}'.format(args.envname))):
        for dir in dirs:
            if regex.match(dir):
                policy_wts['BC'] = op.join(root, dir, 'cp.ckpt')

    print(policy_wts['Dagger'], policy_wts['BC'])
    checkpoint_path = policy_wts['Dagger']
    checkpoint_dir = os.path.dirname(checkpoint_path)
    restore_checkpoint = tf.train.Checkpoint(model=policies['Dagger'])
    restore_checkpoint.restore(checkpoint_path).expect_partial()

    checkpoint_path = policy_wts['BC']
    checkpoint_dir = os.path.dirname(checkpoint_path)
    restore_checkpoint = tf.train.Checkpoint(model=policies['BC'])
    restore_checkpoint.restore(checkpoint_path).expect_partial()

    # Visualize the trained policy and calculate returns
    returns, avg_returns = {}, {}
    num_episodes = 20

    for policy in ['expert', 'BC', 'Dagger']:
        returns[policy] = []
        for i in range(num_episodes):
            returns[policy].append(run_policy(env, policies[policy], policy=policy))

        avg_returns[policy] = np.mean(returns[policy])

    # with open('{}_{}_eps_{}_epochs.pkl', 'wb') as file:
    #     pickle.dump(avg_returns, file, protocol=2)

        print("Average return from {} : ".format(policy), avg_returns[policy])
Example #19
def main():
    global args
    args = parser.parse_args()
    traindir = os.path.join(args.data, 'RealChallengeFree/train')
    testdir = os.path.join(args.data, 'RealChallengeFree/Test')
    train_dataset = utils.CURETSRDataset(
        traindir,
        transforms.Compose([
            transforms.Resize([28, 28]),
            transforms.ToTensor(), utils.l2normalize, utils.standardization
        ]))
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=False,
                                               num_workers=args.workers,
                                               pin_memory=True)
    test_dataset = utils.CURETSRDataset(
        testdir,
        transforms.Compose([
            transforms.Resize([28, 28]),
            transforms.ToTensor(), utils.l2normalize, utils.standardization
        ]))
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              num_workers=args.workers,
                                              pin_memory=True)

    model = models.AutoEncoder()
    model = torch.nn.DataParallel(model).cuda()
    print("=> creating model %s " % model.__class__.__name__)
    criterion = nn.MSELoss().cuda()

    savedir = 'AutoEncoder'
    checkpointdir = os.path.join('./checkpoints', savedir)
    os.makedirs(checkpointdir, exist_ok=True)
    print('log directory: %s' % os.path.join('./logs', savedir))
    print('checkpoints directory: %s' % checkpointdir)
    logger = Logger(os.path.join('./logs/', savedir))
    if args.evaluate:
        print("=> loading checkpoint ")
        checkpoint = torch.load(
            os.path.join(checkpointdir, 'model_best.pth.tar'))
        model.load_state_dict(checkpoint['AE_state_dict'], strict=False)
        modelCNN = models.Net()
        modelCNN = torch.nn.DataParallel(modelCNN).cuda()
        checkpoint2 = torch.load('./checkpoints/CNN_iter/model_best.pth.tar')
        modelCNN.load_state_dict(checkpoint2['state_dict'], strict=False)
        evaluate(test_loader, model, modelCNN, criterion)
        return
    optimizer = torch.optim.Adam(model.parameters(),
                                 args.lr,
                                 weight_decay=args.weight_decay)
    cudnn.benchmark = True

    timestart = time.time()

    if args.finetune:
        print("=> loading checkpoint ")
        checkpoint = torch.load(
            os.path.join(checkpointdir, 'model_best.pth.tar'))
        model.load_state_dict(checkpoint['AE_state_dict'], strict=False)
        optimizer.load_state_dict(checkpoint['optimizer'])

    best_loss = 10e10
    # train_accs = []
    # test_accs = []
    loss_epochs = []

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        print('\n*** Start Training *** \n')
        loss_train = train(train_loader, test_loader, model, criterion,
                           optimizer, epoch)
        print(loss_train)
        loss_epochs.append(loss_train)
        is_best = loss_train < best_loss
        print(best_loss)
        best_loss = min(loss_train, best_loss)
        info = {
            'Loss': loss_train
            # 'Testing Accuracy': test_prec1
        }
        # if not debug:
        for tag, value in info.items():
            logger.scalar_summary(tag, value, epoch + 1)
        if is_best:
            best_epoch = epoch + 1
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'AE_state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict()
            }, is_best, checkpointdir)
    generate_plots(range(args.start_epoch, args.epochs), loss_epochs)
    print('Best epoch: ', best_epoch)
    print('Total processing time: %.4f' % (time.time() - timestart))
    print('Best loss:', best_loss)
Example #20
    #     # print(imgset.shape)
    #     if label != 1:
    #         for count, img in enumerate(imgset):
    #             plt.subplot(1, len(imgset) + 1, count + 1)
    #             plt.imshow(img[0][0])
    #             # print(img.shape)
    #         print(mainImg.shape)
    #         plt.subplot(1, len(imgset) + 1, len(imgset) + 1)
    #         plt.imshow(mainImg[0][0])
    #         count += 1
    #         break
    #     # break

    # creating the original network and counting the parameters of different networks
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    siameseBaseLine = models.Net()
    siameseBaseLine = siameseBaseLine.to(device)
    count_parameters(siameseBaseLine)

    # actual training
    import torch.optim as optim

    optimizer = optim.Adam(siameseBaseLine.parameters(), lr=0.0006)
    num_epochs = 5
    criterion = nn.BCEWithLogitsLoss()
    save_path = 'siameseNet-batchnorm50.pt'
    train_losses, val_losses = train(siameseBaseLine, train_loader, val_loader, num_epochs, criterion, save_path)

    # Evaluation on previously saved models
    import torch.optim as optim
Example #21
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size',
                        type=int,
                        default=64,
                        metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size',
                        type=int,
                        default=1000,
                        metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs',
                        type=int,
                        default=10,
                        metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.01,
                        metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.5,
                        metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed',
                        type=int,
                        default=1,
                        metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=10,
        metavar='N',
        help='how many batches to wait before logging training status')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()  # use the GPU only when requested and available
    torch.manual_seed(args.seed)  # fix the random seed for reproducibility
    device = torch.device("cuda" if use_cuda else "cpu")  # select CPU or GPU

    kwargs = {
        'num_workers': 1,
        'pin_memory': True
    } if use_cuda else {}  # loader worker count; pin memory for faster host-to-GPU copies

    dst = Dataset()
    train_loader = torch.utils.data.DataLoader(dst.data_train,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               **kwargs)
    test_loader = torch.utils.data.DataLoader(dst.data_test,
                                              batch_size=args.test_batch_size,
                                              shuffle=True,
                                              **kwargs)

    model = models.Net().to(device)
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum)  # instantiate the optimizer
    for epoch in range(1, args.epochs + 1):  # loop over epochs: train() then test()
        train(args, model, device, train_loader, optimizer, epoch)
        test(args, model, device, test_loader)
        vis.plot({'train_loss': train_loss_meter.value()[0]})
        vis.plot({'test_acc': test_acc_meter.value()[0]})  # added for visualization
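
The snippet assumes module-level vis, train_loss_meter and test_acc_meter objects; the .value()[0] calls match torchnet's AverageValueMeter and .plot matches the thin visdom wrapper common in PyTorch tutorials. Both are assumptions, since their definitions fall outside the excerpt. A minimal stand-in (a running visdom server is assumed, and train()/test() would also need to feed the meters):

import visdom
from torchnet import meter

train_loss_meter = meter.AverageValueMeter()  # .value()[0] is the running mean
test_acc_meter = meter.AverageValueMeter()

class Visualizer:
    """Tiny visdom wrapper exposing the plot(dict) call used above."""
    def __init__(self, env='main'):
        self._vis = visdom.Visdom(env=env)
        self._step = {}

    def plot(self, d):
        for name, y in d.items():
            x = self._step.get(name, 0)
            self._vis.line(Y=[y], X=[x], win=name,
                           update=None if x == 0 else 'append',
                           opts=dict(title=name))
            self._step[name] = x + 1

vis = Visualizer()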
Example #22
def main():
    global args, best_prec1
    args = parser.parse_args()

    conf_name = args.data
    conf_name += '_ortho' if args.ortho else '' 
    conf_name += ('_pre_' + args.net_type) if args.net_type != 'default' else ''
    
    args.checkpoint = os.path.join(args.checkpoint, conf_name, 'imprint_checkpoint')
    print(args.data)
    print(args.checkpoint)

    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    model = models.Net(extractor_type=args.net_type).cuda()


    print('==> Reading from model checkpoint..')
    assert os.path.isfile(args.model), 'Error: no model checkpoint directory found!'
    checkpoint = torch.load(args.model)
    args.start_epoch = checkpoint['epoch']
    best_prec1 = checkpoint['best_prec1']
    model.load_state_dict(checkpoint['state_dict'])
    print("=> loaded model checkpoint '{}' (epoch {})"
            .format(args.model, checkpoint['epoch']))
    cudnn.benchmark = True


    ## check orthogonality
    #import numpy as np
    #W = model.classifier.fc.weight.data
    #print(W.size())
    
    #d_list = []
    #for i in range(W.size(0)):
    #    for j in range(i,W.size(0)):
        
    #        if i==j:
    #            continue
             
    #        r1 = W[i]
    #        r2 = W[j]
                
            #r1 = torch.nn.functional.normalize(r1,p=2,dim=0)
            #r2 = torch.nn.functional.normalize(r2,p=2,dim=0)
    #        d = torch.dot(r1,r2)
    #        d_list.append(d.item())
            
    #d_list = np.array(d_list)
    #np.save('ortho_dotprod_hist.npy', d_list)
    # 
    #import matplotlib.pyplot as plt
    #plt.hist(d_list, bins=200, range=(-0.25,0.25), normed=True)  # arguments are passed to np.histogram
    #plt.title('Dot product histogram $\sigma$ = {:02f}'.format(np.std(d_list)))
    #plt.show() 

    # Data loading code
    normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5],
                                     std=[0.5, 0.5, 0.5])

    novel_transforms = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize]) if not args.aug else transforms.Compose([
            transforms.Resize(256),
            transforms.RandomCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])

    novel_dataset = loader.ImageLoader(
        args.data,
        novel_transforms,
        train=True, num_classes=200, 
        num_train_sample=args.num_sample, 
        novel_only=True, aug=args.aug)

    novel_loader = torch.utils.data.DataLoader(
        novel_dataset, batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    val_loader = torch.utils.data.DataLoader(
        loader.ImageLoader(args.data, transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]), num_classes=200, novel_only=args.test_novel_only),
        batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True)

    imprint(novel_loader, model)
    test_acc = validate(val_loader, model)

    save_checkpoint({
            'state_dict': model.state_dict(),
            'best_prec1': test_acc,
        }, checkpoint=args.checkpoint)
Example #23
 def load_model(self, model_path: str = 'mnist_0.49_error_rate.pt'):
     self.model = models.Net()
     self.model.load_state_dict(torch.load(model_path))  # load weights
     if torch.cuda.is_available():
         self.model = self.model.to(device)
     self.model.eval()
Example #24
traindataset = MyCustomFSDD(data_path=data_path, train=True)
trainloader = DataLoader(traindataset,
                         batch_size=batch_size,
                         shuffle=True,
                         pin_memory=True,
                         num_workers=0)

testdataset = MyCustomFSDD(data_path=data_path, train=False)
testloader = DataLoader(testdataset,
                        batch_size=1,
                        shuffle=True,
                        pin_memory=True,
                        num_workers=0)

# Initialize the NN model
net = models.Net()
net = net.to(device)

# Optimizer and Loss function
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=learning_rate)

start = timer()  # start the timer
# Training
if __name__ == "__main__":
    for epoch in range(max_epoch + 1):
        batchiter = 0

        for batch in trainloader:

            batchiter += 1
Example #25
def main():
    from datetime import datetime

    # this has been changed to run jupyter
    #
    # non jupyter ##############################################################
    if len(sys.argv) > 1:
        name = ' '.join(sys.argv[1:])
    else:
        name = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    ############################################################################

    # remove line below if not running on jupyter
    name = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")

    target_name = os.path.join('logs', '{}.pth'.format(name))
    print('will save to {}'.format(target_name))

    cudnn.benchmark = True

    train_loader = data.get_loader(train=True)
    val_loader = data.get_loader(val=True)

    net = nn.DataParallel(models.Net(
        train_loader.dataset.num_tokens)).cuda()  #change made here
    optimizer = optim.Adam([p for p in net.parameters() if p.requires_grad],
                           weight_decay=0.01)

    tracker = utils.Tracker()
    config_as_dict = {
        k: v
        for k, v in vars(config).items() if not k.startswith('__')
    }

    for i in range(config.epochs):
        _ = run(net,
                train_loader,
                optimizer,
                tracker,
                train=True,
                prefix='train',
                epoch=i)
        r = run(net,
                val_loader,
                optimizer,
                tracker,
                train=False,
                prefix='val',
                epoch=i)

        results = {
            'name': name,
            'tracker': tracker.to_dict(),
            'config': config_as_dict,
            'weights': net.state_dict(),
            'eval': {
                'answers': r[0],
                'accuracies': r[1],
                'idx': r[2],
            },
            'vocab': train_loader.dataset.vocab,
        }
        torch.save(results, target_name)
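
Because weights, config, tracker history and vocab are bundled into one .pth file, a later session can restore the run from that single artifact. A usage sketch, assuming the same net and target_name are in scope:

results = torch.load(target_name, map_location='cpu')
net.load_state_dict(results['weights'])
print(results['name'], results['config'].get('epochs'))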
Example #26
    torch.cuda.set_device(args.gpu)

    data_test =data.DATA_TEST(args, mode='test')
    #data_SVHN =data.SVHN(args, mode='test')

    ''' prepare data_loader '''
    print('===> prepare data loader ...')
    test_loader = torch.utils.data.DataLoader(data_test,
                                              batch_size=args.test_batch, 
                                              num_workers=args.workers,
                                              shuffle=False)
    ''' prepare mode '''
    #load best model
    if args.target_set == 'svhn':
        if args.resume_svhn == 'model_target_svhn.pth.tar?dl=1':
            model = models.Net(args).cuda()
        else:
            model = models_best.Net(args).cuda()
        print('checkpoint svhn')
        checkpoint = torch.load(args.resume_svhn)

    elif args.target_set == 'mnistm':
        if args.resume_mnistm == 'model_target_mnistm.pth.tar?dl=1':
            model = models.Net(args).cuda()
        else:
            model = models_best.Net(args).cuda()
        print('checkpoint mnistm')
        checkpoint = torch.load(args.resume_mnistm)
    #save predictions

    ''' resume save model '''
Example #27
        sql=args.seq_len)

    testInsts[i], testLabelPis[i], testLabelEmos[i] = batchify(
        data.test_insts[i],
        data.test_label_pis[i],
        data.test_label_emos[i],
        sql=args.seq_len)

#####################################################
# Build the model
#####################################################
net = [0] * 3
opt = [0] * 3
for i in range(3):
    net[i] = models.Net(62, args.nhid, args.nlayer, args.seq_len,
                        args.batch_size, args.rnn_type, args.total_layer,
                        args.g_dropout, args.pi_dropout, args.emo_dropout,
                        args.ntask).double().to(device)
    opt[i] = torch.optim.Adam(net[i].parameters(), lr=args.lr)

if args.way_loss == 0:
    weigh_loss = models.Weigh_Loss_Simple().double().to(device)
elif args.way_loss == 1:
    weigh_loss = models.Weigh_Loss_MLM().double().to(device)
else:
    weigh_loss = models.Sum_Loss().double().to(device)

criterion = nn.CrossEntropyLoss()

#####################################################
# Training code
#####################################################
Example #28
# Get a batch of training data
inputs, classes = next(iter(train_loader))
inputs = inputs * std_tensor + mean_tensor

# Make a grid from batch
out = torchvision.utils.make_grid(inputs)  # CHW
outn = out.numpy().transpose(1, 2, 0)  # HWC

plt.figure()
plt.imshow(outn)
#plt.title(",".join([classnames[i] for i in classes]))
plt.axis('off')
plt.savefig('augmentations.pdf', bbox_inches='tight')
plt.show()

model = models.Net(use_dropout, use_bn, l2_reg)
model = model.to(device)

# Display information about the model
summary_text = "Summary of the model architecture\n"+ \
        "=================================\n" + \
        f"{deepcs.display.torch_summarize(model)}\n"

print(summary_text)
"""
Adapted from : 
https://github.com/seominseok0429/label-smoothing-visualization-pytorch
"""


class LabelSmoothingCrossEntropy(nn.Module):
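    # The source file is truncated at this point; the body below is a
    # reconstruction in the style of the repository linked above, not its
    # verbatim code. Assumes: import torch.nn as nn and
    # import torch.nn.functional as F at module level.
    def __init__(self, epsilon: float = 0.1):
        super().__init__()
        self.epsilon = epsilon

    def forward(self, preds, target):
        # mix the usual NLL term with a uniform-over-classes penalty
        log_preds = F.log_softmax(preds, dim=-1)
        smooth_loss = -log_preds.mean(dim=-1).mean()
        nll = F.nll_loss(log_preds, target)
        return (1.0 - self.epsilon) * nll + self.epsilon * smooth_loss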
Example #29
         transforms.Normalize((0.1722, ), (0.3309, ))])),
                                           batch_size=args.batch_size,
                                           shuffle=True,
                                           **kwargs)
test_loader = torch.utils.data.DataLoader(datasets.EMNIST(
    'data',
    'letters',
    train=False,
    transform=transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.1722, ), (0.3309, ))])),
                                          batch_size=args.test_batch_size,
                                          shuffle=True,
                                          **kwargs)

model = models.Net()

# D_in    = 28*28
# H       = 500
# D_out   = 27

# model = torch.nn.Sequential(
#     torch.nn.Linear(D_in, H),   torch.nn.ReLU(),
#     torch.nn.Linear(H, H),      torch.nn.ReLU(),
#     torch.nn.Linear(H, D_out),
# )

if args.cuda:
    model.cuda()

optimizer = optim.Adam(model.parameters(), lr=args.lr)
Example #30
import os
import shutil

from torch.utils.tensorboard import SummaryWriter

if __name__ == '__main__':
    device = 'cuda:0' if torch.cuda.is_available() else 'cpu'

    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=8)
    parser.add_argument('--lr', type=float, default=0.0001)
    parser.add_argument('--epoch', type=int, default=50)
    args = parser.parse_args()

    train_loader, test_loader = datasets.prepare(batch_size=args.batch_size)
    model = models.Net(num_classes=8).to(device)
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)

    log_dir = 'data/runs'
    if os.path.exists(log_dir):
        shutil.rmtree(log_dir)
        os.makedirs(log_dir)
    else:
        os.makedirs(log_dir)
    writer = SummaryWriter(log_dir=log_dir)
    epoch_digit = len(list(str(args.epoch)))
    for epoch in range(args.epoch):
        model.train()
        train_loss = 0
        train_acc = 0
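        # --- the snippet is truncated here; the rest of the loop below is a
        # --- plausible sketch under the same names, not the original code ---
        for x, y in train_loader:
            x, y = x.to(device), y.to(device)
            optimizer.zero_grad()
            out = model(x)
            loss = criterion(out, y)
            loss.backward()
            optimizer.step()
            train_loss += loss.item() * x.size(0)
            train_acc += (out.argmax(dim=1) == y).sum().item()
        train_loss /= len(train_loader.dataset)
        train_acc /= len(train_loader.dataset)
        writer.add_scalar('loss/train', train_loss, epoch)
        writer.add_scalar('acc/train', train_acc, epoch)
        print(f'epoch {epoch + 1:>{epoch_digit}}/{args.epoch} '
              f'loss {train_loss:.4f} acc {train_acc:.4f}')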