Example #1
def get_parameter_number(net):
    total_num = sum(p.numel() for p in net.parameters())
    trainable_num = sum(p.numel() for p in net.parameters() if p.requires_grad)
    return {'Total': total_num, 'Trainable': trainable_num}
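A minimal usage sketch (not part of the original snippet), assuming a toy model:

import torch.nn as nn

model = nn.Linear(784, 10)  # 784 * 10 weights + 10 biases = 7,850 parameters
print(get_parameter_number(model))  # {'Total': 7850, 'Trainable': 7850}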
Example #2
File: main.py Project: syt2/mnist
def main(arch=None):

    if not os.path.isdir(args.save_path):
        os.makedirs(args.save_path)
    log = open(os.path.join(args.save_path, '{}.txt'.format('log')), 'w')

    if args.tensorboard is None:
        writer = SummaryWriter(args.save_path)
    else:
        writer = SummaryWriter(args.tensorboard)

    print_log('save path : {}'.format(args.save_path), log)
    state = {k: v for k, v in args._get_kwargs()}
    print_log(state, log)
    print_log("Random Seed: {}".format(args.manualSeed), log)
    print_log("use cuda: {}".format(args.use_cuda), log)
    print_log("python version : {}".format(sys.version.replace('\n', ' ')),
              log)
    print_log("torch  version : {}".format(torch.__version__), log)
    print_log("cudnn  version : {}".format(torch.backends.cudnn.version()),
              log)

    # Init data loader
    train_loader = dataset.mnistDataLoader(args.train_dir, True,
                                           args.train_batch_size, True,
                                           args.workers)
    test_loader = dataset.mnistDataLoader(args.test_dir, False,
                                          args.test_batch_size, False,
                                          args.workers)
    num_classes = 10
    input_size = (1, 28, 28)
    net = arch(num_classes)
    print_log("=> network:\n {}".format(net), log)
    summary = model_summary(net, input_size)
    print_log(summary, log)

    writer.add_graph(net, torch.rand([1, 1, 28, 28]))

    if args.ngpu > 1:
        net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu)))

    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(net.parameters(),
                                state['learning_rate'],
                                momentum=state['momentum'],
                                weight_decay=state['decay'],
                                nesterov=True)

    if args.use_cuda:
        net.cuda()
        criterion.cuda()

    recorder = RecorderMeter(args.epochs)

    if args.resume:
        if os.path.isfile(args.resume):
            print_log("=> loading checkpoint '{}'".format(args.resume), log)
            checkpoint = torch.load(args.resume)
            recorder = checkpoint['recorder']
            args.start_epoch = checkpoint['epoch']
            net.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print_log(
                "=> loaded checkpoint '{}' (epoch {})".format(
                    args.resume, checkpoint['epoch']), log)
        else:
            print_log("=> no checkpoint found at '{}'".format(args.resume),
                      log)
    else:
        print_log("=> not use any checkpoint for model", log)

    if args.evaluate:
        checkpoint = torch.load(args.save_path + '/model_best.pth.tar')
        net.load_state_dict(checkpoint['state_dict'])
        time1 = time.time()
        validate(test_loader, net, criterion, log, writer, embedding=True)
        time2 = time.time()
        print('validate function took %0.3f ms' % ((time2 - time1) * 1000.0))
        return

    start_time = time.time()
    epoch_time = AverageMeter()
    for epoch in range(args.start_epoch, args.epochs):
        current_learning_rate = adjust_learning_rate(args.learning_rate,
                                                     optimizer, epoch,
                                                     args.gammas,
                                                     args.schedule)
        need_hour, need_mins, need_secs = convert_secs2time(
            epoch_time.avg * (args.epochs - epoch))
        need_time = '[Need: {:02d}:{:02d}:{:02d}]'.format(
            need_hour, need_mins, need_secs)
        print_log(
            '\n==>>{:s} [Epoch={:03d}/{:03d}] {:s} [learning_rate={:6.4f}]'
            ' [Best : Accuracy={:.2f}]'.format(time_string(), epoch,
                                               args.epochs, need_time,
                                               current_learning_rate,
                                               recorder.max_accuracy(False)),
            log)
        train_acc, train_los = train(train_loader, net, criterion, optimizer,
                                     log)
        val_acc, val_los = validate(test_loader, net, criterion, log)

        is_best = recorder.update(epoch, train_los, train_acc, val_los,
                                  val_acc)
        if is_best:
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': net.state_dict(),
                    'recorder': recorder,
                    'optimizer': optimizer.state_dict(),
                }, is_best, args.save_path, 'checkpoint.pth.tar')
            print('save ckpt done!')

        writer.add_scalar('Train/loss', train_los, epoch)
        writer.add_scalar('Train/acc', train_acc, epoch)
        writer.add_scalar('Test/acc', val_acc, epoch)
        for name, param in net.named_parameters():
            writer.add_histogram(name, param.clone().cpu().data.numpy(), epoch)

        epoch_time.update(time.time() - start_time)
        start_time = time.time()

    save_checkpoint(
        {
            'state_dict': net.state_dict(),
            'recorder': recorder,
            'optimizer': optimizer.state_dict(),
        }, is_best, args.save_path, 'model.pth.tar')
    print('save model done!')

    checkpoint = torch.load(args.save_path + '/model_best.pth.tar')
    net.load_state_dict(checkpoint['state_dict'])
    time1 = time.time()
    validate(test_loader, net, criterion, log, writer, embedding=True)
    time2 = time.time()
    print_log('validate function took %0.3f ms' % ((time2 - time1) * 1000.0),
              log)

    log.close()
    writer.close()
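Helpers such as print_log and adjust_learning_rate are defined elsewhere in the project. Plausible minimal sketches, assuming schedule is a list of epoch milestones and gammas the matching decay factors (both are assumptions about the project's conventions):

def print_log(print_string, log):
    # Echo to stdout and append to the open log file.
    print("{}".format(print_string))
    log.write('{}\n'.format(print_string))
    log.flush()

def adjust_learning_rate(base_lr, optimizer, epoch, gammas, schedule):
    # Multiply the base learning rate by every gamma whose milestone epoch
    # has been reached, then write the result into each parameter group.
    lr = base_lr
    for gamma, step in zip(gammas, schedule):
        if epoch >= step:
            lr = lr * gamma
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr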
Example #3
import os

import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

import net

if __name__ == '__main__':
    if not os.path.exists("img"):
        os.makedirs("img")
    tf = transforms.Compose([transforms.ToTensor()])

    mnist_data = datasets.MNIST("/data",
                                train=True,
                                transform=tf,
                                download=True)
    train_loader = DataLoader(mnist_data, 100, shuffle=True)

    net = net.Net_total().cuda()
    opt = torch.optim.Adam(net.parameters())
    loss_fun = nn.MSELoss()
    k = 0
    for epoch in range(100):
        for i, (img, label) in enumerate(train_loader):
            img = img.cuda()
            out_img = net(img)
            loss = loss_fun(out_img, img)

            opt.zero_grad()
            loss.backward()
            opt.step()

            if i % 10 == 0:
                print(loss.item())
                fake_img = out_img.detach()
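                # Hypothetical continuation (the original snippet is cut off
                # here): save a grid of inputs and reconstructions into the
                # img/ directory created above. save_image and the file
                # naming are assumptions, not from the original.
                from torchvision.utils import save_image
                save_image(torch.cat([img, fake_img], dim=0),
                           "img/{}.png".format(k), nrow=10)
                k += 1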
Example #4
import time

import torch
import torch.nn as nn
import torch.optim as optim

import data  # project-local module providing load_MNIST_data()
import net   # project-local module providing Net

run_with_gpu = True

# 0. check device
use_cuda = run_with_gpu and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")

# 1. load data
train_loader, test_loader = data.load_MNIST_data()

# 2. define a neural net
net = net.Net(784, 800, 10).to(device)

# 3. define a loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

# 4. train the neural net
for epoch in range(10):
    # check time
    start = time.time()

    running_loss = 0.0
    for i, data in enumerate(train_loader):
        # get the inputs; data is a list of [inputs, labels]
        inputs, labels = data[0].to(device), data[1].to(device)
        inputs = inputs.view(-1, 784)

        # zero the parameter gradients
        optimizer.zero_grad()
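        # Hypothetical continuation of the truncated loop, following the
        # standard PyTorch training pattern (assumed, not from the original):
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        # print statistics every 100 mini-batches
        running_loss += loss.item()
        if i % 100 == 99:
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, i + 1, running_loss / 100))
            running_loss = 0.0

    # report how long the epoch took, using the start time recorded above
    print('epoch %d took %.3f s' % (epoch + 1, time.time() - start))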
Example #5
    print('finished loading dataset')

    # vgg = vgg16(pretrained=True).to(device)

    print('using pretrained weights')
    vgg = vgg16_bn(pretrained=True)
    net = net.VGG_New(vgg.features,
                      vgg.classifier,
                      device,
                      num_classes=10,
                      init_weights=False,
                      feature_grad=feature_grad,
                      classifier_grad=classifier_grad)
    # net = net.myVGG16().to(device)
    # optimizer = optim.SGD(net.classifier.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
    optimizer = optim.Adam(net.parameters())
    # net = net.myVGG16().to(device)
    # net = myvgg.VGG('VGG16').to(device)

    print('finished loading NN')

    # optimizer = optim.SGD(net.parameters(), lr=lr, momentum=momentum)
    # optimizer = optim.Adam(net.parameters())
    # optimizer = optim.SGD(net.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
    print('finished loading optimizer')

    criterion = nn.CrossEntropyLoss()
    print('finished loading criterion')

    logger = logger.TrainLogger(out_dir, epochs, args.n)
    print('finished loading logger')
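The feature_grad and classifier_grad flags passed to net.VGG_New are not defined in this snippet; in fine-tuning code they typically just toggle requires_grad on the corresponding sub-modules. A sketch of that convention (an assumption about VGG_New's internals, not the project's actual code):

    # inside VGG_New.__init__, hypothetically:
    for p in self.features.parameters():
        p.requires_grad = feature_grad
    for p in self.classifier.parameters():
        p.requires_grad = classifier_grad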
Example #6
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 20 01:54:02 2021
@author: fatemeh tahrirchi
"""
import datasets, net
from preprocessing import Preprocessing, CharVectorizer
from net import VDCNN, train, save
import lmdb
import numpy as np
from tqdm import tqdm
import argparse
import torch
from torch.utils.data import DataLoader, Dataset
import os, subprocess
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
MODELS_FOLDER = 'models/vdcnn'
DATA_FOLDER = 'datasets'
DATASET = 'yelp_review_full'  #['yelp_review_full','yelp_review_polarity']
PREPROCES_TYPE = 'lower'  #['lower','denoiser','add_pos','add_hashtag','add_NOT']

# get device to calculate on (either CPU or GPU with minimum memory load)
def get_gpu_memory_map():
    # Query current memory usage (in MiB) for every visible GPU via
    # nvidia-smi and return it as a {gpu_id: memory_used} dict.
    result = subprocess.check_output(
        ['nvidia-smi', '--query-gpu=memory.used',
         '--format=csv,nounits,noheader'],
        encoding='utf-8')
    gpu_memory = [int(x) for x in result.strip().split('\n')]
    return dict(zip(range(len(gpu_memory)), gpu_memory))
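A hypothetical usage of the helper, matching the comment above about choosing the device with the minimum memory load (the variable names are illustrative):

memory_map = get_gpu_memory_map()
best_gpu = min(memory_map, key=memory_map.get)
device = torch.device('cuda:{}'.format(best_gpu)
                      if torch.cuda.is_available() else 'cpu')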