Exemple #1
0
def main(args):
    """Evaluate a trained Net checkpoint on the MNIST test split.

    Builds the test dataset/loader, restores weights from args.checkpoint,
    runs inference, and prints overall accuracy.

    Relies on module-level names: device, collate_fn, Net, MnistDataset,
    ToTensor, transforms.
    """
    test_ds = MnistDataset(
        args.test_image_file,
        args.test_label_file,
        transform=transforms.Compose([ToTensor()]),
    )
    test_loader = torch.utils.data.DataLoader(
        test_ds,
        batch_size=args.batch_size,
        collate_fn=collate_fn,
        shuffle=False,  # order is irrelevant for accuracy
    )
    model = Net().to(device)
    # map_location makes a GPU-trained checkpoint loadable on a CPU-only host
    model.load_state_dict(torch.load(args.checkpoint, map_location=device))
    model.eval()
    predicts = []
    truths = []
    with torch.no_grad():
        for sample in test_loader:
            X, Y_true = sample["X"].to(device), sample["Y"].to(device)
            output = model(X)
            predicts.append(torch.argmax(output, dim=1))
            truths.append(Y_true)
    predicts = torch.cat(predicts, dim=0)
    truths = torch.cat(truths, dim=0)
    # .item() converts the 0-dim count tensor to a Python int so the
    # {:.4f} format below always works, regardless of torch version
    acc = torch.sum(torch.eq(predicts, truths)).item()
    print("Acc: {:.4f}".format(acc / len(predicts)))
def train(epochs, train_loader, dev_loader, lr, seed, log_interval,
          output_dir):
    """Train the model. Store snapshot models in the output_dir alongside
    evaluations on the dev set after each epoch.

    Relies on module-level names: Net, measure_size, test, optim, F, os.
    """

    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda:0" if use_cuda else "cpu")
    print("Using device: ", device)

    # BUG FIX: seed BEFORE building the model so weight initialization is
    # reproducible, and always seed the CPU RNG (the original seeded only
    # CUDA when a GPU was available, leaving CPU-side init unseeded).
    torch.manual_seed(seed)
    if use_cuda:
        torch.cuda.manual_seed(seed)

    #torch.backends.cudnn.benchmark = False
    #torch.backends.cudnn.deterministic = True

    model = Net()
    measure_size(model)
    model.to(device)

    optimizer = optim.Adam(model.parameters(), lr=lr)

    # BUG FIX: range(1, epochs + 1) — the original range(1, epochs) silently
    # ran one epoch fewer than requested.
    for epoch in range(1, epochs + 1):

        model.train()
        total_loss = 0.0
        for batch_idx, (data, target) in enumerate(train_loader):
            if use_cuda:
                data, target = data.to(device), target.to(device)
            # Add a channel dimension: (N, H, W) -> (N, 1, H, W)
            data = data.unsqueeze(1)

            optimizer.zero_grad()
            output = model(data)
            loss = F.nll_loss(output, target)
            total_loss += loss.item()
            loss.backward()
            optimizer.step()

            if batch_idx % log_interval == 0:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                    epoch, batch_idx * len(data), len(train_loader.dataset),
                    100. * batch_idx / len(train_loader), loss.item()))

        # Per-sample average of the summed per-batch losses.
        print("Total loss = %.6f" % (total_loss / len(train_loader.dataset)))

        test(model, dev_loader,
             os.path.join(output_dir, 'dev-eer-' + str(epoch)))

        # NOTE(review): saves the whole module object (pickle), not just the
        # state_dict — kept for compatibility with existing loading code.
        torch.save(model, os.path.join(output_dir,
                                       'iter' + str(epoch) + '.mdl'))
Exemple #3
0
def load_model(model_path, TEXT=None, LABEL=None):
    """Load a model for evaluation.

    Supports three sources, selected by model_path:
      * a '.pt' file containing either a bare state dict (architecture
        inferred from the path: 'tut' -> CNN, 'mnist' -> Net,
        'HELOC'/'heloc' -> MLP) or a pickled full model;
      * the strings 'VGG19', 'ResNet50', 'DenseNet161' for torchvision
        pretrained models.

    The model is returned in eval mode, moved to GPU when available.
    TEXT/LABEL are torchtext fields, needed only for the CNN path.
    """
    #for saved model (.pt)
    if '.pt' in model_path:
        # Load once and reuse (the original called torch.load twice).
        saved = torch.load(model_path)
        if torch.typename(saved) == 'OrderedDict':
            if 'tut' in model_path:

                INPUT_DIM = len(TEXT.vocab)
                EMBEDDING_DIM = 100
                N_FILTERS = 100
                FILTER_SIZES = [3, 4, 5]
                OUTPUT_DIM = 1
                DROPOUT = 0.5
                PAD_IDX = TEXT.vocab.stoi[TEXT.pad_token]

                model = CNN(INPUT_DIM, EMBEDDING_DIM, N_FILTERS, FILTER_SIZES,
                            OUTPUT_DIM, DROPOUT, PAD_IDX)
            elif 'mnist' in model_path:

                model = Net()
            # BUG FIX: the original `'HELOC' or 'heloc' in model_path` was
            # always truthy ('HELOC' is a non-empty string), so every
            # unmatched path fell into the MLP branch.
            elif 'HELOC' in model_path or 'heloc' in model_path:
                input_size = 22
                model = MLP(input_size)
            model.load_state_dict(saved)

        else:
            # The file holds a pickled full model object.
            model = saved

    #for pretrained model
    elif model_path == 'VGG19':
        model = models.vgg19(pretrained=True)
    elif model_path == 'ResNet50':
        model = models.resnet50(pretrained=True)
    elif model_path == 'DenseNet161':
        model = models.densenet161(pretrained=True)

    model.eval()
    if cuda_available():
        model.cuda()

    return model
Exemple #4
0
cpu = False  # set True to force CPU inference

if cpu:
    device = torch.device('cpu')
else:
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Image transformations
transform = transforms.Compose([
    transforms.Resize(size=(32, 32)),
    transforms.ToTensor(),
    # ImageNet mean/std normalization
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

model = Net()

# Load the model
print(model)

if cpu:
    # map_location remaps CUDA-saved tensors onto CPU storage
    checkpoint = torch.load('./weights/chen_liveness_mobilenetv2.pth.tar',
                            map_location=lambda storage, loc: storage)
else:
    checkpoint = torch.load('./weights/chen_liveness_mobilenetv2.pth.tar')
    print('gpu')

new_state_dict = OrderedDict()

# Models saved from an nn.DataParallel wrapper need their state-dict keys
# rewritten (strip the "module." prefix) before they can be used on CPU
'''for k, v in checkpoint.items():
Exemple #5
0
print('===> Loading datasets')
# Super-resolution training set: paired LR/HR patches with optional
# augmentation, configured entirely from the `opt` namespace.
train_set = get_training_set(opt.data_dir, opt.lr_train_dataset,
                             opt.hr_train_dataset, opt.upscale_factor,
                             opt.patch_size, opt.data_augmentation)
training_data_loader = DataLoader(dataset=train_set,
                                  num_workers=opt.threads,
                                  batch_size=opt.batchSize,
                                  shuffle=True)

print('===> Building model ', opt.model_type)

# Alternative architectures kept for experimentation:
#model = SRCNN()
#model = VDSR()
#model = LGCNet()
#model = EEGAN(4)
model = Mymodel(3, 64, opt.scale)
#model = DDBPN(opt)
#model = RDN(opt.scale)
#model = RCAN(opt)
#model = SAN(opt)
#model = torch.nn.DataParallel(model, device_ids=gpus_list)
criterion = nn.L1Loss()  #nn.MSELoss()#nn.L1Loss()

#################################
#################################

# Optional warm start from a pre-trained checkpoint (disabled):
# pre_net = Mymodel_pre(3,64,4)
# pre_path = 'checkpoints_my_x4/model_my_x4_epoch_929.pth'
# checkpoint = torch.load(pre_path)
# pre_net.load_state_dict(checkpoint["model"].state_dict())
Exemple #6
0
# author: frendy
# site: http://frendy.vip/
# time: 28/06/2017

import os
import torch
import torch.optim as optim
import torch.nn as nn
from torch.autograd import Variable
from config import MODEL_PATH, DATA_PATH
from models.model import transform, Net
from data import loadTrainData

# Training dataset + DataLoader supplied by the project's data module.
trainset, trainloader = loadTrainData()

net = Net()
# Resume from a previously saved checkpoint if one exists.
if os.path.exists(MODEL_PATH):
    net.load_state_dict(torch.load(MODEL_PATH))

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

for epoch in range(2):  # loop over the dataset multiple times

    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        # get the inputs
        inputs, labels = data

        # wrap them in Variable
        # NOTE(review): Variable is a no-op wrapper in PyTorch >= 0.4
        inputs, labels = Variable(inputs), Variable(labels)
Exemple #7
0
# author: frendy
# site: http://frendy.vip/
# time: 28/06/2017

import torch
import torchvision

from torch.autograd import Variable
from config import MODEL_PATH
from models.model import Net, classes
from data import loadTestData
from utils.image import imshow

# Test dataset + DataLoader supplied by the project's data module.
testset, testloader = loadTestData()

net = Net()
net.load_state_dict(torch.load(MODEL_PATH))

dataiter = iter(testloader)
# BUG FIX: the .next() method is Python-2-only and was removed from
# DataLoader iterators; the next() builtin works everywhere.
images, labels = next(dataiter)

# print images
imshow(torchvision.utils.make_grid(images))
print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(4)))

outputs = net(Variable(images))

# torch.max over dim 1 -> (values, indices); we only need the class indices.
_, predicted = torch.max(outputs.data, 1)

# BUG FIX: predicted[j][0] assumed the old (N, 1) index shape and raises an
# IndexError on modern torch, where the indices are 1-D; int(predicted[j])
# handles both shapes.
print('Predicted: ',
      ' '.join('%5s' % classes[int(predicted[j])] for j in range(4)))
Exemple #8
0
import torch
import torch.optim as optim

# BUG FIX: np is used below (milestones) but numpy was never imported
# in this script.
import numpy as np

import utils.train_repro_util as repro_util
from models.model import NetVanilla as Net
import utils.utils_global as utils_global
from utils.utils_global import ProgressBar
from properties.properties_repro import PropertiesRepro
from properties.properties_global import PropertiesGlobal

properties_global = PropertiesGlobal()
properties_repro = PropertiesRepro()

training_generator, validation_generator = properties_repro.get_dataloaders()
# declare net and loss
net = Net()
# load model (map_location lets a GPU-trained checkpoint load anywhere)
net.load_state_dict(
    torch.load(properties_repro.path_model_to_load,
               map_location=properties_repro.device))
# send net to cuda
net.to(properties_repro.device)

# define optimizer
optimizer = optim.Adam(net.parameters(), lr=properties_repro.lr)
# define learning rate scheduling: halve the LR at each milestone epoch
milestones = np.array([25, 37, 50, 62])
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                 milestones=milestones,
                                                 gamma=0.5)
Exemple #9
0
    'train':
    datasets.ImageFolder(root = os.path.join(data_path, 'train'),
                         transform=image_transforms['train']),
    'valid':
    datasets.ImageFolder(root = os.path.join(data_path, 'valid'),
                         transform=image_transforms['valid'])
}


# Dataloader iterators, make sure to shuffle
dataloaders = {
    'train': DataLoader(data['train'], batch_size = 64, shuffle = True, num_workers = 8, pin_memory = True),
    'valid': DataLoader(data['valid'], batch_size =64, shuffle = True, num_workers = 8, pin_memory = True),
}

net = Net()
print(net)
net.to(device)

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-5)

epoches=300

# Per-epoch validation accuracies, used for tracking the best model.
eval_acc_list = []
is_best = False
### Train the network
for epoch in range(epoches):
    train_loss = 0.
    train_acc = 0.
    for inputs, targets in dataloaders['train']:
sys.path.append("../")  # make sibling packages importable when run as a script
from utils.loading import load_data
from models.model import Net
from torch.utils.data import DataLoader

with open("../data/configuration.json", "r") as file:
    data = json.load(file)
# NOTE(review): this dumps/loads round-trip is a no-op for plain JSON data —
# presumably intended as a deep copy / normalization step; confirm.
data = json.loads(json.dumps(data))
path = data['data']['paths']["train"]
type_ = "train/audio"
BATCH_SIZE = 32
N_EPOCHS = 5
N_classes = 30
lr = 0.001
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Net(batch_size, in_channels, hidden, ...) — exact argument semantics are
# defined in models.model; confirm against that module.
net = Net(BATCH_SIZE, 1, 128, 1).to(device)

MAX_NUM_WAVS_PER_CLASS = 2**27 - 1  # ~134M
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9)

def train(epoch, data):
    net.train().to(device)
    # zero the parameter gradients
    optimizer.zero_grad()
    inputs, labels = data
    # print(type(inputs))
    inputs = torch.from_numpy(np.asarray(inputs).astype(np.float32))
    permutation = torch.randperm(inputs.size()[0])
    running_loss = 0
Exemple #11
0
class Trainer(object):
    """Train a Net classifier on MNIST with an 80/20 train/val split.

    Checkpoints the model's state dict into args.ckpt whenever validation
    accuracy improves. Relies on module-level names: Net, MnistDataset,
    ToTensor, transforms, collate_fn.
    """

    def __init__(self, args):
        self.args = args
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.prepare_data()
        self.setup_train()

    def prepare_data(self):
        """Build the train/val datasets and their DataLoaders (80/20 split)."""
        train_val = MnistDataset(
            self.args.train_image_file,
            self.args.train_label_file,
            transform=transforms.Compose([ToTensor()]),
        )
        train_len = int(0.8 * len(train_val))
        train_ds, val_ds = torch.utils.data.random_split(
            train_val, [train_len, len(train_val) - train_len]
        )
        print("Train {}, val {}".format(len(train_ds), len(val_ds)))
        self.train_loader = torch.utils.data.DataLoader(
            train_ds,
            batch_size=self.args.batch_size,
            collate_fn=collate_fn,
            shuffle=True,
        )
        self.val_loader = torch.utils.data.DataLoader(
            val_ds,
            batch_size=self.args.batch_size,
            collate_fn=collate_fn,
            shuffle=False,
        )

    def setup_train(self):
        """Create the model, optimizer, loss, and the checkpoint directory."""
        self.model = Net().to(self.device)
        self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.args.lr)
        self.criterion = nn.CrossEntropyLoss().to(self.device)
        # makedirs(exist_ok=True) also creates missing parents and avoids the
        # race between the original isdir check and mkdir.
        os.makedirs(self.args.ckpt, exist_ok=True)

    def train_one_epoch(self):
        """Run one optimization pass; return the mean per-batch training loss."""
        train_loss = 0.0
        self.model.train()
        for sample in self.train_loader:
            X, Y_true = sample["X"].to(self.device), sample["Y"].to(self.device)
            self.optimizer.zero_grad()
            output = self.model(X)
            loss = self.criterion(output, Y_true)
            loss.backward()
            self.optimizer.step()
            train_loss += loss.item()
        return train_loss / len(self.train_loader)

    def evaluate(self):
        """Return (accuracy, mean per-batch loss) over the validation loader."""
        val_loss = 0.0
        self.model.eval()
        predicts = []
        truths = []
        with torch.no_grad():
            for sample in self.val_loader:
                X, Y_true = sample["X"].to(self.device), sample["Y"].to(self.device)
                output = self.model(X)
                loss = self.criterion(output, Y_true)
                val_loss += loss.item()
                predicts.append(torch.argmax(output, dim=1))
                truths.append(Y_true)
        predicts = torch.cat(predicts, dim=0)
        truths = torch.cat(truths, dim=0)
        acc = torch.sum(torch.eq(predicts, truths))
        return acc / len(predicts), val_loss / len(self.val_loader)

    def run(self):
        """Main loop: train args.epochs epochs, checkpoint on best val acc."""
        # (removed the unused min_loss variable from the original)
        max_acc = 0
        for epoch in range(self.args.epochs):
            train_loss = self.train_one_epoch()
            val_acc, val_loss = self.evaluate()

            if val_acc > max_acc:
                max_acc = val_acc
                torch.save(
                    self.model.state_dict(),
                    os.path.join(
                        self.args.ckpt,
                        "{}_{}_{:.4f}.pth".format(self.args.name, epoch, max_acc),
                    ),
                )
            print(
                "Epoch {}, loss {:.4f}, val_acc {:.4f}".format(
                    epoch, train_loss, val_acc
                )
            )
Exemple #12
0
import torch.optim as optim

import utils.utils_global as utils_global
from models.model import NetVanilla as Net
import utils.train_dsac_util as ransac_util
import utils.train_dsac_sub as sub
from utils.utils_global import ProgressBar

from properties.properties_dsac import PropertiesDsac
from properties.properties_global import PropertiesGlobal

# NOTE(review): `torch` is reachable via the torch.optim import, but `np`
# and `os` are used below without a visible import in this snippet —
# confirm they are imported elsewhere in the file.
properties_global = PropertiesGlobal()
properties_dsac = PropertiesDsac()

# Loading pre-trained model
model = Net()
model.load_state_dict(torch.load(properties_dsac.model_path, map_location=properties_global.device))
model.to(properties_global.device)


training_generator, validation_generator = properties_dsac.get_dataloaders()
optimizer = optim.Adam(model.parameters(), lr=properties_dsac.learning_rate)

print('INFO TRAINING: ')
print('repro error threshold: {} pixels'.format(properties_dsac.inlier_threshold))
print('number of refinement iters: {}'.format(properties_dsac.number_refinement_iterations))

# Fresh run: allocate a new loss log; otherwise resume from the saved log.
if properties_dsac.starting_epoch == 0:
    losses_log = np.zeros((properties_dsac.max_epochs, 3))
else:
    losses_old = np.loadtxt(os.path.join(properties_dsac.save_in_folder, 'log ransac.txt'))
Exemple #13
0
use_cuda = torch.cuda.is_available()

# Checkpoint dict; its 'model' entry holds the state dict loaded below.
state_dict = torch.load(config["model"])

df = pd.read_csv('kaggle_template.csv')

# Pre-extracted test features (full image and cropped variants).
features_test = np.load(
    os.path.join(config["load_dir"] + '/_feature_test_assigment.npy'))
features_test_crop = np.load(
    os.path.join(config["load_dir"] + '/_feature_test_assigment_crop.npy'))
if config["concatenate"]:
    features_test = np.concatenate((features_test, features_test_crop), axis=1)

#Model
model = Net(features_test.shape[1])

model.load_state_dict(state_dict['model'])
model.eval()
if use_cuda:
    print('Using GPU')
    model.cuda()
else:
    print('Using CPU')

# Match each test file to its row in the submission template by photo Id.
for dossier, sous_dossiers, fichiers in os.walk(config["test_dir"]):
    for num, fichier in enumerate(fichiers):
        num_photo = df.loc[df['Id'] == fichier.split('.')[0]].index[0]
        data = torch.tensor(features_test[num])
        if use_cuda:
            data = data.cuda()
Exemple #14
0
# NOTE(review): the name "dataloders" is misspelled; kept as-is because
# later code in the file may reference it.
dataloders = {
    x: torch.utils.data.DataLoader(features_datasets[x],
                                   batch_size=config["batchsize"],
                                   shuffle=True,
                                   num_workers=4)
    for x in ['train_images', 'val_images']
}
dataset_sizes = {
    x: len(features_datasets[x])
    for x in ['train_images', 'val_images']
}

# Neural network and optimizer
# We define neural net in model.py so that it can be reused by the evaluate.py script

model = Net(features_train.shape[1])

print(model)

if use_cuda:
    print('Using GPU')
    model.cuda()
else:
    print('Using CPU')

optimizer = optim.SGD(model.parameters(),
                      lr=config["lr"],
                      momentum=config["momentum"])
criterion = nn.CrossEntropyLoss()

# Run the functions and save the best model in the function model_ft.
Exemple #15
0
 def setup_train(self):
     """Create the model, optimizer, loss criterion, and checkpoint dir."""
     self.model = Net().to(self.device)
     self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.args.lr)
     self.criterion = nn.CrossEntropyLoss().to(self.device)
     # os.mkdir creates only the leaf directory; the parent must exist
     if not os.path.isdir(self.args.ckpt):
         os.mkdir(self.args.ckpt)
cpu = False  # set True to force CPU inference

if cpu:
    device = torch.device('cpu')
else:
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Image transformations
transform = transforms.Compose([
    transforms.Resize(size=(32, 32)),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.226, 0.226, 0.226])
])

model = Net()

# Load the model
print(model)

if cpu:
    # map_location remaps CUDA-saved tensors onto CPU storage
    checkpoint = torch.load('./weights/best_model-20200904.pth.tar',
                            map_location=lambda storage, loc: storage)
else:
    checkpoint = torch.load('./weights/best_model-20200904.pth.tar')

#new_state_dict = OrderedDict()

# Models saved from an nn.DataParallel wrapper need their state-dict keys
# rewritten (strip the "module." prefix) before they can be used on CPU
'''for k, v in checkpoint.items():
    name = k[7:]  # remove module.
# NOTE(review): the validation loader is shuffled too — harmless for plain
# accuracy computation, but unusual; confirm intent.
dataloaders = {
    'train':
    DataLoader(data['train'],
               batch_size=args.batch_size,
               shuffle=True,
               num_workers=8,
               pin_memory=True),
    'valid':
    DataLoader(data['valid'],
               batch_size=args.batch_size,
               shuffle=True,
               num_workers=8,
               pin_memory=True),
}

model = Net()
#model = MobileNet(inputs)

MOMENTUM = 0.9
WEIGHT_DECAY = 5e-5

print(model)
'''if args.resume_weights:
    mask_checkpoint = torch.load(args.resume_weights)

    new_mask_state_dict = OrderedDict()

    for k, v in mask_checkpoint.items():
        name = k[7:]  # remove module.
        new_mask_state_dict[name] = v