Example #1
import torch
from torch.utils.data import DataLoader

from dataset import FightDataset
from transform import Compose, ToTensor, Resize
from model import MyNet

torch.cuda.set_device(0)
transform_ = Compose([Resize((112, 112)), ToTensor()])
xx = FightDataset("./fight_classify", transform=transform_)

dataloader = DataLoader(xx, batch_size=1, shuffle=True)
# for i_batch, sample_batched in enumerate(dataloader):
#     print(i_batch)
#     print(sample_batched["image"].size())
dev = torch.device("cuda:0")
model = MyNet().to(dev)

criterion = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=1e-8, momentum=0.9)

for t in range(20):
    # Forward pass: Compute predicted y by passing x to the model
    for i_batch, sample_batched in enumerate(dataloader):
        image = sample_batched["image"].to(dev)
        label = sample_batched["label"].to(dev)
        # label = torch.transpose(label, 0,1)
        y_pred = model(image)
        # print(y_pred)
        # print(label)
        # Compute and print loss
        loss = criterion(y_pred, label)
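        # The excerpt ends at the loss computation; a minimal sketch of the rest of a
        # typical training step (this continuation is assumed, not from the original):
        print(t, i_batch, loss.item())

        # zero gradients, backpropagate, and update the weights
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()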
Example #2
                                         num_workers=2)

# Access dataset properties
train_shape = trainset.data.shape
test_shape = testset.data.shape
train_nb = train_shape[0]
test_nb = test_shape[0]
height = train_shape[1]
width = train_shape[2]
classes = trainset.classes
print('Training set size : %d' % train_nb)
print('Test set size     : %d' % test_nb)
print('Image size        : %d x %d\n' % (height, width))

# Build the network
model = MyNet()

# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=momentum)

# Training
print('Training')
for epoch in range(nb_epochs):

    # Set the model to training mode
    model.train()

    # Running loss container
    running_loss = 0.0
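
    # A hedged sketch of the epoch body that typically follows; the loader name
    # `trainloader` and the (inputs, targets) batch layout are assumptions not shown
    # in this excerpt:
    for inputs, targets in trainloader:
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()
        running_loss += loss.item() * inputs.size(0)
    print('Epoch %d/%d - loss: %.4f' % (epoch + 1, nb_epochs, running_loss / train_nb))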
Example #3
import torch

from model import MyNet

# Create the model and load the weights
model = MyNet()
model.load_state_dict(torch.load('my_network.pth'))

# Create dummy input
dummy_input = torch.rand(1, 3, 32, 32)

# Define input / output names
input_names = ["my_input"]
output_names = ["my_output"]

# Convert the PyTorch model to ONNX
torch.onnx.export(model,
                  dummy_input,
                  "my_network.onnx",
                  verbose=True,
                  input_names=input_names,
                  output_names=output_names)
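
# To sanity-check the export, the ONNX file can be run with onnxruntime (an optional,
# separate dependency; this verification step is not part of the original snippet):
import onnxruntime as ort

session = ort.InferenceSession("my_network.onnx", providers=["CPUExecutionProvider"])
onnx_out = session.run(["my_output"], {"my_input": dummy_input.numpy()})[0]
print("ONNX output shape:", onnx_out.shape)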
Example #4
data = Variable(data)

# slic
labels = segmentation.slic(im,
                           compactness=args.compactness,
                           n_segments=args.num_superpixels)
labels = labels.reshape(im.shape[0] * im.shape[1])
u_labels = np.unique(labels)
l_inds = []
for i in range(len(u_labels)):
    l_inds.append(np.where(labels == u_labels[i])[0])

# train
from model import MyNet

model = MyNet(data.size(1), args.nChannel, args.nConv)
if use_cuda:
    model.cuda()
model.train()
loss_fn = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.9)
label_colours = np.random.randint(255, size=(100, 3))
for batch_idx in range(args.maxIter):
    # forwarding
    optimizer.zero_grad()
    output = model(data)[0]
    output = output.permute(1, 2, 0).contiguous().view(-1, args.nChannel)
    ignore, target = torch.max(output, 1)
    im_target = target.data.cpu().numpy()
    nLabels = len(np.unique(im_target))
    if args.visualize:
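        # The visualization body and the training step are truncated in this excerpt.
        # A hedged sketch of what typically follows (the colour-mapping lines mirror a
        # later excerpt on this page; cv2 is assumed to be imported above the excerpt):
        im_target_rgb = np.array([label_colours[c % 100] for c in im_target])
        im_target_rgb = im_target_rgb.reshape(im.shape).astype(np.uint8)
        cv2.imshow('output', im_target_rgb)
        cv2.waitKey(10)

    # superpixel refinement: force every pixel inside a superpixel to its majority label
    for inds in l_inds:
        labels_per_sp = im_target[inds]
        u_labels_per_sp = np.unique(labels_per_sp)
        hist = [len(np.where(labels_per_sp == u)[0]) for u in u_labels_per_sp]
        im_target[inds] = u_labels_per_sp[np.argmax(hist)]

    # backpropagate against the refined per-pixel targets
    target = torch.from_numpy(im_target)
    if use_cuda:
        target = target.cuda()
    loss = loss_fn(output, target)
    loss.backward()
    optimizer.step()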
Example #5
def main(args):
    #hyper parameter
    end_epoch = 100
    lr = 0.001
    beta1 = 0.5
    beta2 = 0.99
    gpu = 0

    #set model
    model = MyNet()

    #set GPU or CPU
    if gpu >= 0 and torch.cuda.is_available():
        device = 'cuda:{}'.format(gpu)
    else:
        device = 'cpu'
    model.to(device)

    #print params
    params = 0
    for p in model.parameters():
        if p.requires_grad:
            params += p.numel()
    print(params)
    print(model)

    criteria = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=lr,
                                 betas=(beta1, beta2))

    dataset = MyDataset("data/", is_train=True)
    train_loader = torch.utils.data.DataLoader(dataset,
                                               batch_size=4,
                                               shuffle=True)

    for epoch in range(end_epoch):
        epoch_loss = 0
        epoch_acc = 0
        for i, data in enumerate(train_loader):
            print("\repoch: {} iteration: {}".format(epoch, i), end="")

            inputs, labels = data
            optimizer.zero_grad()
            outputs = model(inputs.to(device))
            _, preds = torch.max(outputs.data, 1)
            loss = criteria(outputs, labels.to(device))

            loss.backward()
            optimizer.step()

            epoch_loss += loss.data.to('cpu') * inputs.size(0)
            epoch_acc += torch.sum(preds.to('cpu') == labels.data)

        epoch_loss /= len(train_loader) * 4
        epoch_acc = epoch_acc / float(len(train_loader) * 4)

        print("[epoch: {}] [Loss: {:.4f}] [Acc: {:.4f}]".format(
            epoch, epoch_loss, epoch_acc))
        if (epoch + 1) % 10 == 0:
            if not os.path.exists("models/" + args.model):
                os.makedirs("models/" + args.model)
            torch.save(model.state_dict(),
                       "models/" + args.model + "/" + str(epoch) + ".pth")
Example #6

import torch
import torch.optim as optim
from model import MyNet
import time
import copy
from dataLoader import DataGetter
import matplotlib.pyplot as plt
import pickle

from train import train_model

if __name__ == "__main__":

    model = MyNet()
    optimizer = optim.SGD(filter(lambda p: p.requires_grad,
                                 model.parameters()),
                          lr=0.001)
    # Data loader init
    # data_dir = 'D:/data_odometry_gray/dataset'
    data_dir = 'D:/data_odometry_color/dataset/'
    batch_size = 16

    trainData = DataGetter(data_dir, batch_size, 0, 6, randomize_data=True)
    valData = DataGetter(data_dir, batch_size, 7, 7, randomize_data=True)

    model, metrics = train_model(model,
                                 optimizer,
                                 trainData,
                                 valData,
                                 num_epochs=50)
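
    # After training, the weights and the returned metrics can be persisted; a hedged
    # sketch using the imports above (the file names here are assumptions):
    torch.save(model.state_dict(), 'mynet_odometry.pth')
    with open('metrics.pkl', 'wb') as f:
        pickle.dump(metrics, f)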
Example #7

    _, target = torch.max(output, 1)
    im_target = target.data.cpu().numpy()
    n_labels = len(np.unique(im_target))
    im_target_rgb = np.array([label_colours[c % 100] for c in im_target])
    im_target_rgb = im_target_rgb.reshape(im.shape).astype(np.uint8)
    cv2.imshow('output', im_target_rgb)
    cv2.waitKey(viewtime)


def save(output, fname, out_dir=args.out_dir):
    out_path = os.path.join(out_dir, fname)


# define model
loadedparams = torch.load(args.model_dir, map_location=args.device)
model = MyNet(3)
if use_cuda:
    model.cuda()
loss_fn = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9)
label_colours = np.random.randint(255, size=(100, 3))
model.eval()

if args.model_dir != '':
    model.load_state_dict(loadedparams['model'], strict=True)
else:
    print('PLEASE LOAD A MODEL_DIR AS AN OPTION')

# main function for testing
if __name__ == '__main__':
Example #8
def main(args):
    #hyper parameter
    end_epoch = 200
    lr = 0.001
    beta1 = 0.5
    beta2 = 0.99
    gpu = 0

    #set model
    model = MyNet()

    #set GPU or CPU
    if gpu >= 0 and torch.cuda.is_available():
        device = 'cuda:{}'.format(gpu)
    else:
        device = 'cpu'
    model.to(device)
    model.load_state_dict(
        torch.load("models/" + args.model + "/" + str(args.epoch - 1) +
                   ".pth"))
    model.eval()

    criteria = nn.CrossEntropyLoss()

    dataset = MyDataset("data/", is_train=False)
    test_loader = torch.utils.data.DataLoader(dataset,
                                              batch_size=1,
                                              shuffle=True)

    with torch.no_grad():
        epoch_loss = 0
        epoch_acc = 0
        if not os.path.exists("result/" + args.model + "/true_favo/"):
            os.makedirs("result/" + args.model + "/true_favo/")
            os.makedirs("result/" + args.model + "/true_no_favo/")
            os.makedirs("result/" + args.model + "/false_favo/")
            os.makedirs("result/" + args.model + "/false_no_favo/")
        for i, data in enumerate(test_loader):
            print("\riteration: {}".format(i), end="")
            inputs, labels = data
            outputs = model(inputs.to(device))
            _, preds = torch.max(outputs.data, 1)
            loss = criteria(outputs, labels.to(device))

            favo_preds = preds.to('cpu')
            if favo_preds == labels:
                if favo_preds == 1:
                    save_image(
                        inputs, "result/" + args.model + "/true_favo/" +
                        str(i) + ".jpg")
                elif favo_preds == 0:
                    save_image(
                        inputs, "result/" + args.model + "/true_no_favo/" +
                        str(i) + ".jpg")
            else:
                if favo_preds == 1:
                    save_image(
                        inputs, "result/" + args.model + "/false_favo/" +
                        str(i) + ".jpg")
                elif favo_preds == 0:
                    save_image(
                        inputs, "result/" + args.model + "/false_no_favo/" +
                        str(i) + ".jpg")

            epoch_loss += loss.data.to('cpu') * inputs.size(0)
            epoch_acc += torch.sum(favo_preds == labels.data)

        epoch_loss /= len(test_loader)
        epoch_acc = epoch_acc / float(len(test_loader))
        print("[Loss: {}] [Acc: {}]".format(epoch_loss, epoch_acc))
Example #9
    data = data.cuda().unsqueeze(0)
    return im, Variable(data)

def crop(img, width=128, height=128):
    h, w, _ = img.shape
    xpos = random.randint(0, w - width - 1)
    ypos = random.randint(0, h - height - 1)
    return img[ypos:ypos + height, xpos:xpos + width]

def save(model, k):
    data = {'model': model.state_dict()}
    data['k'] = k
    torch.save(data, 'segnet.pth')

# define model
model = MyNet(3)
if use_cuda:
    model.cuda()
model.train()
loss_fn = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.9)
label_colours = np.random.randint(255, size=(100, 3))

#for batch_idx in range(args.maxIter):
for batch_idx in count():
    # load image
    im, data = loadData()

    # slic
    labels = segmentation.slic(im, compactness=args.compactness, n_segments=args.num_superpixels)
labels = labels.reshape(im.shape[0] * im.shape[1])
Example #10
#classes for defect banknote
classes = {
    'Dot': 'dot',
    'Miss_print': 'miss_print',
    'Over_ink': 'over_ink',
    'Set_off': 'set_off',
    'Under_ink': 'under_ink',
    'Wiping': 'wiping',
}

class_names = ['dot', 'miss_print', 'over_ink', 'set_off', 'under_ink', 'wiping']

# instantiate the CNN
use_cuda = torch.cuda.is_available()
layer_sizes = [512, 256, 128]
model = MyNet(output_size=6, layer_sizes=layer_sizes)
if use_cuda:
    model = model.cuda()

model.load_state_dict(torch.load('model/model_resnet101_512_256_128_back.pt', map_location=torch.device('cpu')))


# Get the directory path from the argument

if __name__ == "__main__":
    dir_path = sys.argv[1].strip()
#input the directory path
#dir_path = input('Enter the images directory path: ').strip()

#print(dir_path)
#delete the output folder if exists
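
    # The excerpt stops before the per-image loop. A hedged inference sketch over the
    # images in dir_path; the 224x224 preprocessing and the os / PIL / torchvision
    # imports are assumptions, not taken from the original file:
    import os
    from PIL import Image
    from torchvision import transforms

    preprocess = transforms.Compose([transforms.Resize((224, 224)),
                                     transforms.ToTensor()])
    model.eval()
    with torch.no_grad():
        for fname in sorted(os.listdir(dir_path)):
            img = Image.open(os.path.join(dir_path, fname)).convert('RGB')
            x = preprocess(img).unsqueeze(0)
            if use_cuda:
                x = x.cuda()
            pred = model(x).argmax(dim=1).item()
            print(fname, class_names[pred])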
Example #11
    def main(self):
        """
        训练接口主函数,完成整个训练流程
        1. 创建训练集和验证集的DataLoader类
        2. 初始化带训练的网络
        3. 选择合适的优化器
        4. 训练并验证指定个epoch,保存其中评价指标最好的模型,并打印训练过程信息
        5. TODO: 可视化训练过程信息
        """
        opts = self.opts
        if not os.path.exists(opts.checkpoints_dir):
            os.mkdir(opts.checkpoints_dir)
        random_seed = opts.random_seed
        train_dataset = MyDataset(opts.dataset_dir,
                                  seed=random_seed,
                                  mode="train",
                                  train_val_ratio=0.9)
        val_dataset = MyDataset(opts.dataset_dir,
                                seed=random_seed,
                                mode="val",
                                train_val_ratio=0.9)
        train_loader = DataLoader(train_dataset,
                                  opts.batch_size,
                                  shuffle=True,
                                  num_workers=opts.num_workers)
        val_loader = DataLoader(val_dataset,
                                batch_size=1,
                                shuffle=False,
                                num_workers=opts.num_workers)
        num_train = len(train_dataset)
        num_val = len(val_dataset)

        if opts.pretrain is None:
            model = MyNet()
        else:
            model = torch.load(opts.pretrain)
        if opts.use_GPU:
            model.to(opts.GPU_id)
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=opts.lr,
                                    momentum=0.9,
                                    weight_decay=opts.weight_decay)
        # optimizer = torch.optim.Adam(model.parameters(), lr=opts.lr, weight_decay=opts.weight_decay)

        best_metric = 1000000
        for e in range(opts.start_epoch, opts.epoch + 1):
            t = time.time()
            self.__train(model, train_loader, optimizer, e, num_train, opts)
            t2 = time.time()
            print("Training consumes %.2f second\n" % (t2 - t))
            with open(os.path.join(opts.checkpoints_dir, "log.txt"),
                      "a+") as log_file:
                log_file.write("Training consumes %.2f second\n" % (t2 - t))
            if e % opts.save_freq == 0 or e == opts.epoch + 1:
                # t = time.time()
                # metric = self.__validate(model, val_loader, e, num_val, opts)
                # t2 = time.time()
                # print("Validation consumes %.2f second\n" % (t2 - t))
                # with open(os.path.join(opts.checkpoints_dir, "log.txt"), "a+") as log_file:
                #     log_file.write("Validation consumes %.2f second\n" % (t2 - t))
                # if best_metric>metric:
                #     best_metric = metric
                #     print("Epoch %d is now the best epoch with metric %.4f\n"%(e, best_metric))
                #     with open(os.path.join(opts.checkpoints_dir, "log.txt"), "a+") as log_file:
                #         log_file.write("Epoch %d is now the best epoch with metric %.4f\n"%(e, best_metric))
                self.__save_model(model, e, opts)
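
    # A hedged sketch of the __save_model helper referenced above (its real
    # implementation is not included in the excerpt); it saves the whole model object,
    # matching the torch.load(opts.pretrain) call used for resuming:
    def __save_model(self, model, epoch, opts):
        save_path = os.path.join(opts.checkpoints_dir, "epoch_%d.pth" % epoch)
        torch.save(model, save_path)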
Example #12
import torch
from model import MyNet
from dataLoader import DataGetter, Euler2Rot
import numpy as np
from plotting import plotXYZ
from tkinter import filedialog as fd

# data_dir = 'D:/data_odometry_gray/dataset'
data_dir = 'D:/data_odometry_color/dataset'
folder_num = 6
batch_size = 1

model_name = fd.askopenfilename()
model = MyNet()
model.load_state_dict(torch.load(model_name))

getter = DataGetter(data_dir,
                    batch_size,
                    folder_num,
                    folder_num,
                    randomize_data=False)

if torch.cuda.is_available():
    device = 'cuda'
    model.cuda()
else:
    device = 'cpu'
model.eval()

running_R = torch.eye(3, dtype=torch.float)
running_t = torch.zeros((1, 3), dtype=torch.float)
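
# A heavily hedged sketch of the trajectory integration that running_R / running_t set
# up for; the DataGetter iteration, the model's output layout, and the Euler2Rot /
# plotXYZ signatures are all assumptions, not taken from the original file:
positions = []
with torch.no_grad():
    for inputs, _ in getter:
        pred = model(inputs.to(device)).cpu()[0]        # assumed [tx, ty, tz, rx, ry, rz]
        R_rel = Euler2Rot(pred[3:])                     # relative rotation between frames
        t_rel = pred[:3].reshape(1, 3)
        running_t = running_t + t_rel @ running_R.t()   # accumulate translation (assumed convention)
        running_R = running_R @ R_rel                   # accumulate rotation
        positions.append(running_t.squeeze(0).numpy())
plotXYZ(np.stack(positions))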