# Example #1
from transform import Compose, ToTensor, Resize
from model import MyNet

# Select GPU 0 and build the input pipeline.
# NOTE(review): `torch`, `DataLoader` and `FightDataset` are imported /
# defined above this chunk — confirm before moving this code.
torch.cuda.set_device(0)
transform_ = Compose([Resize((112, 112)), ToTensor()])
# NOTE(review): keyword `tranform` looks like a typo for `transform` —
# confirm against FightDataset.__init__ before renaming it here.
xx = FightDataset("./fight_classify", tranform=transform_)

dataloader = DataLoader(xx, batch_size=1, shuffle=True)
dev = torch.device("cuda:0")
model = MyNet().to(dev)

# Sum-reduced MSE loss with plain momentum SGD.
criterion = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=1e-8, momentum=0.9)

for t in range(20):
    for i_batch, sample_batched in enumerate(dataloader):
        # BUG FIX: the batch tensors stayed on the CPU while the model was
        # moved to the GPU, which raises a device-mismatch error at the
        # forward pass — move both tensors to the model's device.
        image = sample_batched["image"].to(dev)
        label = sample_batched["label"].to(dev)
        # Forward pass: compute predicted y and the loss.
        y_pred = model(image)
        loss = criterion(y_pred, label)
        # BUG FIX: the original loop computed the loss but never updated
        # the weights; add the standard zero_grad / backward / step cycle.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if i_batch % 2000 == 1999:
            print(t, loss.item())
# Example #2
# NOTE(review): this chunk starts mid-script — `trainset`, `testset`,
# `train_shape`, `learning_rate`, `momentum`, `nb_epochs` and `trainloader`
# are all defined above the visible region.
test_shape = testset.data.shape
train_nb = train_shape[0]  # first dim of the raw data tensor = sample count
test_nb = test_shape[0]
height = train_shape[1]
width = train_shape[2]
classes = trainset.classes
print('Training set size : %d' % train_nb)
print('Test set size     : %d' % test_nb)
print('Image size        : %d x %d\n' % (height, width))

# Build the network
model = MyNet()

# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=momentum)

# Training
print('Training')
for epoch in range(nb_epochs):

    # Set the model to training mode
    model.train()

    # Running loss container
    running_loss = 0.0

    # Iterate through mini-batches
    for i, data in enumerate(trainloader, 0):

        # Get the mini-batch data
        # NOTE(review): the loop body is truncated here — the forward /
        # backward / step logic continues below this chunk.
# Example #3
def main(args):
    """Train MyNet on MyDataset and checkpoint the weights every 10 epochs.

    Args:
        args: parsed CLI options; only ``args.model`` (the checkpoint
            sub-directory name under ``models/``) is read here.
    """
    # Hyper-parameters.
    end_epoch = 100
    lr = 0.001
    beta1 = 0.5
    beta2 = 0.99
    gpu = 0  # GPU index; a negative value forces CPU
    batch_size = 4

    # Build the model.
    model = MyNet()

    # Select device: the requested GPU when available, otherwise CPU.
    if gpu >= 0 and torch.cuda.is_available():
        device = 'cuda:{}'.format(gpu)
    else:
        device = 'cpu'
    model.to(device)

    # Report the number of trainable parameters and the architecture.
    params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(params)
    print(model)

    criteria = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=lr,
                                 betas=(beta1, beta2))

    dataset = MyDataset("data/", is_train=True)
    train_loader = torch.utils.data.DataLoader(dataset,
                                               batch_size=batch_size,
                                               shuffle=True)

    for epoch in range(end_epoch):
        epoch_loss = 0.0
        epoch_acc = 0
        for i, data in enumerate(train_loader):
            print("\repoch: {} iteration: {}".format(epoch, i), end="")

            inputs, labels = data
            optimizer.zero_grad()
            outputs = model(inputs.to(device))
            _, preds = torch.max(outputs.data, 1)
            loss = criteria(outputs, labels.to(device))

            loss.backward()
            optimizer.step()

            # Accumulate sample-weighted loss and correct-prediction counts.
            # .item() replaces the deprecated `.data.to('cpu')` access.
            epoch_loss += loss.item() * inputs.size(0)
            epoch_acc += torch.sum(preds.cpu() == labels).item()

        # BUG FIX: dividing by len(train_loader) * batch_size over-counts
        # whenever the final batch is partial; the accumulators are weighted
        # by actual sample counts, so divide by the dataset size instead.
        epoch_loss /= len(dataset)
        epoch_acc = epoch_acc / float(len(dataset))

        print("[epoch: {}] [Loss: {:.4f}] [Acc: {:.4f}]".format(
            epoch, epoch_loss, epoch_acc))
        if (epoch + 1) % 10 == 0:
            # exist_ok makes the directory creation idempotent and race-safe.
            save_dir = os.path.join("models", args.model)
            os.makedirs(save_dir, exist_ok=True)
            torch.save(model.state_dict(),
                       os.path.join(save_dir, "{}.pth".format(epoch)))
# Example #4
                           n_segments=args.num_superpixels)  # NOTE(review): continuation of a superpixel-segmentation call begun above this chunk
# Flatten the per-pixel superpixel labels to one id per pixel, then collect,
# for every unique superpixel, the flat indices of its member pixels.
labels = labels.reshape(im.shape[0] * im.shape[1])
u_labels = np.unique(labels)
l_inds = []
for i in range(len(u_labels)):
    l_inds.append(np.where(labels == u_labels[i])[0])

# train
from model import MyNet

# data.size(1) input channels; args control the net's width/depth.
model = MyNet(data.size(1), args.nChannel, args.nConv)
if use_cuda:
    model.cuda()
model.train()
loss_fn = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.9)
# Random palette for visualising up to 100 distinct cluster labels.
label_colours = np.random.randint(255, size=(100, 3))
for batch_idx in range(args.maxIter):
    # forwarding
    optimizer.zero_grad()
    output = model(data)[0]
    # Presumably (C, H, W) -> one row of nChannel scores per pixel; confirm
    # the model's output layout before relying on this.
    output = output.permute(1, 2, 0).contiguous().view(-1, args.nChannel)
    # Hard cluster assignment: argmax over channels for each pixel.
    ignore, target = torch.max(output, 1)
    im_target = target.data.cpu().numpy()
    nLabels = len(np.unique(im_target))
    if args.visualize:
        # Colour each pixel by its (mod-100) cluster id and display it.
        im_target_rgb = np.array([label_colours[c % 100] for c in im_target])
        im_target_rgb = im_target_rgb.reshape(im.shape).astype(np.uint8)
        cv2.imshow("output", im_target_rgb)
        cv2.waitKey(10)
    # NOTE(review): loop body is truncated here — the loss computation and
    # backward/step presumably continue below this chunk.
import torch
import torch.optim as optim
from model import MyNet
import time
import copy
from dataLoader import DataGetter
import matplotlib.pyplot as plt
import pickle

from train import train_model

if __name__ == "__main__":
    # Network and optimiser: only parameters flagged as trainable are
    # handed to SGD, so frozen layers are skipped.
    net = MyNet()
    sgd = optim.SGD((p for p in net.parameters() if p.requires_grad),
                    lr=0.001)

    # Data loaders: one DataGetter range for training, another for
    # validation, both reading from the same dataset root.
    data_dir = 'D:/data_odometry_color/dataset/'
    batch_size = 16
    train_data = DataGetter(data_dir, batch_size, 0, 6, randomize_data=True)
    val_data = DataGetter(data_dir, batch_size, 7, 7, randomize_data=True)

    # Run the training loop and collect the trained model plus metrics.
    net, metrics = train_model(net,
                               sgd,
                               train_data,
                               val_data,
                               num_epochs=50)
# Example #6
    def main(self):
        """Top-level training entry point: run the whole training flow.

        1. Build DataLoaders for the training and validation splits.
        2. Initialise (or load) the network to be trained.
        3. Choose a suitable optimizer.
        4. Train/validate for the configured epochs, saving checkpoints
           and logging timing information (best-model tracking is
           currently commented out).
        5. TODO: visualise training-progress information.
        """
        opts = self.opts
        # Make sure the checkpoint directory exists before any writes.
        if not os.path.exists(opts.checkpoints_dir):
            os.mkdir(opts.checkpoints_dir)
        # The same seed drives both datasets so the 0.9 train/val split is
        # partitioned consistently between them.
        random_seed = opts.random_seed
        train_dataset = MyDataset(opts.dataset_dir,
                                  seed=random_seed,
                                  mode="train",
                                  train_val_ratio=0.9)
        val_dataset = MyDataset(opts.dataset_dir,
                                seed=random_seed,
                                mode="val",
                                train_val_ratio=0.9)
        train_loader = DataLoader(train_dataset,
                                  opts.batch_size,
                                  shuffle=True,
                                  num_workers=opts.num_workers)
        # Validation runs one sample at a time in a fixed order.
        val_loader = DataLoader(val_dataset,
                                batch_size=1,
                                shuffle=False,
                                num_workers=opts.num_workers)
        num_train = len(train_dataset)
        num_val = len(val_dataset)

        # Either build a fresh model or resume from a pretrained snapshot.
        if opts.pretrain is None:
            model = MyNet()
        else:
            model = torch.load(opts.pretrain)
        if opts.use_GPU:
            model.to(opts.GPU_id)
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=opts.lr,
                                    momentum=0.9,
                                    weight_decay=opts.weight_decay)
        # optimizer = torch.optim.Adam(model.parameters(), lr=opts.lr, weight_decay=opts.weight_decay)

        best_metric = 1000000  # best (lowest) validation metric seen so far
        for e in range(opts.start_epoch, opts.epoch + 1):
            t = time.time()
            self.__train(model, train_loader, optimizer, e, num_train, opts)
            t2 = time.time()
            print("Training consumes %.2f second\n" % (t2 - t))
            with open(os.path.join(opts.checkpoints_dir, "log.txt"),
                      "a+") as log_file:
                log_file.write("Training consumes %.2f second\n" % (t2 - t))
            # NOTE(review): `e == opts.epoch + 1` can never be true — the
            # range above stops at opts.epoch. Probably meant `opts.epoch`.
            if e % opts.save_freq == 0 or e == opts.epoch + 1:
                # t = time.time()
                # metric = self.__validate(model, val_loader, e, num_val, opts)
                # t2 = time.time()
                # print("Validation consumes %.2f second\n" % (t2 - t))
                # with open(os.path.join(opts.checkpoints_dir, "log.txt"), "a+") as log_file:
                #     log_file.write("Validation consumes %.2f second\n" % (t2 - t))
                # if best_metric>metric:
                #     best_metric = metric
                #     print("Epoch %d is now the best epoch with metric %.4f\n"%(e, best_metric))
                #     with open(os.path.join(opts.checkpoints_dir, "log.txt"), "a+") as log_file:
                #         log_file.write("Epoch %d is now the best epoch with metric %.4f\n"%(e, best_metric))
                self.__save_model(model, e, opts)