Example #1
import logging
import time

import mxnet as mx

# ConvNet, get_ucf101_split, and the ucf / resnet_50 / train_image / test_image
# configs are project-specific; they are assumed to be importable from the
# surrounding repo.


def main():
    logging.basicConfig(filename='log/experiment_spatial.log',
                        level=logging.INFO)
    logging.info("Start training image network: {}".format(
        time.asctime(time.localtime(time.time()))))
    ctx = mx.gpu(0)
    classes_labels, train_videos_classes, test_videos_classes = get_ucf101_split(
        ucf.split_dir, ucf.split_id)

    #videos = list(test_videos_classes.keys())
    #sample_videos= random.sample(videos, 500)
    #test_videos_classes_samples = {}
    #for video in sample_videos:
    #    test_videos_classes_samples[video] = test_videos_classes[video]

    cm = ConvNet(model_params=resnet_50,
                 data_params=ucf.image,
                 train_params=train_image,
                 test_params=test_image,
                 train_videos_classes=train_videos_classes,
                 test_videos_classes=test_videos_classes,
                 classes_labels=classes_labels,
                 num_classes=ucf.num_classes,
                 ctx=ctx,
                 mode='spatial')
    cm.train()

    return
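
A standard entry-point guard presumably closes the script; the excerpt does not show it, so the two lines below are an assumption:

if __name__ == '__main__':
    main()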

Example #2
import torch
import torch.nn as nn
import torch.optim as optim

# ConvNet and the test() evaluation helper are assumed to come from the
# surrounding project.


def normal_train(args, loader_train, loader_test, dtype):

    model = ConvNet()
    model = model.type(dtype)
    model.train()

    loss_f = nn.CrossEntropyLoss()

    SCHEDULE_EPOCHS = [15]
    learning_rate = 0.01

    for num_epochs in SCHEDULE_EPOCHS:

        print('\nTraining %d epochs with learning rate %.4f' %
              (num_epochs, learning_rate))

        optimizer = optim.Adam(model.parameters(), lr=learning_rate)

        for epoch in range(num_epochs):

            print('\nTraining epoch %d / %d ...\n' % (epoch + 1, num_epochs))
            # print(model.training)

            for i, (X_, y_) in enumerate(loader_train):

                # Variable is deprecated in modern PyTorch; plain tensors work.
                X = X_.type(dtype)
                y = y_.type(dtype).long()

                preds = model(X)

                loss = loss_f(preds, y)

                if (i + 1) % args.print_every == 0:
                    print('Batch %d done, loss = %.7f' % (i + 1, loss.item()))

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

            print('Epoch %d done, last batch loss = %.7f' % (epoch + 1, loss.item()))

            test(model, loader_test, dtype)

        learning_rate *= 0.1

    return model
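
normal_train above and the unrolled variant below both call a test() helper that the excerpt does not include. A minimal sketch, assuming torch is imported as above and a standard accuracy evaluation (only the name and call signature come from the excerpt; the body is an assumption):

def test(model, loader_test, dtype):
    # Hypothetical evaluation helper matching the test(...) calls above/below.
    model.eval()
    num_correct, num_samples = 0, 0
    with torch.no_grad():
        for X_, y_ in loader_test:
            X = X_.type(dtype)
            y = y_.type(dtype).long()
            num_correct += (model(X).argmax(dim=1) == y).sum().item()
            num_samples += y.size(0)
    model.train()  # restore training mode for the caller's loop
    print('Test accuracy: %.4f' % (num_correct / num_samples))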

Example #3
# Uses the same imports and project helpers as Example #2 (torch, optim,
# ConvNet, test); cw_train_unrolled is not shown in the excerpt (see the
# sketch after this function).
def unrolled(args, loader_train, loader_test, dtype):

    model = ConvNet()
    model = model.type(dtype)
    model.train()

    SCHEDULE_EPOCHS = [50, 50]
    learning_rate = 5e-4

    for num_epochs in SCHEDULE_EPOCHS:

        print('\nTraining %d epochs with learning rate %.7f' %
              (num_epochs, learning_rate))

        optimizer = optim.Adam(model.parameters(), lr=learning_rate)

        for epoch in range(num_epochs):

            print('\nTraining epoch %d / %d ...\n' % (epoch + 1, num_epochs))
            # print(model.training)

            for i, (X_, y_) in enumerate(loader_train):

                # Variable is deprecated in modern PyTorch; plain tensors work.
                X = X_.type(dtype)
                y = y_.type(dtype)

                loss = cw_train_unrolled(model, X, y, dtype)

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                if (i + 1) % args.print_every == 0:
                    print('Batch %d done, loss = %.7f' % (i + 1, loss.item()))

                    test(model, loader_test, dtype)

            print('Epoch %d done, last batch loss = %.7f' % (epoch + 1, loss.item()))

        learning_rate *= 0.1

    return model
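
cw_train_unrolled is likewise not included. Judging by its name, it computes a Carlini-Wagner-style adversarial loss with the inner attack steps unrolled through autograd; the actual objective is project-specific, so the generic sketch below (cross-entropy standing in for the real CW objective, all parameter values made up) only illustrates the unrolling pattern:

import torch
import torch.nn.functional as F

def cw_train_unrolled(model, X, y, dtype, steps=3, step_size=0.1):
    # Hypothetical sketch: keep the inner perturbation updates in the autograd
    # graph (create_graph=True) so the outer loss.backward() differentiates
    # through them.
    delta = torch.zeros_like(X, requires_grad=True)
    target = y.long()
    for _ in range(steps):
        inner_loss = F.cross_entropy(model(X + delta), target)
        grad, = torch.autograd.grad(inner_loss, delta, create_graph=True)
        delta = delta + step_size * grad  # ascend the inner loss, staying differentiable
    return F.cross_entropy(model(X + delta), target)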
Example #4
import torch
from tqdm import tqdm

# calculate_data_size, run_loader, and ConvNet are assumed to come from the
# surrounding project.


class Training:
    def __init__(self, epoch, learningRate, batchSize, imageSize, L2Rate, trainPath):
        self.epoch = epoch
        self.learningRate = learningRate
        self.batchSize = batchSize
        self.imageSize = imageSize
        self.L2Rate = L2Rate
        self.trainPath = trainPath
        self.data_size = calculate_data_size(self.trainPath)
        self.num_batches = self.data_size // batchSize
        self.data_loader = run_loader('train', trainPath, batchSize, imageSize, shuffle=True)
        self.model = ConvNet(10)
        self.train()

    def train(self):
        self.model.train()

        crossentropy = torch.nn.CrossEntropyLoss()
        optimizer = torch.optim.Adam(self.model.parameters(), lr=self.learningRate, weight_decay=self.L2Rate)

        for epoch in range(self.epoch):
            epoch_loss = 0
            epoch_acc = 0
            for X, y in tqdm(self.data_loader):
                optimizer.zero_grad()
                out = self.model(X)

                loss = crossentropy(out, y)
                loss.backward()
                optimizer.step()

                epoch_loss += loss.item()  # .item() converts the tensor loss to a Python float
                predictions = torch.argmax(out, 1)
                epoch_acc += torch.sum(predictions == y).item()

            epoch_loss = epoch_loss / self.num_batches
            epoch_acc = epoch_acc / self.data_size
            print(f"Epoch {epoch}:", "ACC:", epoch_acc, "LOSS:", epoch_loss)

            torch.save(self.model.state_dict(), f"Trained/Model_{epoch}.model")
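
Because __init__ calls self.train() directly, instantiating the class starts a run immediately. A hypothetical invocation (all hyperparameter values below are made up for illustration):

trainer = Training(epoch=20, learningRate=1e-3, batchSize=32,
                   imageSize=224, L2Rate=1e-4, trainPath='data/train')

Note that the Trained/ directory must already exist, since torch.save does not create intermediate directories.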
Example #5
# train loop with k-fold cross-validation
import numpy as np

# train_data, train_data_size, epoch, batch_size, model, criterion, optimizer,
# and the minibatch/train_batch/val_batch helpers come from the surrounding
# script (a sketch of minibatch follows this example).
k_fold = 10
fold_size = int(train_data_size // k_fold)
for i in range(k_fold):

    # split data into train/val
    val_data_curr_fold = train_data[i * fold_size:(i + 1) * fold_size]
    train_data_curr_fold_head = train_data[:i * fold_size]
    train_data_curr_fold_tail = train_data[(i + 1) * fold_size:]
    train_data_curr_fold = np.concatenate(
        (train_data_curr_fold_head, train_data_curr_fold_tail))

    # epoch loop
    # NOTE: for a true k-fold evaluation, the model and optimizer should be
    # re-initialized here so that successive folds do not share trained weights.
    model.train()
    for curr_epoch in range(epoch):

        # train minibatch
        train_pred = []
        train_data_curr_fold = train_data_curr_fold[np.random.permutation(
            len(train_data_curr_fold))]
        for b in minibatch(train_data_curr_fold, batch_size):
            train_batch_pred = train_batch(model, criterion, optimizer, b)
            train_pred.append(train_batch_pred)
        train_pred = np.concatenate(train_pred, axis=0)

        # validation minibatch
        val_pred = []
        for b in minibatch(val_data_curr_fold, batch_size):
            val_batch_pred = val_batch(model, criterion, optimizer, b)
            val_pred.append(val_batch_pred)
        val_pred = np.concatenate(val_pred, axis=0)
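
The minibatch helper used above is not part of the excerpt. A minimal sketch, assuming the data is an indexable array of samples (the body is an assumption):

def minibatch(data, batch_size):
    # Hypothetical helper: yield successive batch_size-sized slices of data.
    for start in range(0, len(data), batch_size):
        yield data[start:start + batch_size]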
Example #6
import torch
from torch.optim import Adam
from torch.utils.data import DataLoader
from torchsummary import summary

# ConvNet, MultiBranchLoss, WIDERFaceDetection, WIDERFace_ROOT, SSDAugmentation,
# writer, and the hyperparameters (epochs, batch_size, learning_rate, obj_scale,
# nobj_scale, loc_scale, show_iter) come from the surrounding script; collate_fn
# is sketched after this example.

model = ConvNet()
if torch.cuda.is_available():
    model = model.cuda()
    summary(model, input_size=(3, 640, 640), device='cuda')
else:
    summary(model, input_size=(3, 640, 640), device='cpu')
# model.load_state_dict(torch.load('no_gassuion_epoch35.pth'))

criterion = MultiBranchLoss(input_size=(640, 640), writer=writer, obj_scale=obj_scale, nobj_scale=nobj_scale,
                            loc_scale=loc_scale)
optimizer = Adam(model.parameters(), lr=learning_rate)

batches_loss = 0
# Build the dataset and loader once, outside the epoch loop.
dataset = WIDERFaceDetection(WIDERFace_ROOT, transform=SSDAugmentation(640, (127.5, 127.5, 127.5)))
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)

for epoch in range(epochs):
    model.train()
    for i, (images, labels) in enumerate(dataloader):
        batch_num = epoch * len(dataloader) + i + 1
        optimizer.zero_grad()
        if torch.cuda.is_available():
            images = images.cuda()
        outputs = model(images)
        loss = criterion(outputs, labels, batch_num)
        batches_loss += loss.item()
        loss.backward()
        optimizer.step()
        if batch_num % show_iter == 0:
            average_loss = batches_loss / show_iter
            print("epoch {} batch {}: average loss {:.4f}".format(epoch, i, average_loss))
            batches_loss = 0
Example #7
import torch
import torch.nn as nn
import torch.optim as optim

# ConvNet, MetricTracker, trainloader, testloader, device, and EPOCHS come from
# the surrounding script. The opening of the evaluation function below was cut
# off in the excerpt; its header and first lines are reconstructed here, and the
# name `evaluate` is an assumption.


def evaluate(net, testloader):
    net.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for images, labels in testloader:
            images, labels = images.to(device), labels.to(device)
            outputs = net(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

    return correct / total


net = ConvNet().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

mt = MetricTracker()

for epoch in range(EPOCHS):
    net.train()

    running_loss = 0.0
    for i, (inputs, labels) in enumerate(trainloader):
        inputs, labels = inputs.to(device), labels.to(device)

        # zero the parameter gradients
        optimizer.zero_grad()

        # forward + backward + optimize
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        # print statistics
        running_loss += loss.item()
        if (i + 1) % 100 == 0:  # the excerpt is cut off here; print frequency is an assumption
            print('epoch %d, batch %5d: avg loss %.3f' % (epoch + 1, i + 1, running_loss / 100))
            running_loss = 0.0