Example 1
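This example trains a multi-label activity classifier on three input feature streams with BCEWithLogitsLoss, logs the per-epoch training loss to TensorBoard, and computes mean average precision (mAP) on a test split after every epoch.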
import tqdm
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchnet import meter

# Activity_Detection, Train_Dataset, Test_Dataset and the TensorBoard
# SummaryWriter `writer` are assumed to be defined elsewhere in the project.

def train():
    network = Activity_Detection(2048, 4096, 128, 300, 128, 101).cuda()
    optimizer = optim.Adam(network.parameters(), lr=3e-4)
    criterion = nn.BCEWithLogitsLoss()  # multi-label loss over the class scores

    dataset_train = Train_Dataset(True)
    train_loader = DataLoader(dataset_train, batch_size=100, shuffle=True, num_workers=3)

    for epoch in range(1000):
        epoch_loss = 0.0
        t_batch = 0.0
        network.train()
        for batch_index, data in tqdm.tqdm(enumerate(train_loader)):
            data_r, data_c, data_a, output = data
            data_r = data_r.cuda()
            data_c = data_c.cuda()
            data_a = data_a.cuda()
            output = output.float().cuda()  # BCEWithLogitsLoss expects float targets
            final_output = output.view(-1, 101)
            optimizer.zero_grad()
            # print(data_r.size(), data_c.size(), data_a.size(), output.size())
            pred = network(data_r, data_c, data_a)
            final_pred = pred.view(-1, 101)
            loss = criterion(final_pred, final_output)
            epoch_loss += loss.item()  # accumulate a plain float so the graph is freed
            loss.backward()
            optimizer.step()
            t_batch += 1
        epoch_loss = epoch_loss / t_batch
        writer.add_scalar('Epoch Training Loss', epoch_loss, epoch)
        print(epoch_loss)

        network.eval()
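        # Evaluate mAP on the test split after each training epoch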
        dataset_test = Test_Dataset(True)
        test_loader = DataLoader(dataset_test, batch_size=100, shuffle=True, num_workers=3)

        mtr = meter.APMeter()
        for batch_index, data in tqdm.tqdm(enumerate(test_loader)):
            data1_r, data1_c, data1_a, output1 = data
            data1_r = data1_r.cuda()
            data1_c = data1_c.cuda()
            data1_a = data1_a.cuda()
            # data1 = torch.squeeze(data1)
            with torch.no_grad():
                output1 = output1.float().cuda()
                pred = network(data1_r, data1_c, data1_a)
                max_pooling = nn.MaxPool2d((20, 1))
                pred = torch.squeeze(max_pooling(pred))
                pred = torch.sigmoid(pred)
                pred = pred[:, :100]
                # print(pred)
                # print(pred.size())
                mtr.add(pred, output1)
        map_value = mtr.value().mean()
        writer.add_scalar('mAP', map_value, epoch)
        print(map_value)
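The snippet does not show how writer is created or how train() is launched; a minimal sketch of that setup, assuming the standard TensorBoard SummaryWriter and a hypothetical log directory, could look like this:

from torch.utils.tensorboard import SummaryWriter

# Hypothetical setup; the original project may create the writer differently.
writer = SummaryWriter(log_dir='runs/activity_detection')

if __name__ == '__main__':
    train()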
Example 2
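This variant frames the task as single-label classification: it trains with CrossEntropyLoss on single-sample batches and reports top-1 accuracy on the test split after each epoch.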
import tqdm
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader

# As in the first example, Activity_Detection, Train_Dataset, Test_Dataset and
# the TensorBoard SummaryWriter `writer` come from the surrounding project.

def train():
    network = Activity_Detection(4096, 512, 128, 101).cuda()
    optimizer = optim.Adam(network.parameters(), lr=3e-4)
    criterion = nn.CrossEntropyLoss()  # single-label classification loss

    dataset_train = Train_Dataset(True)
    train_loader = DataLoader(dataset_train,
                              batch_size=1,
                              shuffle=True,
                              num_workers=3)

    for epoch in range(100):
        epoch_loss = 0.0
        t_batch = 0.0
        network.train()
        for batch_index, data in tqdm.tqdm(enumerate(train_loader)):
            data, output = data
            data = data.cuda()
            data = torch.squeeze(data)
            output = output.cuda()
            output = torch.squeeze(output)

            optimizer.zero_grad()

            pred = network(data)
            loss = criterion(pred, output)

            epoch_loss += loss.item()  # accumulate a plain float so the graph is freed

            loss.backward()
            optimizer.step()

            t_batch += 1

        epoch_loss = epoch_loss / t_batch
        writer.add_scalar('Epoch Training Loss', epoch_loss, epoch)
        print(epoch_loss)

        network.eval()
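        # Evaluate top-1 accuracy on the test split after each training epoch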
        dataset_test = Test_Dataset(True)
        test_loader = DataLoader(dataset_test,
                                 batch_size=1,
                                 shuffle=True,
                                 num_workers=3)
        correct = 0.0
        total = 0.0
        for batch_index, data in tqdm.tqdm(enumerate(test_loader)):
            data1, output1 = data
            data1 = data1.cuda()
            # data1 = torch.squeeze(data1)
            output1 = output1.cuda()
            # output1 = torch.squeeze(output1)
            with torch.no_grad():  # inference only; no gradients needed
                pred = network(data1)
                pred = nn.functional.softmax(pred, dim=1)
                pred_class = torch.argmax(pred, dim=1)

            pred_class = int(pred_class.item())
            actual_class = int(output1.item())

            if pred_class == actual_class:
                correct += 1
            total += 1

        accuracy = correct / total
        writer.add_scalar('Test Accuracy', accuracy, epoch)
        print(accuracy)
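The two examples differ mainly in problem framing: the first treats activity detection as a multi-label problem (sigmoid scores evaluated with per-class average precision), while the second reduces it to single-label classification (softmax scores evaluated with plain accuracy).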