# Example #1
# 0
def generate_gt_data(index, dataset=None, output_res=128):
    '''Assemble ground-truth tensors shaped like a detection-head output, for testing.

    Args:
        index: sample index into the dataset.
        dataset: optional mapping-style dataset; ``dataset[index]`` must return a
            dict of numpy arrays with keys 'hm', 'reg_mask', 'ind', 'wh', 'reg'.
            Defaults to a fresh ``ctDataset()`` (previous hard-coded behavior).
        output_res: side length of the square output feature map (default 128,
            the previous hard-coded value).

    Returns:
        Tuple ``(hm, wh, reg)`` of torch tensors with shapes
        ``(1, 1, R, R)``, ``(1, 2, R, R)``, ``(1, 2, R, R)`` where ``R = output_res``.
    '''
    if dataset is None:
        dataset = ctDataset()
    gt_res = dataset[index]
    # Convert every field to a torch tensor in place.
    for key in gt_res:
        gt_res[key] = torch.from_numpy(gt_res[key])

    wh = torch.zeros((1, 2, output_res, output_res))
    reg = torch.zeros((1, 2, output_res, output_res))
    hm = gt_res['hm'].reshape(1, 1, output_res, output_res)

    # One slot per potential object; reg_mask marks the slots actually used.
    num_objs = gt_res['reg_mask'].shape[0]
    for i in range(num_objs):
        if gt_res['reg_mask'][i] == 0:
            continue  # empty slot
        # 'ind' is a flattened row-major index into the output map.
        ind = int(gt_res['ind'][i])
        height_idx = ind // output_res
        width_idx = ind % output_res
        wh[0, 0, height_idx, width_idx] = gt_res['wh'][i, 0]
        wh[0, 1, height_idx, width_idx] = gt_res['wh'][i, 1]
        reg[0, 0, height_idx, width_idx] = gt_res['reg'][i, 0]
        reg[0, 1, height_idx, width_idx] = gt_res['reg'][i, 1]

    return hm, wh, reg
# Example #2
# 0
# --- Training-setup script fragment ---
# NOTE(review): this fragment appears truncated by the example boundary —
# the epoch loop body below ends right after the lr decay, and the new
# learning_rate is never written back into the optimizer's param groups
# within the visible lines. Verify against the original script.
model.train()

learning_rate = 1.25e-4
num_epochs = 150

# different learning rate
# Build one param group per parameter so learning rates could later be
# tuned individually; all groups start at the same lr here.
params=[]
params_dict = dict(model.named_parameters())
for key,value in params_dict.items():
    params += [{'params':[value],'lr':learning_rate}]

#optimizer = torch.optim.SGD(params, lr=learning_rate, momentum=0.9, weight_decay=5e-4)
optimizer = torch.optim.Adam(params, lr=learning_rate, weight_decay=1e-4)


train_dataset = ctDataset(split='train')
train_loader = DataLoader(train_dataset,batch_size=2,shuffle=False,num_workers=0)  # num_workers is the number of worker threads for loading data (batches)

test_dataset = ctDataset(split='val')
test_loader = DataLoader(test_dataset,batch_size=4,shuffle=False,num_workers=0)
print('the dataset has %d images' % (len(train_dataset)))


num_iter = 0

# Track the best validation loss seen so far (for checkpointing).
best_test_loss = np.inf 

for epoch in range(num_epochs):
    model.train()
    # Step-decay the learning rate once at epoch 90.
    if epoch == 90:
        learning_rate= learning_rate * 0.1 
def main():
    """Train DlaNet on ctDataset with a CenterNet-style detection loss,
    checkpointing every epoch and keeping the best model by validation loss.

    NOTE(review): the tail of this function is suspect — the ``else:`` below
    the epoch loop binds to the ``for`` statement (a for/else), the best
    checkpoint is re-loaded onto 'cuda:1' *inside every epoch iteration*,
    and the data loaders are rebuilt afterwards for a ``Predictor``. This
    reads like a training script and an evaluation script fused together;
    confirm against the original sources before relying on it.
    """

    use_gpu = torch.cuda.is_available()
    print("Use CUDA? ", use_gpu)

    # DLA backbone, depth 34.
    model = DlaNet(34)

    if (use_gpu):
        # os.environ["CUDA_VISIBLE_DEVICES"] = '0'
        # model = nn.DataParallel(model)
        # print('Using ', torch.cuda.device_count(), "CUDAs")
        print('cuda', torch.cuda.current_device(), torch.cuda.device_count())
        device = torch.device("cuda")
        model.cuda()
    else:
        device = torch.device("cpu")

    # CenterNet-style composite loss: heatmap, size (wh) and offset (reg) terms.
    loss_weight = {'hm_weight': 1, 'wh_weight': 0.1, 'reg_weight': 0.1}
    criterion = CtdetLoss(loss_weight)

    model.train()

    learning_rate = 5e-4
    num_epochs = 70

    # different learning rate
    # One param group per parameter; all groups start at the same lr.
    params = []
    params_dict = dict(model.named_parameters())
    for key, value in params_dict.items():
        params += [{'params': [value], 'lr': learning_rate}]

    optimizer = torch.optim.Adam(params, lr=learning_rate, weight_decay=1e-4)

    # split into training and testing set
    # 80/20 split with a fixed seed so the split is reproducible across runs.
    full_dataset = ctDataset()
    full_dataset_len = full_dataset.__len__()
    print("Full dataset has ", full_dataset_len, " images.")
    train_size = int(0.8 * full_dataset_len)
    test_size = full_dataset_len - train_size
    train_dataset, test_dataset = torch.utils.data.random_split(full_dataset, [train_size, test_size], \
                                                                generator=torch.Generator().manual_seed(42))
    print("Training set and testing set has: ", train_dataset.__len__(), \
            " and ", test_dataset.__len__(), " images respectively.")

    train_loader = DataLoader(train_dataset,
                              batch_size=8,
                              shuffle=False,
                              num_workers=0)
    test_loader = DataLoader(test_dataset,
                             batch_size=8,
                             shuffle=False,
                             num_workers=0)

    # Best validation loss so far, used to decide when to save '../best.pth'.
    best_test_loss = np.inf

    # Rows of (epoch, mean train loss, validation loss), dumped to CSV each epoch.
    loss_log = np.empty((0, 3))

    for epoch in range(num_epochs):
        model.train()
        # Step-decay schedule: x0.1 at epoch 45, x0.01 (total) at epoch 60.
        if epoch == 45:
            learning_rate = learning_rate * 0.1
        if epoch == 60:
            learning_rate = learning_rate * (0.1**2)
        # Push the (possibly decayed) lr into every param group.
        for param_group in optimizer.param_groups:
            param_group['lr'] = learning_rate

        total_loss = 0.0

        for i, sample in enumerate(train_loader):

            # Move every field of the sample dict to the training device.
            for k in sample:
                sample[k] = sample[k].to(device=device, non_blocking=True)

            pred = model(sample['input'])
            loss = criterion(pred, sample)
            total_loss += loss.item()
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Progress print every 5 iterations.
            if (i + 1) % 5 == 0:

                print(
                    'Epoch [%d/%d], Iter [%d/%d] Loss: %.4f, average_loss: %.4f'
                    % (epoch + 1, num_epochs, i + 1, len(train_loader),
                       loss.data, total_loss / (i + 1)))

        # validation
        # NOTE(review): no torch.no_grad() here — gradients are still tracked
        # during validation; confirm whether that is intentional.
        validation_loss = 0.0
        model.eval()
        for i, sample in enumerate(test_loader):
            if use_gpu:
                for k in sample:
                    sample[k] = sample[k].to(device=device, non_blocking=True)

            pred = model(sample['input'])
            loss = criterion(pred, sample)
            validation_loss += loss.item()
        validation_loss /= len(test_loader)

        print('Epoch [%d/%d] Validation loss %.5f' %
              (epoch + 1, num_epochs, validation_loss))

        # Append this epoch's stats and rewrite the whole CSV each time.
        loss_log = np.append(
            loss_log,
            [[epoch + 1, total_loss / len(train_loader), validation_loss]],
            axis=0)
        np.savetxt('../loss_log.csv', loss_log, delimiter=',')

        if best_test_loss > validation_loss:
            best_test_loss = validation_loss
            print('Get best test loss.')
            torch.save(model.state_dict(), '../best.pth')

        # Per-epoch checkpoint regardless of validation result.
        torch.save(model.state_dict(), '../' + str(epoch + 1) + '_epoch.pth')
        # os.environ["CUDA_VISIBLE_DEVICES"] = '0'
        # model = nn.DataParallel(model)
        # print('Using ', torch.cuda.device_count(), "CUDAs")
        # NOTE(review): from here on looks like fused evaluation-script code —
        # it reloads '../best.pth' onto a *different* device ('cuda:1') on
        # every epoch iteration. Verify this is intended.
        print('cuda', torch.cuda.current_device(), torch.cuda.device_count())
        device = torch.device('cuda:1')
        model.load_state_dict(torch.load('../best.pth'))
        model.to(device)
    # NOTE(review): this 'else' is a for/else — it runs once after the epoch
    # loop completes; it is NOT an 'if use_gpu' branch as the body suggests.
    else:
        device = torch.device('cpu')
        model.load_state_dict(
            torch.load('../best.pth', map_location=torch.device('cpu')))

    model.eval()

    # get the input from the same data loader
    # Rebuild the identical 80/20 split (same seed 42) for prediction.
    full_dataset = ctDataset()
    full_dataset_len = full_dataset.__len__()
    train_size = int(0.8 * full_dataset_len)
    test_size = full_dataset_len - train_size
    train_dataset, test_dataset = torch.utils.data.random_split(full_dataset, [train_size, test_size], \
                                                                generator=torch.Generator().manual_seed(42))
    train_loader = DataLoader(train_dataset,
                              batch_size=1,
                              shuffle=False,
                              num_workers=0)
    test_loader = DataLoader(test_dataset,
                             batch_size=1,
                             shuffle=False,
                             num_workers=0)

    my_predictor = Predictor(use_gpu)
# Example #5
# 0
    transforms.RandomGrayscale(),
    # # transforms.ToTensor(),
    # torch.from_numpy,
    # transforms.ToPILImage(),
    # transforms.ColorJitter(hue=.5, saturation=.5)
    # transforms.ToTensor()
])

# Suffix/tag used for centered runs; empty when centering is disabled.
centered = "centered" if config["center"] else ""

# Training data: augmented via `transform`, sized and centered per config.
train_dataset = ctDataset(
    split="train",
    transform=transform,
    input_size=int(config["input_size"]),
    center=bool(config["center"]),
)
train_loader = DataLoader(
    train_dataset,
    batch_size=config["batch_size"],
    shuffle=True,
    num_workers=0,
)

# Validation data: no augmentation, fixed batch size, deterministic order.
test_dataset = ctDataset(split="val")
test_loader = DataLoader(
    test_dataset,
    batch_size=4,
    shuffle=False,
    num_workers=0,
)
print("[INFO]: The dataset has %d images" % (len(train_dataset)))