Example #1
    with torch.no_grad():
        for i, batch in enumerate(valid_loader):

            inputs = batch['image'].float().cuda()
            real_hmaps = batch['hmap'].float().cuda()
            coors_gt = batch['coor_bc'].numpy()
            img_names = batch['img_name']

            outputs = student(inputs)
            coors_pred = []
            for out in outputs:
                coors_pred.append(
                    list(heatmap_to_coor(out.detach().cpu().numpy().squeeze())))
            # rescale 224x224 predictions to the original 1280x1024 frame
            coors_pred = np.asarray(coors_pred) * [1280 / 224, 1024 / 224]
            coors_pred = coors_pred.astype(int)
            distances = np.sqrt(np.sum((coors_gt - coors_pred)**2, axis=1))
            all_distances.append(distances)

            empty_batch = True
            for idx, input in enumerate(inputs):
                # reuse the batched student output instead of re-running per image
                pseudo_hmap = outputs[idx]
                pseudo = torch.cat([input, pseudo_hmap.cuda()],
                                   dim=0).unsqueeze(0)
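
# `heatmap_to_coor` is defined outside these snippets. A minimal sketch of what
# its call sites imply (peak location of a 2-D numpy heatmap, returned as
# (x, y)); this is an assumption, not the original implementation:
import numpy as np

def heatmap_to_coor(hmap):
    # row/col of the maximum activation -> (x, y) in heatmap pixels
    y, x = np.unravel_index(hmap.argmax(), hmap.shape)
    return x, y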
Example #2
with torch.no_grad():
    valid_loss = 0
    distances = []
    accuracy = []
    for i, batch in enumerate(valid_loader):
        inputs = batch['image'].float().cuda()
        labels = batch['hmap'].float().cuda()
        coors = batch['coor_1'].numpy()
        img_names = batch['img_name']
        outputs = model(inputs)
        valid_loss += criterion(outputs, labels)
        outputs = outputs.cpu().detach().numpy()

        for index, out in enumerate(outputs):
            coor_pred = np.array(heatmap_to_coor(out.squeeze()))
            coor_pred = (coor_pred * [1280 / 224, 1024 / 224]).astype(int)
            coor_real = coors[index]
            dist = np.sum((coor_pred - coor_real)**2)**0.5
            acc = ([1, 1] -
                   (np.absolute(coor_pred - coor_real) / [1280, 1024])) * 100

            f.write(img_names[index] + ',' +
                    ','.join(str(v) for v in
                             [coor_real[0], coor_real[1],
                              coor_pred[0], coor_pred[1]]) + '\n')

            distances.append(dist)
            accuracy.append(acc)
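
# The manual comma/str concatenation above is easy to get wrong; a hedged
# alternative sketch using the standard csv module (assuming `f` is the same
# open text file used above):
import csv

def write_prediction_row(f, img_name, coor_real, coor_pred):
    # one row: image name, ground-truth x/y, predicted x/y
    csv.writer(f).writerow([img_name,
                            coor_real[0], coor_real[1],
                            coor_pred[0], coor_pred[1]])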
Example #3
def train():
    # cross-validation: draw a fresh random 90/10 train/val split
    order = np.random.RandomState().permutation(len(labeled_dataset))
    train_dataset, val_dataset = split_dataset(labeled_dataset,
                                               int(training_size * 0.9), order)
    train_loader = DataLoader(train_dataset, batch_size=batch_size,
                              num_workers=4, shuffle=True)
    valid_loader = DataLoader(val_dataset, batch_size=batch_size,
                              num_workers=4)
    max_total_acc_x = 0
    max_euclidean_distance = 99999
    for epoch in range(num_epochs):
        # note: exactly one mini-batch is drawn per "epoch"
        dataloader_iterator = iter(train_loader)
        try:
            sample_batched = next(dataloader_iterator)
        except StopIteration:
            order = np.random.RandomState().permutation(len(labeled_dataset))
            train_dataset, val_dataset = split_dataset(
                labeled_dataset, int(training_size * 0.9), order)

            train_loader = DataLoader(train_dataset, batch_size=batch_size,
                                      num_workers=4, shuffle=True)
            valid_loader = DataLoader(val_dataset, batch_size=batch_size,
                                      num_workers=4)

            dataloader_iterator = iter(train_loader)
            sample_batched = next(dataloader_iterator)

        inputs = sample_batched['image'].cuda()
        labels = sample_batched['hmap'].cuda()
        coors_bc = sample_batched['coor_1'].cpu().detach().numpy()

        origin_imgs = sample_batched['origin_img']

        optimizer.zero_grad()
        outputs = model(inputs)
        loss = mse(outputs.float(), labels.float())

        loss.backward()
        optimizer.step()

        ############################  896 block  ############
        empty_batch = True
        for index, out in enumerate(outputs.cpu().detach().numpy()):
            w_center, h_center = heatmap_to_coor(out.squeeze())
            cropped_image, cropped_hmap = crop(origin_imgs[index], w_center,
                                               h_center, coors_bc[index],
                                               224 * 4)

            cropped_image = ToTensor()(Resize((224, 224))(
                Image.fromarray(cropped_image.numpy()))).unsqueeze(0)
            cropped_hmap = cropped_hmap.unsqueeze(dim=0).unsqueeze(0)

            if empty_batch:
                cropped_image_batch = cropped_image
                cropped_hmap_batch = cropped_hmap
                empty_batch = False
            else:
                cropped_image_batch = torch.cat(
                    [cropped_image_batch, cropped_image])
                cropped_hmap_batch = torch.cat(
                    [cropped_hmap_batch, cropped_hmap])

        optimizer.zero_grad()
        outputs = model(cropped_image_batch.cuda())
        loss = mse(outputs, cropped_hmap_batch.cuda())

        torch.mean(loss).backward()
        optimizer.step()
        ############################  448 block  ############
        empty_batch = True
        for index, out in enumerate(outputs.cpu().detach().numpy()):
            w_center, h_center = heatmap_to_coor(out.squeeze())
            cropped_image, cropped_hmap = crop(origin_imgs[index], w_center,
                                               h_center, coors_bc[index],
                                               224 * 2)

            cropped_image = ToTensor()(Resize((224, 224))(
                Image.fromarray(cropped_image.numpy()))).unsqueeze(0)
            cropped_hmap = cropped_hmap.unsqueeze(dim=0).unsqueeze(0)

            if empty_batch:
                cropped_image_batch = cropped_image
                cropped_hmap_batch = cropped_hmap
                empty_batch = False
            else:
                cropped_image_batch = torch.cat(
                    [cropped_image_batch, cropped_image])
                cropped_hmap_batch = torch.cat(
                    [cropped_hmap_batch, cropped_hmap])

        optimizer.zero_grad()
        outputs = model(cropped_image_batch.cuda())
        loss = mse(outputs, cropped_hmap_batch.cuda())
        torch.mean(loss).backward()
        optimizer.step()
        ############################  224 block  ############
        empty_batch = True
        for index, out in enumerate(outputs.cpu().detach().numpy()):
            w_center, h_center = heatmap_to_coor(out.squeeze())
            cropped_image, cropped_hmap = crop(origin_imgs[index], w_center,
                                               h_center, coors_bc[index], 224)
            cropped_image = ToTensor()(
                cropped_image.unsqueeze(dim=-1).numpy()).unsqueeze(0)
            cropped_hmap = cropped_hmap.unsqueeze(dim=0).unsqueeze(0)

            if empty_batch:
                cropped_image_batch = cropped_image
                cropped_hmap_batch = cropped_hmap
                empty_batch = False
            else:
                cropped_image_batch = torch.cat(
                    [cropped_image_batch, cropped_image])
                cropped_hmap_batch = torch.cat(
                    [cropped_hmap_batch, cropped_hmap])

        optimizer.zero_grad()
        outputs = model(cropped_image_batch.cuda())
        loss = mse(outputs, cropped_hmap_batch.cuda())

        loss.backward()
        optimizer.step()

        ############################  112 block  ############
        empty_batch = True
        for index, out in enumerate(outputs.cpu().detach().numpy()):
            w_center, h_center = heatmap_to_coor(out.squeeze())
            cropped_image, cropped_hmap = crop(origin_imgs[index], w_center,
                                               h_center, coors_bc[index],
                                               int(224 / 2))

            cropped_image = ToTensor()(Resize((224, 224))(
                Image.fromarray(cropped_image.numpy()))).unsqueeze(0)
            cropped_hmap = cropped_hmap.unsqueeze(dim=0).unsqueeze(0)

            if empty_batch:
                cropped_image_batch = cropped_image
                cropped_hmap_batch = cropped_hmap
                empty_batch = False
            else:
                cropped_image_batch = torch.cat(
                    [cropped_image_batch, cropped_image])
                cropped_hmap_batch = torch.cat(
                    [cropped_hmap_batch, cropped_hmap])

        optimizer.zero_grad()
        outputs = model(cropped_image_batch.cuda())
        loss = mse(outputs, cropped_hmap_batch.cuda())
        torch.mean(loss).backward()
        optimizer.step()

        ############################  56 block  ############
        empty_batch = True
        for index, out in enumerate(outputs.cpu().detach().numpy()):
            w_center, h_center = heatmap_to_coor(out.squeeze())
            cropped_image, cropped_hmap = crop(origin_imgs[index], w_center,
                                               h_center, coors_bc[index],
                                               int(224 / 4))

            cropped_image = ToTensor()(Resize((224, 224))(
                Image.fromarray(cropped_image.numpy()))).unsqueeze(0)
            cropped_hmap = cropped_hmap.unsqueeze(dim=0).unsqueeze(0)

            if empty_batch:
                cropped_image_batch = cropped_image
                cropped_hmap_batch = cropped_hmap
                empty_batch = False
            else:
                cropped_image_batch = torch.cat(
                    [cropped_image_batch, cropped_image])
                cropped_hmap_batch = torch.cat(
                    [cropped_hmap_batch, cropped_hmap])

        optimizer.zero_grad()
        outputs = model(cropped_image_batch.cuda())
        loss = mse(outputs, cropped_hmap_batch.cuda())
        torch.mean(loss).backward()
        optimizer.step()

        if (epoch + 1) % 5 == 0:  # every 5 epochs
            e_distance = 0
            for index, out in enumerate(outputs):
                x, y = heatmap_to_coor(
                    out.reshape(224, 224).cpu().detach().numpy())
                e_distance += ((int(x/224*1280)-coors_bc[index][0])**2 + \
                                (int(y/224*1024)-coors_bc[index][1])**2)**0.5

            print('Train epoch: {}\tLoss: {:.30f}'.format(
                epoch + 1, torch.mean(loss).item()))

        if (epoch + 1) % 50 == 0:  # every 50 epochs: run validation
            # model.eval()
            with torch.no_grad():
                valid_loss = 0
                total_acc_x = 0
                total_acc_y = 0
                e_distance = 0
                for i, batch in enumerate(valid_loader):
                    inputs = batch['image'].float().cuda()
                    labels = batch['hmap'].float().cuda()
                    coors_bc = batch['coor_1'].cpu().detach().numpy()

                    outputs = model(inputs)
                    loss = mse(outputs, labels)
                    valid_loss += torch.mean(loss).item()  # accumulate the batch loss

                    outputs = outputs.cpu().detach().numpy()
                    labels = labels.cpu().detach().numpy()

                    sum_acc_x, sum_acc_y, list_acc_x, list_acc_y = accuracy_sum(
                        outputs, coors_bc)
                    total_acc_x += sum_acc_x
                    total_acc_y += sum_acc_y

                    for index, out in enumerate(outputs):
                        x, y = heatmap_to_coor(out.reshape(224, 224))
                        e_distance += ((int(x/224*1280)-coors_bc[index][0])**2 + \
                                        (int(y/224*1024)-coors_bc[index][1])**2)**0.5

                valid_loss = valid_loss / len(valid_loader)
                print('Valid loss {}'.format(valid_loss))

                # writer.add_scalar("Valid_loss_adbc", valid_loss, epoch)
                # writer.add_scalar("Valid_adbc_Euclidean_Distance", e_distance/len(valid_loader.dataset), epoch)

                print("=" * 30)
                print("total acc_x = {:.10f}".format(
                    total_acc_x / len(valid_loader.dataset)))
                print("total acc_y = {:.10f}".format(
                    total_acc_y / len(valid_loader.dataset)))
                print("Euclidean Distance: {}".format(
                    e_distance / len(valid_loader.dataset)))
                print("=" * 30)

                if e_distance / len(
                        valid_loader.dataset) < max_euclidean_distance:
                    max_euclidean_distance = e_distance / len(
                        valid_loader.dataset)
                    torch.save(model.state_dict(), saved_weight_dir)
                    print('model saved to ' + saved_weight_dir)
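
# The 896/448/224/112/56 blocks in train() above differ only in crop size (the
# 224 block also skips the Resize). A hedged refactoring sketch, reusing `crop`,
# `heatmap_to_coor`, `model`, `mse`, and `optimizer` from the surrounding code;
# the list-then-cat collation replaces the `empty_batch` flag:
def run_crop_stage(outputs, origin_imgs, coors_bc, size):
    """One refinement step at the given crop size around the predicted peak."""
    images, hmaps = [], []
    for index, out in enumerate(outputs.cpu().detach().numpy()):
        w_center, h_center = heatmap_to_coor(out.squeeze())
        cropped_image, cropped_hmap = crop(origin_imgs[index], w_center,
                                           h_center, coors_bc[index], size)
        cropped_image = ToTensor()(Resize((224, 224))(
            Image.fromarray(cropped_image.numpy()))).unsqueeze(0)
        images.append(cropped_image)
        hmaps.append(cropped_hmap.unsqueeze(0).unsqueeze(0))

    optimizer.zero_grad()
    outputs = model(torch.cat(images).cuda())
    loss = mse(outputs, torch.cat(hmaps).cuda())
    torch.mean(loss).backward()
    optimizer.step()
    return outputs, loss

# usage sketch: the five blocks collapse to
#     for size in (224 * 4, 224 * 2, 224, 224 // 2, 224 // 4):
#         outputs, loss = run_crop_stage(outputs, origin_imgs, coors_bc, size)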
Example #4
def train():
    train_loader = DataLoader(train_dataset, batch_size=batch_size,
                              num_workers=4, shuffle=True)
    # model.load_state_dict(torch.load(saved_weights))
    max_total_acc_x = 0
    for epoch in range(num_epochs):
        dataloader_iterator = iter(train_loader)
        try:
            sample_batched = next(dataloader_iterator)
        except StopIteration:
            train_loader = DataLoader(train_dataset, batch_size=batch_size,
                                      num_workers=4, shuffle=True)
            dataloader_iterator = iter(train_loader)
            sample_batched = next(dataloader_iterator)

        # model.train()
        train_loss = 0
        # https://pytorch.org/docs/stable/notes/cuda.html
        inputs = sample_batched['image'].cuda()
        labels = sample_batched['hmap'].cuda()
        coors = sample_batched['coor'].cpu().detach().numpy()

        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        train_loss += loss.item()

        if epoch % 1 == 0:  # every epoch
            e_distance = 0
            for index, out in enumerate(outputs):
                x, y = heatmap_to_coor(
                    out.reshape(224, 224).cpu().detach().numpy())
                e_distance += ((int(x/224*1280)-coors[index][0])**2 + \
                                (int(y/224*1024)-coors[index][1])**2)**0.5

            print('Train epoch: {}\tLoss: {:.30f}'.format(epoch, loss.item()))
            writer.add_scalar("bootstrap_training_loss", loss.item(),
                              epoch + epoch * math.ceil(len(train_loader) / batch_size))
            writer.add_scalar("bootstrap_training_Euclidean_Distance",
                              e_distance / len(outputs),
                              epoch + epoch * math.ceil(len(train_loader) / batch_size))

        if epoch % 20 == 0:  # every 20 epochs: run validation
            # model.eval()
            with torch.no_grad():
                valid_loss = 0
                total_acc_x = 0
                total_acc_y = 0
                e_distance = 0
                for i, batch in enumerate(valid_loader):
                    inputs = batch['image'].float().cuda()
                    labels = batch['hmap'].float().cuda()
                    coors = batch['coor'].numpy()
                    outputs = model(inputs)
                    valid_loss += criterion(outputs, labels)

                    outputs = outputs.cpu().detach().numpy()
                    labels = labels.cpu().detach().numpy()
                    acc_x, acc_y = accuracy_sum(outputs, coors)
                    total_acc_x += acc_x
                    total_acc_y += acc_y
                    for index, out in enumerate(outputs):
                        x, y = heatmap_to_coor(out.reshape(224, 224))
                        e_distance += ((int(x/224*1280)-coors[index][0])**2 + \
                                        (int(y/224*1024)-coors[index][1])**2)**0.5

                valid_loss = valid_loss / len(valid_loader)
                print('Valid loss {}'.format(valid_loss))

                writer.add_scalar("bootstrap_Valid_loss", valid_loss, epoch)
                writer.add_scalar("bootstrap_Valid_Euclidean_Distance",
                                  valid_loss, epoch)

                print("=" * 30)
                print("total acc_x = {:.10f}".format(
                    total_acc_x / len(valid_loader.dataset)))
                print("total acc_y = {:.10f}".format(
                    total_acc_y / len(valid_loader.dataset)))
                print("Euclidean Distance: {}".format(
                    e_distance / len(valid_loader.dataset)))
                print("=" * 30)

                if total_acc_x > max_total_acc_x:
                    max_total_acc_x = total_acc_x
                    torch.save(model.state_dict(), saved_weight_dir)
                    print('model saved to ' + saved_weight_dir)
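
# Note: in train() above each "epoch" consumes exactly one mini-batch (the
# iterator is rebuilt every iteration, so the StopIteration branch never fires
# for a non-empty dataset). If full passes over the data were intended, the
# conventional pattern is a sketch like:
for epoch in range(num_epochs):
    for sample_batched in train_loader:  # one full pass per epoch
        inputs = sample_batched['image'].cuda()
        labels = sample_batched['hmap'].cuda()

        optimizer.zero_grad()
        loss = criterion(model(inputs), labels)
        loss.backward()
        optimizer.step()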
Example #5
def train():
    train_loader = DataLoader(train_dataset, batch_size=batch_size,
                              num_workers=4, shuffle=True)
    # model.load_state_dict(torch.load(saved_weights))
    max_total_acc_x = 0
    max_euclidean_distance = 99999
    for epoch in range(num_epochs):
        dataloader_iterator = iter(train_loader)
        try:
            sample_batched = next(dataloader_iterator)
        except StopIteration:
            train_loader = DataLoader(train_dataset, batch_size=batch_size,
                                      num_workers=4, shuffle=True)
            dataloader_iterator = iter(train_loader)
            sample_batched = next(dataloader_iterator)

        inputs = sample_batched['image'].cuda()
        labels = sample_batched['hmap'].cuda()
        coors_bc = sample_batched['coor_bc'].cpu().detach().numpy()
        class_real = sample_batched['class_real'].cuda()
        gt_dx_dy = sample_batched['dx_dy'].numpy()

        img_names = sample_batched['img_name']
        origin_imgs = sample_batched['origin_img']

        # origin images are (N, H, W); scale the pixel offsets to the 224 grid
        w = origin_imgs.shape[2]
        h = origin_imgs.shape[1]

        dx = gt_dx_dy[:, :1] / w * 224
        dy = gt_dx_dy[:, 1:] / h * 224

        gt_dx_dy = torch.from_numpy(np.concatenate((dx, dy),
                                                   axis=1)).cuda().float()

        optimizer.zero_grad()
        outputs, pred_dx_dy = model(inputs)

        loss_hmap = mse(outputs, labels)
        loss_dx_dy = mse(pred_dx_dy, gt_dx_dy)
        loss = loss_hmap + loss_dx_dy

        torch.mean(loss).backward()
        optimizer.step()

        ############################  896 / 448 blocks  ############
        # the 896 and 448 crop stages are disabled in this variant;
        # Example #3 shows the active versions
        ############################  224 block  ############
        empty_batch = True
        for index, out in enumerate(outputs.cpu().detach().numpy()):
            w_center, h_center = heatmap_to_coor(out.squeeze())
            cropped_image, cropped_hmap = crop(origin_imgs[index], w_center,
                                               h_center, coors_bc[index], 224)
            cropped_image = ToTensor()(
                cropped_image.unsqueeze(dim=-1).numpy()).unsqueeze(0)
            cropped_hmap = cropped_hmap.unsqueeze(dim=0).unsqueeze(0)

            if empty_batch:
                cropped_image_batch = cropped_image
                cropped_hmap_batch = cropped_hmap
                empty_batch = False
            else:
                cropped_image_batch = torch.cat(
                    [cropped_image_batch, cropped_image])
                cropped_hmap_batch = torch.cat(
                    [cropped_hmap_batch, cropped_hmap])

        optimizer.zero_grad()
        outputs, pred_dx_dy = model(cropped_image_batch.cuda())
        loss_hmap = mse(outputs, cropped_hmap_batch.cuda())
        loss_dx_dy = mse(pred_dx_dy, gt_dx_dy)
        loss = loss_hmap + loss_dx_dy

        torch.mean(loss).backward()
        optimizer.step()

        ############################  112 / 56 blocks  ############
        # the 112 and 56 crop stages are likewise disabled; see Example #3

        if (epoch + 1) % 5 == 0:  # every 5 epochs
            e_distance = 0
            for index, out in enumerate(outputs):
                x, y = heatmap_to_coor(
                    out.reshape(224, 224).cpu().detach().numpy())
                e_distance += ((int(x/224*1280)-coors_bc[index][0])**2 + \
                                (int(y/224*1024)-coors_bc[index][1])**2)**0.5

            print('Train epoch: {}\tLoss: {:.30f}'.format(
                epoch + 1, torch.mean(loss).item()))
            writer.add_scalar("cascade4_training_loss",
                              torch.mean(loss).item(),
                              epoch + epoch * math.ceil(len(train_loader) / batch_size))
            writer.add_scalar("cascade4_training_Euclidean_Distance",
                              e_distance,
                              epoch + epoch * math.ceil(len(train_loader) / batch_size))

        if (epoch + 1) % 50 == 0:  # every 50 epochs: run validation
            # model.eval()
            with torch.no_grad():
                valid_loss = 0
                total_acc_x = 0
                total_acc_y = 0
                e_distance = 0
                for i, batch in enumerate(valid_loader):
                    inputs = batch['image'].float().cuda()
                    labels = batch['hmap'].float().cuda()

                    coors_bc = batch['coor_bc'].cpu().detach().numpy()
                    gt_dx_dy = batch['dx_dy'].numpy()

                    # every image shares the same original size, so the
                    # training batch's origin_imgs still gives (h, w)
                    w = origin_imgs.shape[2]
                    h = origin_imgs.shape[1]

                    dx = gt_dx_dy[:, :1] / w * 224
                    dy = gt_dx_dy[:, 1:] / h * 224

                    gt_dx_dy = torch.from_numpy(
                        np.concatenate((dx, dy), axis=1)).cuda().float()

                    outputs, pred_dx_dy = model(inputs)
                    loss_hmap = mse(outputs, labels)
                    loss_dx_dy = mse(pred_dx_dy, gt_dx_dy)
                    valid_loss += loss_hmap + loss_dx_dy

                    outputs = outputs.cpu().detach().numpy()
                    labels = labels.cpu().detach().numpy()

                    sum_acc_x, sum_acc_y, list_acc_x, list_acc_y = accuracy_sum(
                        outputs, coors_bc)
                    total_acc_x += sum_acc_x
                    total_acc_y += sum_acc_y

                    for index, out in enumerate(outputs):
                        x, y = heatmap_to_coor(out.reshape(224, 224))
                        e_distance += ((int(x/224*1280)-coors_bc[index][0])**2 + \
                                        (int(y/224*1024)-coors_bc[index][1])**2)**0.5

                valid_loss = valid_loss / len(valid_loader)
                print('Valid loss {}'.format(valid_loss))

                writer.add_scalar("Valid_loss_adbc", valid_loss, epoch)
                writer.add_scalar("Valid_adbc_Euclidean_Distance",
                                  e_distance / len(valid_loader.dataset),
                                  epoch)

                print("=" * 30)
                print("total acc_x = {:.10f}".format(
                    total_acc_x / len(valid_loader.dataset)))
                print("total acc_y = {:.10f}".format(
                    total_acc_y / len(valid_loader.dataset)))
                print("Euclidean Distance: {}".format(
                    e_distance / len(valid_loader.dataset)))
                print("=" * 30)

                if e_distance / len(
                        valid_loader.dataset) < max_euclidean_distance:
                    max_euclidean_distance = e_distance / len(
                        valid_loader.dataset)
                    torch.save(model.state_dict(), saved_weight_dir)
                    print('model saved to ' + saved_weight_dir)
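
# The dx/dy normalization appears twice in train() above. A hedged helper
# sketch (assumes the offsets arrive in original-image pixels as an (N, 2)
# numpy array and the network predicts on a 224x224 grid):
def normalize_dx_dy(gt_dx_dy, w, h, grid=224):
    """Map pixel offsets (dx, dy) into the grid-sized coordinate frame."""
    dx = gt_dx_dy[:, :1] / w * grid
    dy = gt_dx_dy[:, 1:] / h * grid
    return torch.from_numpy(np.concatenate((dx, dy), axis=1)).cuda().float()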
Example #6
def train_gan():
    correct_rate = 0
    max_e_distance = 9999
    correct = 0
    total = 0
    for epoch in range(num_epochs):
        welding_train_loader = DataLoader(train_dataset, batch_size=batch_size,
                                          num_workers=0, shuffle=True)

        dataloader_iterator = iter(welding_train_loader)
        try:
            sample_batched = next(dataloader_iterator)
        except StopIteration:
            welding_train_loader = DataLoader(train_dataset, batch_size=batch_size,
                                              num_workers=0, shuffle=True)
            dataloader_iterator = iter(welding_train_loader)
            sample_batched = next(dataloader_iterator)
        inputs = sample_batched['image']  # moved to the GPU per sample below
        real_hmaps = sample_batched['hmap']
        coors_bc = sample_batched['coor_bc']
        random_hmaps_pseudo = sample_batched['random_hmaps_pseudo']
        random_hmaps_real = sample_batched['random_hmaps_real']


        pseudo_batch = None
        real_batch = None
        for idx, input in enumerate(inputs):
            input = input.cuda()

            # re-center a gaussian on the student's predicted peak, then
            # softmax-normalize it into a proper heatmap
            pseudo_hmap = student(input.unsqueeze(0)).squeeze(0).detach().cpu().numpy()
            _, y, x = np.unravel_index(pseudo_hmap.argmax(), pseudo_hmap.shape)
            pseudo_hmap = torch.from_numpy(gaussion_hmap(x, y))
            pseudo_hmap = nn.Softmax(dim=1)(pseudo_hmap.view(-1, 224 * 224)).view(1, 224, 224)
            pseudo_hmap = pseudo_hmap.cuda()
            real_hmap = real_hmaps[idx].cuda()

            pseudo = torch.cat([input, pseudo_hmap.float()], dim=0).unsqueeze(0)
            real = torch.cat([input.float(), real_hmap.float()], dim=0).unsqueeze(0)

            x, y = heatmap_to_coor(pseudo_hmap.detach().cpu().numpy().reshape(224, 224))
            e_distance = ((int(x / 224 * 1280) - coors_bc[idx][0].item())**2 +
                          (int(y / 224 * 1024) - coors_bc[idx][1].item())**2)**0.5


            # pair the image with each of the ten random heatmaps drawn around
            # the pseudo and the real coordinates
            pseudo_rand = [torch.cat([input, random_hmaps_pseudo[k][idx].float().cuda()],
                                     dim=0).unsqueeze(0) for k in range(10)]
            real_rand = [torch.cat([input, random_hmaps_real[k][idx].float().cuda()],
                                   dim=0).unsqueeze(0) for k in range(10)]

            if e_distance < dist_lower_bound:
                # accurate prediction: its random pseudo variants are also
                # presented to the discriminator as real
                pseudo = torch.cat(pseudo_rand)
                real = torch.cat([real, pseudo] + real_rand)
            else:
                pseudo = torch.cat([pseudo] + pseudo_rand)
                real = torch.cat([real] + real_rand)

            if pseudo_batch is None:
                pseudo_batch = pseudo
                real_batch = real
            else:
                pseudo_batch = torch.cat([pseudo_batch, pseudo], dim=0)
                real_batch = torch.cat([real_batch, real])

        D_real = teacher(real_batch)
        D_fake = teacher(pseudo_batch)
        ones_label = torch.ones(real_batch.shape[0], 1).cuda()
        zeros_label = torch.zeros(pseudo_batch.shape[0], 1).cuda()

        D_loss_real = nn.functional.binary_cross_entropy(D_real, ones_label)
        D_loss_fake = nn.functional.binary_cross_entropy(D_fake, zeros_label)
        D_loss = D_loss_real + D_loss_fake


        D_solver.zero_grad()
        D_loss.backward()
        D_solver.step()

        # count discriminator outputs on the correct side of 0.5
        correct += (D_real >= 0.5).sum().item() + (D_fake < 0.5).sum().item()
        total += real_batch.shape[0] + pseudo_batch.shape[0]

        if (epoch + 1) % 20 == 0:  # every 20 epochs

            print('Train epoch {}:\tD_loss: {:.30f} acc_training_D: {:.30f}%  {}/{}'.format(
                epoch + 1, D_loss.item(), 100 * correct / total, correct, total))


            # writer_gan.add_scalar("advererial_gan_G_loss", \
                    # G_loss.item(), #/ len(inputs), \
                    # epoch * math.ceil(len(welding_train_loader) / batch_size) \
                    # )

            writer_gan.add_scalar("adv_loss_bce", \
                    D_loss.item(), #/ len(inputs), \
                    epoch * math.ceil(len(welding_train_loader) / batch_size) \
                    )

            writer_gan.add_scalar("adv_acc_bce", \
                    100 * correct / total, #/ len(inputs), \
                    epoch * math.ceil(len(welding_train_loader) / batch_size) \
                    )
            correct = 0 
            total = 0
        if (epoch + 1) % 100 == 0:  # every 100 epochs: checkpoint
            torch.save(teacher.state_dict(), dir_weight)
            print('model saved to ' + dir_weight)
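
# The pseudo-heatmap construction in train_gan() (student peak -> re-centered
# gaussian -> softmax over the flattened map) as a hedged standalone helper;
# `gaussion_hmap` is assumed to return a (1, 224, 224) numpy array:
def make_pseudo_hmap(student, input):
    """Build a softmax-normalized heatmap centered on the student's peak."""
    hmap = student(input.unsqueeze(0)).squeeze(0).detach().cpu().numpy()
    _, y, x = np.unravel_index(hmap.argmax(), hmap.shape)
    hmap = torch.from_numpy(gaussion_hmap(x, y))
    return nn.Softmax(dim=1)(hmap.view(-1, 224 * 224)).view(1, 224, 224).cuda()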