Example No. 1
0
    def predict(self):
        """Restore the latest checkpoint and run the model over the
        prediction dataloader, saving every batch of logits and printing
        cumulative timing totals at the end."""
        self._resume_ckpt()
        self.model.eval()

        # Cumulative timers: forward pass, whole batch, data transfer.
        fwd_meter = AverageMeter()
        batch_meter = AverageMeter()
        data_meter = AverageMeter()

        with torch.no_grad():
            batch_start = time.time()
            for step, (data, filenames) in enumerate(self.dataloader_predict, start=1):
                # Move the batch onto the target device.
                data = data.to(self.device, non_blocking=True)
                data_meter.update(time.time() - batch_start)

                # Forward pass is timed separately from saving.
                fwd_start = time.time()
                logits = self.model(data)
                fwd_meter.update(time.time() - fwd_start)
                self._save_pred(logits, filenames)

                batch_meter.update(time.time() - batch_start)
                batch_start = time.time()

            summary = ("Predicting and Saving Done!\n"
                       "Total Time: {:.2f}\n"
                       "Data Time: {:.2f}\n"
                       "Pre Time: {:.2f}")
            print(summary.format(batch_meter._get_sum(),
                                 data_meter._get_sum(),
                                 fwd_meter._get_sum()))
Example No. 2
0
    def eval_and_predict(self):
        """Evaluate the model on the test set while saving predictions.

        Restores the checkpoint, runs a no-grad pass over
        ``self.test_data_loader``, computes loss / accuracy / MIoU per
        batch, saves each batch of logits, prints summary statistics,
        then appends the averages to ``self.history['eval']`` and dumps
        the history dict to ``history1.txt`` in ``self.test_log_path``.
        """
        self._resume_ckpt()
        self.model.eval()

        # Timing and metric accumulators.
        predict_time = AverageMeter()
        batch_time = AverageMeter()
        data_time = AverageMeter()
        ave_total_loss = AverageMeter()
        ave_acc = AverageMeter()
        ave_iou = AverageMeter()

        with torch.no_grad():
            tic = time.time()
            for steps, (data, target, filename) in enumerate(self.test_data_loader, start=1):
                # Move batch to device; data_time covers loading + transfer.
                data = data.to(self.device, non_blocking=True)
                target = target.to(self.device, non_blocking=True)
                data_time.update(time.time() - tic)

                # Forward pass timed WITHOUT the save, so predict_time is
                # consistent with what predict() measures.
                pre_tic = time.time()
                logits = self.model(data)
                predict_time.update(time.time() - pre_tic)
                self._save_pred(logits, filename)

                loss = self.loss(logits, target)
                acc = Accuracy(logits, target)
                miou = MIoU(logits, target, self.config.nb_classes)

                batch_time.update(time.time() - tic)
                tic = time.time()

                # .item() extracts the Python scalar; the original used
                # the deprecated `.data` attribute (loss.data.item()).
                ave_total_loss.update(loss.item())
                ave_acc.update(acc)
                ave_iou.update(miou)

            # Display evaluation result at the end.
            print('Evaluation phase !\n'
                  'Time: {:.2f},  Data: {:.2f},\n'
                  'MIoU: {:6.4f}, Accuracy: {:6.4f}, Loss: {:.6f}'
                  .format(batch_time.average(), data_time.average(),
                          ave_iou.average(), ave_acc.average(), ave_total_loss.average()))
            print('Prediction Phase !\n'
                  'Total Time cost: {}s\n'
                  'Average Time cost per batch: {}s!'
                  .format(predict_time._get_sum(), predict_time.average()))

        # Record per-run evaluation history.
        self.history['eval']['loss'].append(ave_total_loss.average())
        self.history['eval']['acc'].append(ave_acc.average())
        self.history['eval']['miou'].append(ave_iou.average())
        self.history['eval']['time'].append(predict_time.average())

        # TODO: move history persistence into a dedicated logger/helper.
        print("     + Saved history of evaluation phase !")
        hist_path = os.path.join(self.test_log_path, "history1.txt")
        with open(hist_path, 'w') as f:
            f.write(str(self.history))
def inference_test_both():
    """Run one training step on a normal conv net and on a separable
    conv net with identical random data, then report each network's
    trainable-parameter count and wall-clock time for the step.

    Requires a CUDA device; each timed region is bracketed with
    ``torch.cuda.synchronize()`` so queued GPU work is fully counted.
    """
    # Random image batch and integer target map for CrossEntropyLoss
    # (class labels 0..9).
    random_input = torch.randn((1, 3, 256, 256))
    random_output = torch.randint(low=0, high=10, size=(1, 256, 256))

    net1 = normal_convnet().to('cuda:0')
    net2 = sep_convnet().to('cuda:0')

    criterion = nn.CrossEntropyLoss().to('cuda:0')

    # NOTE(review): the learning rates differ (0.1 vs 0.2) — confirm this
    # is intentional; lr does not affect the timing comparison itself.
    optimizer_1 = torch.optim.SGD(params=net1.parameters(), lr=0.1)
    optimizer_2 = torch.optim.SGD(params=net2.parameters(), lr=0.2)

    cost1 = AverageMeter()
    cost2 = AverageMeter()
    print("Simulate Training ... ...")

    # --- Normal convolution network: one full optimization step. ---
    input1 = random_input.to('cuda:0')
    target1 = random_output.to('cuda:0')
    torch.cuda.synchronize()
    tic = time.time()
    optimizer_1.zero_grad()
    output1 = net1(input1)
    loss = criterion(output1, target1)
    loss.backward()
    optimizer_1.step()
    torch.cuda.synchronize()
    cost1.update(time.time() - tic)

    # --- Separable convolution network: one full optimization step. ---
    input2 = random_input.to('cuda:0')
    target2 = random_output.to('cuda:0')
    torch.cuda.synchronize()
    tic = time.time()
    # BUG FIX: the original called optimizer_1.zero_grad() here, clearing
    # net1's gradients instead of net2's; clear optimizer_2's gradients
    # so net2's step starts from zeroed grads.
    optimizer_2.zero_grad()
    output2 = net2(input2)
    loss = criterion(output2, target2)
    loss.backward()
    optimizer_2.step()
    torch.cuda.synchronize()
    cost2.update(time.time() - tic)

    print("Done for All !")

    print("Trainable Parameters:\n"
          "Normal_conv2d: {}\n"
          "Sep_conv2d    : {}".format(parameters_sum(net1),
                                      parameters_sum(net2)))
    print("Inference Time cost:\n"
          "Normal_conv2d: {}s\n"
          "Sep_conv2d    : {}s".format(cost1._get_sum(), cost2._get_sum()))