Example #1
0
def load_paths(opt, base, pa, split, x_bounds=(162, 413), y_bounds=(85, 401)):
    """Build per-slice sample lists along each axis of every patient volume.

    For each patient directory under ``base`` the ground-truth volume
    ``GT.nii.gz`` is loaded and one entry per 2-D slice is collected for
    each of the three axes.  Slices whose label sum is zero are skipped
    for the training split only (they carry no supervision signal).

    Args:
        opt: options object; only ``opt.barwidth`` is read here.
        base: root directory containing one sub-directory per patient.
        pa: iterable of patient directory names.
        split: 'train' / 'val' / 'test'; empty slices are dropped only
            for 'train'.
        x_bounds: inclusive (lo, hi) range of axis-0 slices to keep
            (previously hard-coded to 162..413).
        y_bounds: inclusive (lo, hi) range of axis-1 slices to keep
            (previously hard-coded to 85..401).

    Returns:
        (X, Y, Z): lists of ``((patient, slice_idx), gt_path, img_path)``
        tuples for axes 0, 1 and 2 respectively (Z has no bounds filter).
    """
    print('\t-> processing {} data'.format(split))
    X, Y, Z = [], [], []
    skip_empty = split == 'train'
    bar = progbar(len(pa), width=opt.barwidth)
    for idx, patients in enumerate(pa):
        # Each patient folder holds GT.nii.gz and <patient>.nii.gz.
        gt_path = os.path.join(base, patients, 'GT.nii.gz')
        img_path = os.path.join(base, patients, patients + '.nii.gz')
        # NOTE(review): get_data() is deprecated in recent nibabel;
        # get_fdata() changes the returned dtype, so it is left untouched.
        gt = nib.load(gt_path).get_data()
        for i in range(gt.shape[2]):
            if skip_empty and np.sum(gt[:, :, i]) == 0:
                continue
            Z.append(((patients, i), gt_path, img_path))
        for i in range(gt.shape[0]):
            if skip_empty and np.sum(gt[i, :, :]) == 0:
                continue
            if i < x_bounds[0] or i > x_bounds[1]:
                continue
            X.append(((patients, i), gt_path, img_path))
        for i in range(gt.shape[1]):
            if skip_empty and np.sum(gt[:, i, :]) == 0:
                continue
            if i < y_bounds[0] or i > y_bounds[1]:
                continue
            Y.append(((patients, i), gt_path, img_path))
        bar.update(idx + 1)
    return X, Y, Z
Example #2
0
def processing(path, split):
    """Collect per-instance frame samples from a DAVIS-style dataset.

    Reads the 2016/2017 ImageSets lists for ``split``, then for every
    video walks its annotation folder and emits one tuple per non-background
    instance id in each non-empty PNG ground-truth frame.

    Args:
        path: dataset root containing ImageSets/, JPEGImages/, Annotations/.
        split: split name; ``<split>.txt`` is read per year.

    Returns:
        List of ``(image_path, gt_path, video, frame, instance_id)`` tuples.
    """
    folders_all = []
    print("=> start collecting {} data....".format(split))
    for yr in [2016, 2017]:
        image_set_path = os.path.join(path, 'ImageSets', str(yr),
                                      split + '.txt')
        with open(image_set_path, 'r') as f:
            folders_all += f.read().split()
    instance_all = []
    bar = progbar(len(folders_all), width=40)
    for idx, video in enumerate(folders_all):
        bar.update(idx + 1)
        image_path = os.path.join(path, 'JPEGImages', '480p', video)
        gt_path = os.path.join(path, 'Annotations', '480p', video)
        for frame in os.listdir(gt_path):
            # BUG FIX: ground_truth/instance_num were only assigned for
            # '.png' frames yet read for every frame, so any non-png entry
            # raised NameError (or silently reused the previous frame's
            # values).  Non-png entries are now skipped up front.
            if not frame.endswith('.png'):
                continue
            ground_truth = np.array(Image.open(os.path.join(gt_path, frame)))
            instance_num = len(np.unique(ground_truth))
            if np.all(ground_truth == 0):
                continue
            # Instance id 0 is the background label.
            for i in range(1, instance_num):
                gt_path_ = os.path.join(gt_path, frame)
                image_path_ = os.path.join(image_path,
                                           frame.replace('png', 'jpg'))
                instance_all.append((image_path_, gt_path_, video, frame, i))
    return instance_all
Example #3
0
        def step(epoch):
            """Run one training epoch over ``self.train_dataset`` (closure)."""
            self.model.train()
            bar = progbar(len(self.train_dataset), width=10)
            self.lr_schedulr.step(epoch=epoch)
            print('\n Training AT epoch = {}'.format(epoch))
            print('current learning rate = {}\n'.format(self.lr_schedulr.get_lr()))
            avgLoss = 0
            for batch_idx, (image, afmap) in enumerate(self.train_dataset):
                self.optimizer.zero_grad()
                image_var = Variable(image).cuda()
                afmap_var = Variable(afmap).cuda()
                prediction = self.model(image_var)
                batch_loss = self.criterion(prediction, afmap_var)
                batch_loss.backward()
                self.optimizer.step()
                # Incremental mean of the per-batch losses.
                avgLoss = (avgLoss * batch_idx + batch_loss.item()) / (batch_idx + 1)

                self.logger['train'].write(
                    'Epoch: [%d][%d/%d] Err %1.4f\n'
                    % (epoch, batch_idx, len(self.train_dataset), avgLoss))

                bar.update(batch_idx, [('avgLoss', avgLoss)])

            summary = '\n * Finished training epoch # %d     Loss: %1.4f\n' % (epoch, avgLoss)
            self.logger['train'].write(summary)
            print(summary)
            return avgLoss
Example #4
0
    def test(self, valLoader, epoch):
        """Run one validation epoch and return the average loss.

        Logs per-batch timing and loss, visualizes the first
        ``opt.visTest + 1`` batches, and dumps every post-processed line
        prediction to ``<opt.resume>/<epoch>/<imgid>_line.npy``.

        Args:
            valLoader: validation DataLoader yielding (input, line, imgids).
            epoch: epoch number, used for logging and the output directory.
        """
        self.model.eval()
        # NOTE(review): the forward passes below still track gradients;
        # wrapping the loop in torch.no_grad() would save memory — confirm
        # the torch version in use before changing.

        avgLoss = 0
        visImg = []

        self.progbar = progbar(len(valLoader), width=self.opt.barwidth)

        # Loop-invariant hoisted: create the per-epoch output directory once
        # instead of re-checking it on every batch.
        outDir = os.path.join(self.opt.resume, str(epoch))
        if not os.path.exists(outDir):
            os.makedirs(outDir)

        for i, (inputData, line, imgids) in enumerate(valLoader):
            if self.opt.debug and i > 10:
                break

            start = time.time()

            inputData_var, line_var = Variable(inputData), Variable(line)
            if self.opt.GPU:
                inputData_var = inputData_var.cuda()
                line_var = line_var.cuda()
            dataTime = time.time() - start

            loss, line_loss, line_result = self.model.forward(
                inputData_var, line_var)

            runTime = time.time() - start

            # Incremental mean of the batch losses.
            avgLoss = (avgLoss * i + loss.item()) / (i + 1)

            log = 'Epoch: [%d][%d/%d] Time %1.3f Data %1.3f Err %1.4f\n' % (
                epoch, i, len(valLoader), runTime, dataTime, loss.item())
            self.logger['val'].write(log)
            self.progbar.update(i, [('Time', runTime), ('Loss', loss.item())])

            # Collect (input, prediction, target) triples for visualization.
            if i <= self.opt.visTest:
                visImg.append(inputData.cpu())
                visImg.append(line_result.cpu().data)
                visImg.append(line)

            if i == self.opt.visTest:
                self.visualize(visImg, epoch, 'test',
                               valLoader.dataset.postprocess,
                               valLoader.dataset.postprocessLine)

            # Save each post-processed prediction, keyed by image id.
            for j in range(len(imgids)):
                np.save(
                    os.path.join(outDir, imgids[j] + '_line.npy'),
                    valLoader.dataset.postprocessLine()(
                        line_result.cpu().data[j].numpy()))

        log = '\n * Finished testing epoch # %d      Loss: %1.4f\n' % (epoch,
                                                                       avgLoss)
        self.logger['val'].write(log)
        print(log)

        return avgLoss
Example #5
0
    def train(self, trainLoader, epoch):
        """Train the segmentation model for one epoch; return the mean loss.

        The criterion target is the binarized mask ``seg > 0`` cast to
        float32, i.e. any positive label counts as foreground.
        """
        self.model.train()

        print("-> Training epoch # {}".format(str(epoch)))

        avgLoss = 0

        self.progbar = progbar(len(trainLoader), width=self.opt.barwidth)

        for i, (inputData, seg, imgids) in enumerate(trainLoader):
            if self.opt.debug and i > 10:
                break

            start = time.time()

            inputData_var, seg_var = Variable(inputData), Variable(seg)

            self.optimizer.zero_grad()

            if self.opt.GPU:
                inputData_var = inputData_var.cuda()
                seg_var = seg_var.cuda()

            dataTime = time.time() - start

            output = self.model.forward(inputData_var)

            # Binary target: any positive label is foreground.
            loss = self.criterion(output,
                                  torch.gt(seg_var, 0).type(torch.float32))

            loss.backward()

            self.optimizer.step()

            runTime = time.time() - start

            # FIX: use loss.item() (a Python float) instead of the deprecated
            # loss.data, matching the sibling train/test loops in this file;
            # loss.data kept avgLoss as a tensor.
            avgLoss = (avgLoss * i + loss.item()) / (i + 1)

            log = 'Epoch: [%d][%d/%d] Time %1.3f Data %1.3f Err %1.4f\n' % (
                epoch, i, len(trainLoader), runTime, dataTime, loss.item())

            self.logger['train'].write(log)

            self.progbar.update(i, [('Time', runTime), ('Loss', loss.item())])

        log = '\n * Finished training epoch # %d     Loss: %1.4f\n' % (epoch,
                                                                       avgLoss)
        self.logger['train'].write(log)
        print(log)

        return avgLoss
Example #6
0
    def train(self, trainLoader, epoch):
        """One training epoch over ``trainLoader``; returns the mean loss."""
        self.model.train()

        print('=> Training epoch # ' + str(epoch))

        avgLoss = 0
        visImg = []

        self.progbar = progbar(len(trainLoader), width=self.opt.barwidth)

        for step, (inputData, line, imgids) in enumerate(trainLoader):
            # Short-circuit long epochs while debugging.
            if self.opt.debug and step > 10:
                break

            tic = time.time()

            inputData_var, line_var = Variable(inputData), Variable(line)
            self.optimizer.zero_grad()
            if self.opt.GPU:
                inputData_var = inputData_var.cuda()
                line_var = line_var.cuda()
            dataTime = time.time() - tic

            loss, line_loss, line_result = self.model.forward(
                inputData_var, line_var)

            loss.backward()
            self.optimizer.step()
            runTime = time.time() - tic

            # Incremental mean over the batches seen so far.
            avgLoss = (avgLoss * step + loss.item()) / (step + 1)

            log = 'Epoch: [%d][%d/%d] Time %1.3f Data %1.3f Err %1.4f\n' % (
                epoch, step, len(trainLoader), runTime, dataTime, loss.item())
            self.logger['train'].write(log)
            self.progbar.update(step, [('Time', runTime),
                                       ('Loss', loss.item())])

            # Accumulate tensors for the (currently disabled) visualization.
            if step <= self.opt.visTrain:
                visImg.append(inputData)
                visImg.append(line_result.cpu().data)
                visImg.append(line)

            # Visualization hook kept disabled, as in the original:
            # if step == self.opt.visTrain:
            #     self.visualize(visImg, epoch, 'train',
            #                    trainLoader.dataset.postprocess,
            #                    trainLoader.dataset.postprocessLine)

        log = '\n * Finished training epoch # %d     Loss: %1.4f\n' % (epoch,
                                                                       avgLoss)
        self.logger['train'].write(log)
        print(log)

        return avgLoss
Example #7
0
    def test(self, cfg, epoch=-1):
        """Run inference on every configured test dataset.

        Loads weights for ``epoch`` (default -1 — presumably the latest
        checkpoint; TODO confirm against load_weight_by_epoch), predicts an
        attraction-field map per image, converts it back to line segments,
        hands an output dict to ``self.output_method`` and finally prints
        per-dataset throughput statistics.
        """
        self.model.eval()

        self.load_weight_by_epoch(epoch)

        # self.model.cuda()

        for name, dataset in zip(cfg.DATASETS.TEST, self.test_dataset):

            print('Testing on {} dataset'.format(name.upper()))

            bar = progbar(target=len(dataset))
            start_time = time.time()
            for i, (image, lines, shape_info, fname) in enumerate(dataset):
                image_var = Variable(image).cuda()
                lines_var = Variable(lines).cuda()
                shape_info = Variable(shape_info).cuda()

                # image_var = self.input_method(image_var)
                afmap_pred = self.model(image_var)

                # Convert the predicted attraction-field map back into line
                # segments; xx/yy are returned but unused here.
                lines_pred, xx, yy = lsgenerator(
                    afmap_pred[0].cpu().data.numpy())
                # Ground-truth AF map at the network input size, for the
                # output dict below; label is unused.
                afmap_gt, label = afm(lines_var, shape_info,
                                      image_var.shape[2], image_var.shape[3])
                # Original (un-resized) image read straight from disk.
                image_raw = cv2.imread(
                    osp.join(dataset.dataset.data_root, 'images', fname[0]))
                # import pdb
                # pdb.set_trace()
                output_dict = {
                    'image': image_raw,
                    'image_resized': image_var[0].cpu().data.numpy(),
                    'lines_pred_resized': lines_pred,
                    'lines_gt': lines.numpy(),
                    'afmap_pred': afmap_pred[0].cpu().data.numpy(),
                    'afmap_gt': afmap_gt[0].cpu().data.numpy(),
                    'fname': fname[0],
                    'output_dir': osp.join(self.resultDir, name),
                }
                self.output_method(output_dict, cfg)
                bar.update(i)

            end_time = time.time()

            print('Total images: {}'.format(len(dataset)))
            print('Total time: {} ellapsed for {}'.format(
                end_time - start_time, cfg.TEST.OUTPUT_MODE))
            print('Frames per Second: {}'.format(
                len(dataset) / (end_time - start_time)))
Example #8
0
    def train(self, trainLoader, epoch):
        """Train for one epoch; returns the running-average loss value."""
        self.model.train()
        print("=> Training epoch")
        avgLoss = RunningAverage()
        avgAcces = {metric: RunningAverage() for metric in self.metrics}
        self.progbar = progbar(len(trainLoader), width=self.opt.barwidth)
        for step, (batch_in, batch_gt) in enumerate(trainLoader):
            if self.opt.debug and step > 10:  # debug: cut the epoch short
                break
            tic = time.time()
            inputV, targetV = Variable(batch_in), Variable(batch_gt)
            if self.opt.GPU:
                inputV, targetV = inputV.cuda(), targetV.cuda()

            self.optimizer.zero_grad()
            dataTime = time.time() - tic

            output = self.model(inputV)
            # Loss rescaled exactly as in the original implementation.
            loss = self.criterion(output, targetV) * 255 * 255 / 144 / 144
            loss.backward()
            self.optimizer.step()

            # --- logging ---
            runTime = time.time() - tic
            avgLoss.update(float(loss))
            logAcc = []
            pred_np = output.data.cpu().numpy()
            gt_np = targetV.data.cpu().numpy()
            for metric in self.metrics:
                avgAcces[metric].update(self.metrics[metric](pred_np, gt_np))
                logAcc.append((metric, float(avgAcces[metric]())))
            del pred_np, gt_np
            log = updateLog(epoch, step, len(trainLoader), runTime, dataTime,
                            float(loss), avgAcces)
            self.logger['train'].write(log)
            self.progbar.update(step, [('Time', runTime),
                                       ('loss', float(loss)), *logAcc])
            # --- end logging ---

        log = '\n* Finished training epoch # %d  Loss: %1.4f  ' % (epoch, avgLoss())
        for metric in avgAcces:
            log += metric + " %1.4f  " % avgAcces[metric]()
        log += '\n'
        self.logger['train'].write(log)
        print(log)
        return avgLoss()
Example #9
0
def load_paths(opt, base, pa, split):
    """Collect ``((patient, slice_idx), gt_path, img_path)`` tuples along axis 2.

    Slices whose label volume sums to zero are skipped for the training
    split only, since they contain no supervision signal.
    """
    print('\t-> processing {} data'.format(split))
    samples = []
    drop_empty = split == 'train'
    bar = progbar(len(pa), width=opt.barwidth)
    for idx, patient in enumerate(pa):
        # Each patient folder holds GT.nii.gz and <patient>.nii.gz.
        gt_path = os.path.join(base, patient, 'GT.nii.gz')
        img_path = os.path.join(base, patient, patient + '.nii.gz')
        volume = nib.load(gt_path).get_data()
        for k in range(volume.shape[2]):
            if drop_empty and np.sum(volume[:, :, k]) == 0:
                continue
            samples.append(((patient, k), gt_path, img_path))
        bar.update(idx + 1)
    return samples
    def train(self, trainLoader, epoch):
        """Train the stacked-hourglass line model for one epoch.

        Logs per-batch timing/loss, keeps a running-average loss and
        returns it.  Visualization tensors are collected but the
        ``visualize()`` call itself stays disabled, as in the original.
        (Comments translated to English from the original Korean.)
        """
        self.model.train()

        print('=> Training epoch # ' + str(epoch))

        avgLoss = 0
        visImg = []

        # Progress-bar helper that displays epoch progress.
        self.progbar = progbar(len(trainLoader), width=self.opt.barwidth)

        for i, (inputData, line, imgids) in enumerate(trainLoader):
            if self.opt.debug and i > 10:
                break

            start = time.time()

            # Wrap tensors for autograd (legacy Variable API).
            inputData_var, line_var = Variable(inputData), Variable(line)
            self.optimizer.zero_grad()

            if self.opt.GPU:
                inputData_var = inputData_var.cuda()
                line_var = line_var.cuda()

            dataTime = time.time() - start

            # Forward pass with the original image and the line map; the
            # model returns the loss alongside the predicted line map.
            loss, line_loss, line_result = self.model.forward(
                inputData_var, line_var)

            loss.backward()
            self.optimizer.step()

            runTime = time.time() - start

            # FIX: the original used the deprecated loss.data here (flagged
            # "needs fixing" by its own comments); loss.item() keeps avgLoss
            # a plain float, consistent with the sibling loops.
            avgLoss = (avgLoss * i + loss.item()) / (i + 1)

            log = 'Epoch: [%d][%d/%d] Time %1.3f Data %1.3f Err %1.4f\n' % (
                epoch, i, len(trainLoader), runTime, dataTime, loss.item())

            self.logger['train'].write(log)

            self.progbar.update(i, [('Time', runTime), ('Loss', loss.item())])

            if i <= self.opt.visTrain:
                visImg.append(inputData)
                visImg.append(line_result.cpu().data)
                visImg.append(line)

            # Visualization intentionally disabled:
            # if i == self.opt.visTrain:
            #     self.visualize(visImg, epoch, 'train',
            #                    trainLoader.dataset.postprocess,
            #                    trainLoader.dataset.postprocessLine)

        log = '\n * Finished training epoch # %d     Loss: %1.4f\n' % (epoch,
                                                                       avgLoss)
        self.logger['train'].write(log)
        print(log)

        return avgLoss
    def test(self, valLoader, epoch):
        """Run one validation epoch and return the average loss.

        Visualizes the first ``opt.visTest + 1`` batches via
        ``self.visualize``; the per-image ``.npy`` dump present in sibling
        implementations is intentionally left disabled here.
        """
        print(
            "*********************************test****************************"
        )
        self.model.eval()

        avgLoss = 0
        visImg = []

        self.progbar = progbar(len(valLoader), width=self.opt.barwidth)

        print("valLoader : ", len(valLoader))

        for i, (inputData, line, imgids) in enumerate(valLoader):
            if self.opt.debug and i > 10:
                break

            start = time.time()
            # BUG FIX: the original passed `volatile=True` to Variable (an
            # error on PyTorch >= 0.4, and redundant under no_grad) and left
            # the forward pass OUTSIDE the no_grad block; inference is now
            # fully gradient-free.
            with torch.no_grad():
                inputData_var, line_var = Variable(inputData), Variable(line)
                if self.opt.GPU:
                    inputData_var = inputData_var.cuda()
                    line_var = line_var.cuda()

                dataTime = time.time() - start

                loss, line_loss, line_result = self.model.forward(
                    inputData_var, line_var)

            runTime = time.time() - start

            # FIX: loss.item() instead of deprecated loss.data, matching the
            # sibling loops and keeping avgLoss a plain float.
            avgLoss = (avgLoss * i + loss.item()) / (i + 1)

            log = 'Epoch: [%d][%d/%d] Time %1.3f Data %1.3f Err %1.4f\n' % (
                epoch, i, len(valLoader), runTime, dataTime, loss.item())
            self.logger['val'].write(log)
            self.progbar.update(i, [('Time', runTime), ('Loss', loss.item())])

            if i <= self.opt.visTest:
                visImg.append(inputData.cpu())
                visImg.append(line_result.cpu().data)
                visImg.append(line)

            if i == self.opt.visTest:
                self.visualize(visImg, epoch, 'test',
                               valLoader.dataset.postprocess,
                               valLoader.dataset.postprocessLine)

            # Per-image .npy dump kept disabled, as in the original:
            # outDir = os.path.join(self.opt.resume, str(epoch))
            # if not os.path.exists(outDir):
            #     os.makedirs(outDir)
            # for j in range(len(imgids)):
            #     np.save(os.path.join(outDir, imgids[j] + '_line.npy'),
            #             valLoader.dataset.postprocessLine()(
            #                 line_result.cpu().data[j].numpy()))

        log = '\n * Finished testing epoch # %d      Loss: %1.4f\n' % (epoch,
                                                                       avgLoss)
        self.logger['val'].write(log)
        print(log)

        return avgLoss
    def test(self, valLoader, epoch):
        """Debug/visualization variant of the validation loop.

        Shows the first predicted line map of each batch with OpenCV
        (blocking up to 30 s per batch on ``cv2.waitKey``) and always
        returns 0 — no loss is accumulated or logged.

        NOTE(review): this reads like a temporary inspection harness, not a
        metric-producing test loop; ``avgLoss``, ``dataTime``, ``img_line``
        and ``img_result_plt`` are computed but never used.
        """
        print(
            "*********************************test****************************"
        )
        self.model.eval()

        avgLoss = 0
        visImg = []

        self.progbar = progbar(len(valLoader), width=self.opt.barwidth)

        print("valLoader : ", len(valLoader))

        for i, (inputData, line, imgids) in enumerate(valLoader):
            if self.opt.debug and i > 10:
                break

            start = time.time()
            # Inference does not need gradients (note: only the Variable
            # wrapping is inside no_grad here; the forward call below is not).
            with torch.no_grad():
                inputData_var, line_var = Variable(inputData), Variable(line)
                #print(inputData_var.shape, '  ', line_var.shape)
            if self.opt.GPU:
                inputData_var = inputData_var.cuda()
                line_var = line_var.cuda()

            dataTime = time.time() - start

            loss, line_loss, line_result = self.model.forward(
                inputData_var, line_var)

            #print(line_result)
            img_line = line[0][0].numpy()
            ''' img_origin imshow
            img_origin = inputData[0].numpy()
            print(line_result.shape)

            mean = np.array([0.485, 0.456, 0.406])
            std = np.array([0.229, 0.224, 0.225])
            img_origin = np.transpose(img_origin, (1, 2, 0))
            img_origin = t.unNormalize(img_origin, mean, std)
            '''

            # ImageNet normalization constants — presumably matching the
            # dataset's input normalization; TODO confirm.
            mean = np.array([0.485, 0.456, 0.406])
            std = np.array([0.229, 0.224, 0.225])

            # Un-normalize the first prediction, clamp to [0, 255], and show
            # it upscaled to 320x320.
            img_result = line_result[0].cpu().detach().numpy()
            print(np.min(img_result), ' ', np.max(img_result))
            img_result_plt = line_result[0][0].cpu().detach().numpy()
            img_result = np.transpose(img_result, (1, 2, 0))
            img_result = t.unNormalize(img_result, mean, std)
            print(img_result)
            print(np.min(img_result), ' ', np.max(img_result))
            img_result = np.clip(img_result, 0, 255)
            img_result = np.uint8(img_result)
            #print(img_result)

            img_result = cv2.resize(img_result,
                                    dsize=(320, 320),
                                    interpolation=cv2.INTER_LANCZOS4)

            cv2.imshow("Gray", img_result)
            cv2.waitKey(30000)

            #print(img_result)
            #print(img_result.shape)
            #plt.figure(1)
            #plt.axis("off")
            # cv2.imshow("Gray",img_result)
            #plt.imshow(img_result_plt,cmap="gray")
            # plt.imshow(cv2.cvtColor(img_origin, cv2.COLOR_BGR2RGB))
            # plt.imshow(img)
            #plt.show()

            #print("x_1 shape[0]", x_1.shape)

#             plt.figure(figsize=(8, 8))

#             arr_img1 = []

#             if i == 1:
#                 idx = 1
#                 for j in range(36):
#                     img = x_1[1][j].cpu().detach().numpy()
#                     ax = plt.subplot(6, 6, idx)
#                     ax.set_xticks([])
#                     ax.set_yticks([])
#                     plt.imshow(img, cmap='gray')
#                     idx += 1
# cv2.imshow("img",img)
# cv2.waitKey(300)
#                 plt.show()
# for _ in range(8):
#     for _ in range(8):
#         ax = plt.subplot(8, 8, idx)
#         ax.set_xticks([])
#         ax.set_yticks([])
#         plt.imshow(arr_img1[idx], cmap='gray')
#         idx += 1

# print("x_1 shape[0]", x_1[0][0].shape)
# print("x_1[0][0] type : ", type(x_1[0][0]))
# img = x_1[0][0].cpu().detach().numpy()
# print(img)
# cv2.imshow("img",img)
# cv2.waitKey(300000)
        return 0
Example #13
0
 def start(self, lenDS):
     """Reset per-epoch bookkeeping before iterating ``lenDS`` batches."""
     self.lenDS = lenDS
     # Running means seeded with initial values (10.0 loss, 0.0 accuracy).
     self.avgLoss = RunningAverageDict(10.0)
     self.avgAcces = RunningAverageDict(0.0)
     self.progbar = progbar(self.lenDS, width=self.opt.barwidth)
     # Emit a log entry every lenDS / log_num iterations (integer division).
     self.log_interval = int(lenDS / self.log_num)
Example #14
0
    def process(self, dataLoader, epoch, split):
        """Run one epoch (train or eval) of the TF1 image-to-image model.

        Args:
            dataLoader: pair of (tf.data iterator, dataset length).
            epoch: 1-based epoch number; variables are initialized on
                epoch 1 of training only.
            split: 'train' enables the optimizer step; any other value
                only evaluates.

        Returns:
            The running-average loss accumulated over the epoch.

        NOTE(review): the placeholders, model output and train_op below are
        re-created on every call, which grows the TF graph across epochs —
        building them once in __init__ would be cleaner; confirm callers
        don't rely on the current behavior.
        """
        train = split == 'train'
        num_iters = int(dataLoader[1] // self.batchSize)
        # Fixed 144x144 RGB input/target placeholders.
        batch_X, batch_Y = tf.placeholder(tf.float32, shape=[None, 144, 144, 3]), \
                           tf.placeholder(tf.float32, shape=[None, 144, 144, 3])
        # /-------------------------^-------------------------\
        out = self.model(batch_X)
        loss = self.criterion(out, batch_Y)
        train_op = self.train_op.minimize(loss)
        # init loss and accuracy
        avgLoss = RunningAverage()
        avgAcces = {}
        for metric in self.metrics:
            avgAcces[metric] = RunningAverage()
        bar = progbar(num_iters, width=self.opt.barwidth)
        print("\n=> [{}]ing epoch : {}".format(split, epoch))
        # train
        if epoch == 1 and train:
            self.sess.run(tf.global_variables_initializer())

        # NOTE(review): no threads are registered with this coordinator in
        # the visible code, so request_stop()/join() at the end are no-ops
        # here — confirm whether queue runners are started elsewhere.
        coord = tf.train.Coordinator()
        # begin one epoch
        for i in range(num_iters):
            if self.opt.debug and i > 2:  # check debug.
                break

            startTime = time.time()
            X_numpy, Y_numpy = self.sess.run(dataLoader[0].get_next())
            dataTime = time.time() - startTime

            logAcc = []
            if train:
                # Global step value fed to the learning-rate schedule.
                it = num_iters * (epoch - 1) + i * self.opt.batchSize
                lr, _, cost, out_eval = self.sess.run(
                    [self.lr, train_op, loss, out],
                    feed_dict={
                        batch_X: X_numpy,
                        batch_Y: Y_numpy,
                        self.ep: it
                    })
                logAcc.append(('LR', lr))
            else:
                cost, out_eval = self.sess.run([loss, out],
                                               feed_dict={
                                                   batch_X: X_numpy,
                                                   batch_Y: Y_numpy
                                               })

            runningTime = time.time() - startTime

            # log record.
            avgLoss.update(cost)
            for metric in self.metrics:
                avgAcces[metric].update(self.metrics[metric](Y_numpy,
                                                             out_eval))
                logAcc.append((metric, float(avgAcces[metric]())))

            bar.update(i, [('Time', runningTime),
                           ('loss', float(cost)), *logAcc])
            log = updateLog(epoch, i, num_iters, runningTime, dataTime, cost,
                            avgAcces)
            self.logger[split].write(log)

        coord.request_stop()
        coord.join()
        return avgLoss()