    parser.add_option('--backbone', dest='backbone',
                      help='Backbone pretrained model, could be resnet50, resnet101 or resnet152')

    options, args = parser.parse_args()
    mode = options.mode
    backbone = options.backbone
    if backbone not in ['resnet50', 'resnet101', 'resnet152']:
        raise ValueError('Invalid backbone: %s' % backbone)
    net = load_model(backbone)
    if mode == 'valid':
        datadir = cfg.root
        batch_size = 2
        anno_file = os.path.join(datadir, 'annotation.json')
        target_dir = 'valid_target.json'
        predict_dir = backbone + '_predict.json'
        val_transform = transforms.Compose(
            [transforms.ToTensor(),
             transforms.Normalize(cfg.mean, cfg.std)])
        valset = VocLikeDataset(image_dir=cfg.val_image_dir,
                                annotation_file=cfg.annotation_file,
                                imageset_fn=cfg.val_imageset_fn,
                                image_ext=cfg.image_ext,
                                classes=cfg.classes,
                                encoder=DataEncoder(),
                                transform=val_transform)
        valloader = torch.utils.data.DataLoader(valset,
                                                batch_size=batch_size,
                                                shuffle=False,
                                                num_workers=cfg.num_workers,
                                                collate_fn=valset.collate_fn)
        eval_valid(net, valloader, anno_file, cfg.test_dir)
        filedir = os.path.join(datadir, target_dir)
Example #2
def prediction(img1, img2, label, weight):
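    # Runs SiamUNetU change detection over paired val images and writes
    # thresholded binary masks to ./change/, either whole-image or over a
    # rows x cols grid of tiles when `crop` is set.
    # (img1, img2 and label are unused here; the loader supplies the data.)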
    print("weight")

    best_metric = 0
    train_transform_det = trans.Compose([
        trans.Scale(cfg.TRANSFROM_SCALES),
    ])
    val_transform_det = trans.Compose([
        trans.Scale(cfg.TRANSFROM_SCALES),
    ])
    test_transform_det = trans.Compose([
        trans.Scale((960, 960)),
    ])
    model = SiamUNetU(in_ch=3)
    model = torch.nn.DataParallel(model)
    if torch.cuda.is_available():
        model.cuda()
    # model.load_state_dict({k.replace('module.', ''): v for k, v in torch.load(weight).items()})
    # model.load_state_dict(torch.load(weight))
    checkpoint = torch.load(weight)
    model.load_state_dict(checkpoint['state_dict'])

    # test_data = my_dataset.Dataset(cfg.TEST_DATA_PATH, '',cfg.TEST_TXT_PATH, 'test', transform=True, transform_med=test_transform_det)
    test_data = my_dataset.Dataset(cfg.VAL_DATA_PATH,
                                   cfg.VAL_LABEL_PATH,
                                   cfg.VAL_TXT_PATH,
                                   'val',
                                   transform=True,
                                   transform_med=test_transform_det)
    test_dataloader = DataLoader(test_data,
                                 batch_size=cfg.TEST_BATCH_SIZE,
                                 shuffle=False,
                                 num_workers=8,
                                 pin_memory=True)
    crop = 0

    rows = 12
    cols = 12
    i = 0
    for batch_idx, val_batch in enumerate(test_dataloader):
        model.eval()
        batch_x1, batch_x2, _, filename, h, w = val_batch
        filename = filename[0].split('/')[-1].replace('image',
                                                      'mask_2017').replace(
                                                          '.png', '.tif')
        if crop:
            outputs = np.zeros((cfg.TEST_BATCH_SIZE, 1, 960, 960))
            # Tile the image over a rows x cols grid; reset the row offset so
            # every image is fully covered.
            i = 0

            while (i + w // rows <= w):
                j = 0
                while (j + h // cols <= h):
                    # Index 0: each loader batch holds a single image here.
                    batch_x1_ij = batch_x1[0, :, i:i + w // rows,
                                           j:j + h // cols]
                    batch_x2_ij = batch_x2[0, :, i:i + w // rows,
                                           j:j + h // cols]
                    # batch_y_ij = batch_y[0, :, i:i + w // rows, j:j + h // cols]
                    batch_x1_ij = np.expand_dims(batch_x1_ij, axis=0)
                    batch_x2_ij = np.expand_dims(batch_x2_ij, axis=0)
                    batch_x1_ij, batch_x2_ij = Variable(
                        torch.from_numpy(batch_x1_ij)).cuda(), Variable(
                            torch.from_numpy(batch_x2_ij)).cuda()
                    with torch.no_grad():
                        output = model(batch_x1_ij, batch_x2_ij)
                    output_w, output_h = output.shape[-2:]
                    # Channels-first view so the tile matches the (1, H, W) slice.
                    output = torch.sigmoid(output).view(-1, output_w, output_h)

                    output = output.data.cpu().numpy()
                    output = np.where(output > cfg.THRESH, 255, 0)
                    outputs[0, :, i:i + w // rows,
                            j:j + h // cols] = output

                    j += h // cols
                i += w // rows

            print(batch_idx)

            if not os.path.exists('./change'):
                os.mkdir('./change')
            print('./change/{}'.format(filename))
            cv2.imwrite('./change/crop_{}'.format(filename),
                        outputs[0, 0, :, :].astype(np.uint8))
        else:
            batch_x1, batch_x2 = Variable(batch_x1).cuda(), Variable(
                batch_x2).cuda()
            with torch.no_grad():
                output = model(batch_x1, batch_x2)
            output_w, output_h = output.shape[-2:]
            output = torch.sigmoid(output).view(output_w, output_h, -1)
            output = output.data.cpu().numpy()  # .resize([80, 80, 1])
            output = np.where(output > cfg.THRESH, 255,
                              0).astype(np.uint8)  # 8-bit mask for cv2.imwrite
            # output_final=cv2.merge(output)
            if not os.path.exists('./change'):
                os.mkdir('./change')

            print('./change/{}'.format(filename))
            cv2.imwrite('./change/{}'.format(filename), output)
Example #3
def prediction(weight):
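    # Runs SiamUNet change detection on the test split and exports each
    # thresholded mask as a single-band GeoTIFF through GDAL.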
    print("weight")

    best_metric = 0
    train_transform_det = trans.Compose([
        trans.Scale(cfg.TRANSFROM_SCALES),
    ])
    val_transform_det = trans.Compose([
        trans.Scale(cfg.TRANSFROM_SCALES),
    ])

    test_transform_det = trans.Compose([
        trans.Scale(cfg.TEST_TRANSFROM_SCALES),
    ])
    model = SiamUNet()
    # model=torch.nn.DataParallel(model)

    if torch.cuda.is_available():
        model.cuda()
        print('gpu')

    # model.load_state_dict({k.replace('module.', ''): v for k, v in torch.load(weight).items()})
    # model.load_state_dict(torch.load(weight))
    checkpoint = torch.load(weight)
    model.load_state_dict(checkpoint['state_dict'])
    test_data = my_dataset.Dataset(cfg.TEST_DATA_PATH,
                                   cfg.TEST_LABEL_PATH,
                                   cfg.TEST_TXT_PATH,
                                   'val',
                                   transform=True,
                                   transform_med=test_transform_det)
    test_dataloader = DataLoader(test_data,
                                 batch_size=cfg.TEST_BATCH_SIZE,
                                 shuffle=False,
                                 num_workers=8,
                                 pin_memory=True)
    crop = 0

    rows = 12
    cols = 12
    i = 0
    for batch_idx, val_batch in enumerate(test_dataloader):
        model.eval()

        batch_x1, batch_x2, mask, im_name, h, w = val_batch
        print('mask type: {}'.format(mask.type()))

        with torch.no_grad():
            batch_x1, batch_x2 = Variable(batch_x1).cuda(), Variable(
                batch_x2).cuda()

            try:
                output = model(batch_x1, batch_x2)
                del batch_x1, batch_x2
            except RuntimeError as exception:
                if 'out of memory' in str(exception):
                    print('WARNING: out of memory')
                    if hasattr(torch.cuda, 'empty_cache'):
                        torch.cuda.empty_cache()
                    # `output` was never produced; skip this batch.
                    continue
                else:
                    raise exception
        # print(output)
        output_w, output_h = output.shape[-2:]
        output = torch.sigmoid(output).view(output_w, output_h, -1)
        # print(output)
        output = output.data.cpu().numpy()  # .resize([80, 80, 1])
        output = np.where(output > cfg.THRESH, 255, 0)
        # print(output)
        # have no mask so can not eval_cal
        # precision,recall,F1=eval_cal(output,mask)
        # print('precision:{}\nrecall:{}\nF1:{}'.format(precision,recall,F1))

        print(im_name)
        im_n = im_name[0].split('/')[1].split('.')[0].split('_')
        im__path = 'final_result/weight50_dmc/mask_2017_2018_960_960_' + im_n[
            4] + '.tif'

        # im__path = 'weitht50_tif.tif'
        im_data = np.squeeze(output)
        print(im_data.shape)
        im_data = np.array([im_data])
        print(im_data.shape)
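        # Placeholder georeferencing: an identity-style geotransform and an
        # empty projection, since no real CRS is attached to the output.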
        im_geotrans = (0.0, 1.0, 0.0, 0.0, 0.0, 1.0)
        im_proj = ''
        im_width = 960
        im_height = 960
        im_bands = 1
        datatype = gdal.GDT_Byte
        driver = gdal.GetDriverByName("GTiff")
        # Make sure the output folder exists before GDAL tries to create the file.
        os.makedirs(os.path.dirname(im__path), exist_ok=True)
        dataset = driver.Create(im__path, im_width, im_height, im_bands,
                                datatype)
        if dataset is not None:
            print("----{}".format(im__path))
            dataset.SetGeoTransform(im_geotrans)
            dataset.SetProjection(im_proj)
            # GDAL bands are 1-indexed; write only if creation succeeded.
            for band in range(im_bands):
                dataset.GetRasterBand(band + 1).WriteArray(im_data[band])

        del dataset
Example #4
def main():
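    # Trains a single-stream DeepLabV3 change detector with Adam, shrinking
    # the LR on validation-loss plateaus and checkpointing every 10 epochs.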
    best_metric = 0
    train_transform_det = trans.Compose([
        trans.Scale(cfg.TRANSFROM_SCALES),
    ])
    val_transform_det = trans.Compose([
        trans.Scale(cfg.TRANSFROM_SCALES),
    ])

    train_data = my_dataset.Dataset(cfg.TRAIN_DATA_PATH,
                                    cfg.TRAIN_LABEL_PATH,
                                    cfg.TRAIN_TXT_PATH,
                                    'train',
                                    transform=True,
                                    transform_med=train_transform_det)
    val_data = my_dataset.Dataset(cfg.VAL_DATA_PATH,
                                  cfg.VAL_LABEL_PATH,
                                  cfg.VAL_TXT_PATH,
                                  'val',
                                  transform=True,
                                  transform_med=val_transform_det)
    train_dataloader = DataLoader(train_data,
                                  batch_size=cfg.BATCH_SIZE,
                                  shuffle=True,
                                  num_workers=1,
                                  pin_memory=True)
    val_dataloader = DataLoader(val_data,
                                batch_size=cfg.BATCH_SIZE,
                                shuffle=False,
                                num_workers=1,
                                pin_memory=True)

    model = DeepLabV3(model_id=1, project_dir=cfg.BASE_PATH)
    if cfg.RESUME:
        checkpoint = torch.load(cfg.TRAINED_LAST_MODEL)
        model.load_state_dict(checkpoint['state_dict'])
        print('resume success \n')
    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
        model = nn.DataParallel(model)

    if torch.cuda.is_available():
        model.cuda()

    # params = [{'params': md.parameters()} for md in model.children() if md in [model.classifier]]
    optimizer = optim.Adam(model.parameters(),
                           lr=cfg.INIT_LEARNING_RATE,
                           weight_decay=cfg.DECAY)
    scheduler = ReduceLROnPlateau(optimizer,
                                  mode='min',
                                  factor=0.2,
                                  patience=5,
                                  verbose=True,
                                  threshold=0.0001,
                                  threshold_mode='rel',
                                  cooldown=2,
                                  eps=1e-08)
    fl = FocalLoss2d(gamma=cfg.FOCAL_LOSS_GAMMA)
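    # NOTE: fl is kept for experimentation; the loop below optimizes calc_loss.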
    Loss_list = []
    Accuracy_list = []
    for epoch in range(cfg.EPOCH):
        print('epoch {}'.format(epoch + 1))
        #training--------------------------
        train_loss = 0
        train_acc = 0
        for batch_idx, train_batch in enumerate(train_dataloader):
            model.train()
            batch_det_img, batch_y, _, _, _, _, _ = train_batch
            batch_det_img, batch_y = Variable(batch_det_img).cuda(), Variable(
                batch_y).cuda()
            output = model(batch_det_img)
            del batch_det_img
            loss = calc_loss(output, batch_y)
            # train_loss += loss.data[0]
            #should change after
            # pred = torch.max(out, 1)[0]
            # train_correct = (pred == batch_y).sum()
            # train_acc += train_correct.data[0]
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if batch_idx % 5 == 0:
                model.eval()
                val_loss = 0
                # Validate without autograd graphs to keep GPU memory flat.
                with torch.no_grad():
                    for v_batch_idx, val_batch in enumerate(val_dataloader):
                        v_batch_det_img, v_batch_y, _, _, _, _, _ = val_batch
                        v_batch_det_img, v_batch_y = Variable(
                            v_batch_det_img).cuda(), Variable(v_batch_y).cuda()
                        val_out = model(v_batch_det_img)
                        del v_batch_det_img
                        val_loss += float(calc_loss(val_out, v_batch_y))
                scheduler.step(val_loss)
                del val_out, v_batch_y
                print("Train Loss: {:.6f}  Val Loss: {:.10f}".format(
                    loss.item(), val_loss))

        if (epoch + 1) % 10 == 0:
            torch.save({'state_dict': model.state_dict()},
                       os.path.join(
                           cfg.SAVE_MODEL_PATH, cfg.TRAIN_LOSS,
                           'model_tif_deeplab18_bce_240*240_' +
                           str(epoch + 1) + '.pth'))
    torch.save({'state_dict': model.state_dict()},
               os.path.join(cfg.SAVE_MODEL_PATH, cfg.TRAIN_LOSS,
                            'model_tif_deeplab18_bce_240*240_last.pth'))
Example #5
def main():
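    # Trains SiamUNet on image pairs with a deeply supervised objective
    # (calc_loss_L4 over four output heads) and a StepLR schedule.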
    best_metric = 0
    train_transform_det = trans.Compose([
        trans.Scale(cfg.TRANSFROM_SCALES),
    ])
    val_transform_det = trans.Compose([
        trans.Scale(cfg.TRANSFROM_SCALES),
    ])

    train_data = my_dataset.Dataset(cfg.TRAIN_DATA_PATH,
                                    cfg.TRAIN_LABEL_PATH,
                                    cfg.TRAIN_TXT_PATH,
                                    'train',
                                    transform=True,
                                    transform_med=train_transform_det)
    val_data = my_dataset.Dataset(cfg.VAL_DATA_PATH,
                                  cfg.VAL_LABEL_PATH,
                                  cfg.VAL_TXT_PATH,
                                  'val',
                                  transform=True,
                                  transform_med=val_transform_det)
    train_dataloader = DataLoader(train_data,
                                  batch_size=cfg.BATCH_SIZE,
                                  shuffle=True,
                                  num_workers=8,
                                  pin_memory=True)
    val_dataloader = DataLoader(val_data,
                                batch_size=cfg.BATCH_SIZE,
                                shuffle=False,
                                num_workers=8,
                                pin_memory=True)

    model = SiamUNet(in_ch=3)
    if cfg.RESUME:
        checkpoint = torch.load(cfg.TRAINED_LAST_MODEL)
        model.load_state_dict(checkpoint['state_dict'])
        print('resume success \n')
    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
        model = nn.DataParallel(model)

    if torch.cuda.is_available():
        model.cuda()

    # params = [{'params': md.parameters()} for md in model.children() if md in [model.classifier]]
    optimizer = optim.Adam(model.parameters(),
                           lr=cfg.INIT_LEARNING_RATE,
                           weight_decay=cfg.DECAY)
    fl = FocalLoss2d(gamma=cfg.FOCAL_LOSS_GAMMA)
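    # NOTE: fl is kept for experimentation; the loop below optimizes
    # calc_loss_L4.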
    Loss_list = []
    Accuracy_list = []
    scheduler = StepLR(optimizer, step_size=8, gamma=0.1)
    for epoch in range(cfg.EPOCH):
        # NOTE: PyTorch >= 1.1 expects scheduler.step() after optimizer.step();
        # stepping at the top of the epoch preserves the original schedule.
        scheduler.step()
        print('epoch {}'.format(epoch + 1))
        #training--------------------------
        train_loss = 0
        train_acc = 0
        for batch_idx, train_batch in enumerate(train_dataloader):
            model.train()
            batch_x1, batch_x2, batch_y, _, _, _ = train_batch
            batch_x1, batch_x2, batch_y = Variable(batch_x1).cuda(), Variable(
                batch_x2).cuda(), Variable(batch_y).cuda()
            outputs = model(batch_x1, batch_x2)
            del batch_x1, batch_x2
            loss = calc_loss_L4(outputs[0], outputs[1], outputs[2], outputs[3],
                                batch_y)
            # train_loss += loss.data[0]
            #should change after
            # pred = torch.max(out, 1)[0]
            # train_correct = (pred == batch_y).sum()
            # train_acc += train_correct.data[0]
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if batch_idx % 5 == 0:
                model.eval()
                val_loss = 0
                # Validate without autograd graphs to keep GPU memory flat.
                with torch.no_grad():
                    for v_batch_idx, val_batch in enumerate(val_dataloader):
                        v_batch_x1, v_batch_x2, v_batch_y, _, _, _ = val_batch
                        v_batch_x1, v_batch_x2, v_batch_y = Variable(
                            v_batch_x1).cuda(), Variable(
                                v_batch_x2).cuda(), Variable(v_batch_y).cuda()
                        val_outs = model(v_batch_x1, v_batch_x2)
                        del v_batch_x1, v_batch_x2
                        val_loss += float(
                            calc_loss_L4(val_outs[0], val_outs[1], val_outs[2],
                                         val_outs[3], v_batch_y))
                del val_outs, v_batch_y
                print("Train Loss: {:.6f}  Val Loss: {:.10f}".format(
                    loss.item(), val_loss))

        if (epoch + 1) % 5 == 0:
            torch.save({'state_dict': model.state_dict()},
                       os.path.join(cfg.SAVE_MODEL_PATH, cfg.TRAIN_LOSS,
                                    'model_tif_' + str(epoch + 1) + '.pth'))
    torch.save({'state_dict': model.state_dict()},
               os.path.join(cfg.SAVE_MODEL_PATH, cfg.TRAIN_LOSS,
                            'model_tif_last.pth'))
Example #6
def prediction(weight):
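    # Runs single-stream DeepLabV3 inference on the val split and writes
    # thresholded binary masks to ./change/; the tiled path is stubbed out.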

    best_metric = 0
    train_transform_det = trans.Compose([
        trans.Scale(cfg.TRANSFROM_SCALES),
    ])
    val_transform_det = trans.Compose([
        trans.Scale(cfg.TRANSFROM_SCALES),
    ])
    test_transform_det = trans.Compose([
        trans.Scale((960, 960)),
    ])
    model = DeepLabV3(model_id=1, project_dir=cfg.BASE_PATH)
    # model=torch.nn.DataParallel(model)
    if torch.cuda.is_available():
        model.cuda()
    # model.load_state_dict({k.replace('module.', ''): v for k, v in torch.load(weight).items()})
    # model.load_state_dict(torch.load(weight))
    checkpoint = torch.load(weight)
    model.load_state_dict(checkpoint['state_dict'])

    # test_data = my_dataset.Dataset(cfg.TEST_DATA_PATH, '',cfg.TEST_TXT_PATH, 'test', transform=True, transform_med=test_transform_det)
    test_data = my_dataset.Dataset(cfg.VAL_DATA_PATH,
                                   cfg.VAL_LABEL_PATH,
                                   cfg.VAL_TXT_PATH,
                                   'val',
                                   transform=True,
                                   transform_med=test_transform_det)
    # test_data = my_dataset.Dataset(cfg.TRAIN_DATA_PATH, cfg.TRAIN_LABEL_PATH, cfg.TRAIN_TXT_PATH, 'train', transform=True, transform_med=test_transform_det)
    test_dataloader = DataLoader(test_data,
                                 batch_size=cfg.TEST_BATCH_SIZE,
                                 shuffle=False,
                                 num_workers=8,
                                 pin_memory=True)
    crop = 0

    for batch_idx, val_batch in enumerate(test_dataloader):
        model.eval()
        #
        # batch_x1, batch_x2, _, filename, h, w, green_mask1, green_mask2 = val_batch
        batch_det_img, _, filename, h, w, _, green_mask2 = val_batch
        # green_mask1 = green_mask1.view(output_w, output_h, -1).data.cpu().numpy()
        filename = filename[0].split('/')[-1].replace('image', 'mask_2017')
        if crop:
            pass
            # outputs = np.zeros((cfg.TEST_BATCH_SIZE,1,960, 960))
            #
            # while (i + w // rows <= w):
            #     j = 0
            #     while (j + h // cols <= h):
            #         batch_x1_ij = batch_x1[0, :, i:i + w // rows, j:j + h // cols]
            #         batch_x2_ij = batch_x2[0, :, i:i + w // rows, j:j + h // cols]
            #         # batch_y_ij = batch_y[batch_idx,: , i:i + w // rows, j:j + h // cols]
            #         batch_x1_ij = np.expand_dims(batch_x1_ij, axis=0)
            #         batch_x2_ij = np.expand_dims(batch_x2_ij, axis=0)
            #         batch_x1_ij, batch_x2_ij = Variable(torch.from_numpy(batch_x1_ij)).cuda(), Variable(
            #             torch.from_numpy(batch_x2_ij)).cuda()
            #         with torch.no_grad():
            #             output = model(batch_x1_ij, batch_x2_ij)
            #         output_w, output_h = output.shape[-2:]
            #         output = torch.sigmoid(output).view(-1, output_w, output_h)
            #
            #         output = output.data.cpu().numpy()  # .resize([80, 80, 1])
            #         output = np.where(output > cfg.THRESH, 255, 0)
            #         outputs[0, :, i:i + w // rows, j:j + h // cols] = output
            #
            #         j += h // cols
            #     i += w // rows
            #
            #
            # if not os.path.exists('./change'):
            #     os.mkdir('./change')
            # print('./change/{}'.format(filename))
            # cv2.imwrite('./change/crop_{}'.format(filename), outputs[0,0,:,:])
        else:
            batch_det_img = Variable(batch_det_img).cuda()
            with torch.no_grad():
                outputs = model(batch_det_img)

            output_w, output_h = outputs[0].shape[-2:]

            # green_mask2 = green_mask2.view(output_w, output_h, -1).data.cpu().numpy()

            output = torch.sigmoid(outputs).view(output_w, output_h,
                                                 -1).data.cpu().numpy()
            # print(output.min(), output.max())
            output = np.where(output > cfg.THRESH, 255,
                              0).astype(np.uint8)  # 8-bit mask for cv2.imwrite
            if not os.path.exists('./change'):
                os.mkdir('./change')

            print('./change/{}'.format(filename))
            cv2.imwrite('./change/{}'.format(filename), output)
Example #7
    def __init__(self, encoder, transform=None, test=False, val=False):
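        # Builds the dataset index: gathers TSV annotation paths (RTSD / ice
        # subsets), splits train vs. val by folder, parses tasks in parallel
        # with joblib, and caches the result as train.tasks / val.tasks.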
#         self.class_map = {c: i for i, c in enumerate(['2.1', '2.4', '3.1', '3.24', '3.27', '4.1', '4.2', '5.19', '5.20', '8.22'], 1)}
#         self.class_map = defaultdict(lambda: 0, self.class_map)
#         self.categories = {v: k for k, v in self.class_map.items()}

        self.labels_paths = glob(os.path.join(DIR, '**/annotations/**/*.tsv'), recursive=True)
        self.labels_paths = [x for x in self.labels_paths if x.split('/')[5] in ('RTSD', 'ice')]
        # print(self.labels_paths)
        if val:
            self.labels_paths = [x for x in self.labels_paths if x.split('/')[-2] in test_folders]
        else:
            self.labels_paths = [x for x in self.labels_paths if x.split('/')[-2] not in test_folders]
        print(len(self.labels_paths))
        self.tasks = []

        with open(os.path.join(DIR, 'classes_map.pkl'), 'rb') as f:
            self.label_map = pickle.load(f)
        print(self.label_map)
        self.val = val
        paths = []
        self.test = test
        self.encoder = encoder
        self.task_dict = {}
        self.transform = transform
        np.random.shuffle(self.labels_paths)
        print("FINDING TABLES")
        from time import time
        from functools import partial
        tik = time()
        # for path in tqdm(self.labels_paths[:3000]):
        # update_task(tasks=self.task_dict, path=path)

        fname = 'train.tasks' if not val else 'val.tasks'
        if not os.path.exists(fname):
            # Parse every annotation TSV once in parallel, then cache the
            # resulting task dict on disk for later runs.
            Parallel(n_jobs=8, require='sharedmem')(
                delayed(partial(update_task,
                                tasks=self.task_dict,
                                label_map=self.label_map))(path)
                for path in tqdm(self.labels_paths[:]))
            with open(fname, 'wb') as f:
                pickle.dump(self.task_dict, f)
        else:
            print('FOUND CACHE', fname)
            with open(fname, 'rb') as f:
                self.task_dict = pickle.load(f)
        tok = time()
        print('elapsed {:.1f}'.format((tok-tik)/60), 'minutes')
        #for path in tqdm(self.labels_paths):
            #print(val, path.split('/')[-2])
         #   pass
        self.tasks = list(self.task_dict.keys())

#         width_count = 0
#         height_count = 0
#         total_count = 0
#         for t, d in self.task_dict.items():
#             for b in d['boxes']:
#                 if (b.right - b.left) >= 512:
#                     width_count += 1
#                 if (b.bottom-b.top) >= 512:
#                     height_count+=1
#                 total_count += 1
#         print('\n\n\n')
#         print(width_count / total_count)
#         print(height_count / total_count)
#         print('\n\n\n')

        self.pil_transform = transforms.Compose([
            transforms.ResizeKeepAR(cfg.width, cfg.height),
            transforms.RandomCrop(cfg.width, cfg.height, 5, 100, 0.3),
        ])


        print("FOUND", len(self.tasks), " TABLES FOR {}".format("TRAIN" if not val else "TEST"))
        import sys
        sys.stdout.flush()