Code Example #1
def pred_and_fill(img, op, radius, patch, nets, outputs="./flatting/gradio"):
    
    # initialize output file paths
    outs = []
    outs.append(join(outputs, "%s_input.png"%op))
    outs.append(join(outputs, "%s_fill.png"%op))
    outs.append(join(outputs, "%s_fill_edge.png"%op))
    outs.append(join(outputs, "%s_fill_line.png"%op))
    outs.append(join(outputs, "%s_fill_line_full.png"%op))
    

    # predict full image
    # img = cv2.threshold(img, 240, 255, cv2.THRESH_BINARY)
    if patch == "False":
        # img_w = Image.new("RGBA", img.size, "WHITE")
        # try:
        #     img_w.paste(img, None, img)
        #     img = img_w.convert("L")
        # except:
        #     print("Log:\tfailed to add white background")
        
        edge = predict_img(net=nets[op][0],
                           full_img=img,
                           device=nets[op][1],
                           size=int(op.replace("_rand", "")))
    else:
        print("Log:\tsplit input into 4 patch with model %s"%(op))
        # cut image into non-overlapping patches
        imgs = split_to_4(img)

        edges = []
        # use a distinct name so the loop does not shadow the `patch` argument
        for patch_img in imgs:
            edge = predict_img(net=nets[op][0],
                               full_img=patch_img,
                               device=nets[op][1],
                               size=int(op))

            edges.append(np.array(edge))

        edge = Image.fromarray(merge_to_1(edges))

    print("Log:\ttrapping ball filling with radius %s"%radius)
    fill = region_get_map(edge.convert("L"),
        radius_set=[int(radius)], percentiles=[0],
        path_to_line_artist=img,
        return_numpy=True,
        preview = True)

    return edge, fill
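
Example #1 relies on two helpers, split_to_4 and merge_to_1, that are not shown on this page. A minimal sketch of what they might look like, assuming the input is a PIL image cut into four equal quadrants and the predicted patches come back as NumPy arrays in the same order (an illustration, not the project's actual implementation):

import numpy as np

def split_to_4(img):
    # Split a PIL image into four equal, non-overlapping quadrants:
    # top-left, top-right, bottom-left, bottom-right.
    w, h = img.size
    return [img.crop((0, 0, w // 2, h // 2)),
            img.crop((w // 2, 0, w, h // 2)),
            img.crop((0, h // 2, w // 2, h)),
            img.crop((w // 2, h // 2, w, h))]

def merge_to_1(patches):
    # Reassemble the four quadrant arrays into one array in the same layout.
    top = np.concatenate([patches[0], patches[1]], axis=1)
    bottom = np.concatenate([patches[2], patches[3]], axis=1)
    return np.concatenate([top, bottom], axis=0)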
Code Example #2
def getApplicationsImg():
    getData = request.files.get('image')
    img_data = getData.read()
    np_ary = np.frombuffer(img_data, np.uint8)  # np.fromstring is deprecated for binary data
    cv_image = cv2.imdecode(np_ary, cv2.IMREAD_COLOR)
    predict_Ary = predict.predict_img(cv_image)
    return jsonify(predict_Ary), 201
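
Example #2 is a Flask view that decodes an uploaded image with OpenCV and returns the prediction as JSON with status 201. A minimal client sketch using the requests library; the URL and route are assumptions, only the multipart field name 'image' is dictated by the view:

import requests

# Hypothetical URL; the actual route depends on how the view is registered.
url = 'http://localhost:5000/applications/img'

with open('sample.png', 'rb') as f:
    # The view reads request.files.get('image'), so the field must be named 'image'.
    resp = requests.post(url, files={'image': f})

print(resp.status_code)  # 201 on success
print(resp.json())       # the prediction returned by jsonify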
Code Example #3
def submit(net, gpu=False):
    """Used for Kaggle submission: predicts and encode all test images"""
    dir = 'data/test/'

    N = len(list(os.listdir(dir)))
    with open('SUBMISSION.csv', 'a') as f:
        f.write('img,rle_mask\n')
        for index, i in enumerate(os.listdir(dir)):
            print('{}/{}'.format(index, N))

            img = Image.open(dir + i)

            mask = predict_img(net, img, gpu)
            enc = rle_encode(mask)
            f.write('{},{}\n'.format(i, ' '.join(map(str, enc))))
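
Example #3 and the later submit() variants (#4, #6, #7, #8) all pass the predicted mask through rle_encode before writing the CSV, but the helper is never shown. A common run-length encoder for binary masks in the Kaggle submission format looks roughly like this; it is a sketch, not necessarily the exact implementation these projects use:

import numpy as np

def rle_encode(mask):
    # Flatten the binary mask column-wise and emit alternating
    # (start, length) values, 1-indexed, as Kaggle's RLE format expects.
    pixels = np.asarray(mask).flatten(order='F')
    pixels = np.concatenate([[0], pixels, [0]])
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
    runs[1::2] -= runs[::2]
    return runs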
Code Example #4
File: submit.py  Project: Zchhh73/Unet2D_torchVersion
def submit(net):
    """Used for Kaggle submission: predicts and encode all test images"""
    dir = 'data/test/'
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    N = len(list(os.listdir(dir)))
    with open('SUBMISSION.csv', 'a') as f:
        f.write('img,rle_mask\n')
        for index, i in enumerate(os.listdir(dir)):
            print('{}/{}'.format(index, N))

            img = Image.open(dir + i)

            mask = predict_img(net, img, device)
            enc = rle_encode(mask)
            f.write('{},{}\n'.format(i, ' '.join(map(str, enc))))
Code Example #5
def func_one():
    if request.method == 'GET':
        return render_template('index.html', value='hi')

    if request.method == 'POST':
        print(request.files)
        if 'file' not in request.files:
            print('File not uploaded')
            return

        file = request.files['file']
        img = file.read()
        f = io.BytesIO(img)
        predictions = predict_img(model, f)
        return render_template('predictor.html', prediction=predictions)
Code Example #6
File: submit.py  Project: inouetaka/UNet
def submit(net, device):
    dir = 'data/test/'

    N = len(list(os.listdir(dir)))
    with open('SUBMISSION.csv', 'a') as f:
        f.write('img,rle_mask\n')
        for index, i in enumerate(os.listdir(dir)):
            img = Image.open(dir + i)

            mask = predict_img(net, img, device)
            enc = rle_encode(mask)
            f.write('{},{}\n'.format(i, ' '.join(map(str, enc))))

            if index % 100 == 0:
                print('{}/{}'.format(index, N))
Code Example #7
def submit(net, gpu=False):
    """Used for Kaggle submission: predicts and encode all test images"""
    dir = '/home/pengbo/project/datasets/TGS_Salt/images/'

    N = len(list(os.listdir(dir)))
    with open('SUBMISSION.csv', 'a') as f:
        f.write('id,rle_mask\n')
        for index, i in enumerate(os.listdir(dir)):
            print('{}/{}'.format(index, N))

            img = Image.open(dir + i)

            mask = predict_img(net, img, gpu)
            #ipdb.set_trace()
            enc = rle_encode(mask)
            f.write('{},{}\n'.format(i.split('.')[0], ' '.join(map(str, enc))))
Code Example #8
File: submit.py  Project: geochri/unet.pytorch
def submit(net, args, gpu=False):
    """Used for Kaggle submission: predicts and encode all test images"""
    dir = os.path.join(args.dataset_folder, 'data/test/')

    N = len(list(os.listdir(dir)))

    rle_encoded_masks = []
    for index, i in enumerate(tqdm(os.listdir(dir))):
        img = Image.open(dir + i)

        mask = predict_img(net, img, gpu, use_dense_crf=not args.no_crf)
        enc = rle_encode(mask)
        rle_encoded_masks.append(enc)

    with open('submission.csv', 'w') as f:
        f.write('img,rle_mask\n')
        for index, i in enumerate(tqdm(os.listdir(dir))):
            print('{}/{}'.format(index, N))
            f.write('{},{}\n'.format(
                i, ' '.join(map(str, rle_encoded_masks[index]))))
Code Example #9
 # read all image paths
 print('begin')
 # iterate over all images
 net.to(device)
 net.load_state_dict(
     torch.load('/home/zhengzy/UnetTest/checkpoints/CP_epoch4.pth',
                map_location=device))
 m = 1
 dir = '/home/zhengzy/UnetTest/data/test/'
 save_path = '/home/zhengzy/UnetTest/predict_data/'
 file_list = os.listdir(dir)
 for file in file_list:
     img_path = dir + file
     image = Image.open(img_path)
     # print(image.shape)
     mask = predict_img(net, image, device)
     result = mask_to_image(mask)
     print(save_path + file)
     # save the result
     result.save(save_path + file)
 # for k in range(9,14):
 #     path="F:/tf_unet_data/three/imgs/"
 #     img_path=path+str(k)+".jpg"
 #     # path to save the result
 #     print(img_path)
 #     save_res_path = path + str(k)+'_res.jpg'
 #     # read the image
 #     image = Image.open(img_path)
 #     # print(image.shape)
 #     # print(image.size)
 #     mask = predict_img(net,image,device)
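
Example #9 (and #10 below) saves predictions through mask_to_image, which is also not defined on this page. A plausible one-line implementation, assuming predict_img returns a mask with values in [0, 1]:

import numpy as np
from PIL import Image

def mask_to_image(mask):
    # Scale a float (or boolean) mask in [0, 1] to 8-bit grayscale
    # and wrap it as a PIL image so it can be saved with .save().
    return Image.fromarray((np.asarray(mask) * 255).astype(np.uint8))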
Code Example #10
def train_net(net,
              device,
              epochs=5,
              batch_size=2,
              lr=0.0001,
              val_percent=0.2,
              save_cp=True,
              img_scale=1):

    # Init dataset and train/test split
    dataset = BasicDataset(dir_img, dir_mask, img_scale)
    n_val = int(len(dataset) * val_percent)
    n_train = len(dataset) - n_val
    train, val = random_split(dataset, [n_train, n_val])

    # Call DataLoader
    train_loader = DataLoader(train,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=8,
                              pin_memory=True,
                              drop_last=True)
    val_loader = DataLoader(val,
                            batch_size=batch_size,
                            shuffle=False,
                            num_workers=8,
                            pin_memory=True,
                            drop_last=True)

    # Writer to tensorboard
    writer = SummaryWriter(
        comment=f'LR_{lr}_BS_{batch_size}_SCALE_{img_scale}')
    global_step = 0

    logging.info(f'''Starting training:
            Epochs:          {epochs}
            Batch size:      {batch_size}
            Learning rate:   {lr}
            Training size:   {n_train}
            Validation size: {n_val}
            Checkpoints:     {save_cp}
            Device:          {device.type}
            Images scaling:  {img_scale}
        ''')

    # Init optimizer and define lr_scheduler
    optimizer = optim.Adam(net.parameters(), lr=lr)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, 'min' if net.n_classes > 1 else 'max', patience=2)

    # In this version, we use BCEWithLogitsLoss
    criterion = nn.BCEWithLogitsLoss()

    for epoch in range(epochs):
        net.train()

        epoch_loss = 0
        with tqdm(total=n_train,
                  desc=f'Epoch {epoch + 1}/{epochs}',
                  unit='img') as pbar:
            for batch in train_loader:
                imgs = batch['image']
                true_masks = batch['mask']

                # Set-up device
                imgs = imgs.to(device=device, dtype=torch.float32)
                mask_type = torch.float32 if net.n_classes == 1 else torch.long

                true_masks = true_masks.to(device=device, dtype=mask_type)

                # Forward
                masks_pred = net(imgs)

                loss = criterion(masks_pred, true_masks)
                epoch_loss += loss.item()
                writer.add_scalar('Loss/train', loss.item(), global_step)

                pbar.set_postfix(**{'loss (batch)': loss.item()})

                optimizer.zero_grad()
                loss.backward()
                nn.utils.clip_grad_value_(net.parameters(), 0.1)
                optimizer.step()

                pbar.update(imgs.shape[0])
                global_step += 1

                if global_step % (n_train // (2 * batch_size)) == 0:
                    # if global_step % 100 == 0:
                    # Track weight and gradient
                    for tag, value in net.named_parameters():
                        tag = tag.replace('.', '/')
                        writer.add_histogram('weights/' + tag,
                                             value.data.cpu().numpy(),
                                             global_step)
                        writer.add_histogram('grads/' + tag,
                                             value.grad.data.cpu().numpy(),
                                             global_step)

                    val_score, val_score_iou = eval_net(
                        net, val_loader, device)
                    scheduler.step(val_score)

                    writer.add_scalar('learning_rate',
                                      optimizer.param_groups[0]['lr'],
                                      global_step)

                    # Visualize val scores
                    logging.info('Validation Dice Coeff: {}'.format(val_score))
                    logging.info(
                        'Validation IoU Coeff: {}'.format(val_score_iou))
                    writer.add_scalar('Dice/test', val_score, global_step)
                    writer.add_scalar('IoU/test', val_score_iou, global_step)
                    writer.add_images('images', imgs, global_step)

                    if net.n_classes == 1:
                        writer.add_images('masks/true', true_masks,
                                          global_step)
                        writer.add_images('masks/pred',
                                          torch.sigmoid(masks_pred) > 0.5,
                                          global_step)

        # test with sample images
        test_folder = 'test/test_epoch_{}_new'.format(epoch)
        os.makedirs(test_folder, exist_ok=True)
        dirs = os.listdir('test/test_set')
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

        for file in dirs:
            img = Image.open(os.path.join('test/test_set', file))
            mask = predict_img(net=net, full_img=img, device=device)

            result = mask_to_image(mask)
            result.save(os.path.join(test_folder, file))

        if save_cp:
            try:
                os.mkdir(dir_checkpoint)
                logging.info('Created checkpoint directory')
            except OSError:
                pass
            torch.save(net.state_dict(),
                       dir_checkpoint + f'CP_epoch{epoch + 1}.pth')
            logging.info(f'Checkpoint {epoch + 1} saved !')

    writer.close()
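
Every example on this page calls predict_img, but the function itself is never shown. For the single-class U-Net setups above (net.n_classes == 1, trained with BCEWithLogitsLoss), prediction typically amounts to a forward pass followed by a sigmoid and a threshold. A minimal sketch under those assumptions; preprocessing details such as resizing and normalization differ between the quoted projects:

import numpy as np
import torch

def predict_img(net, full_img, device, threshold=0.5):
    # Convert the PIL image to a 1xCxHxW float tensor in [0, 1].
    img = torch.from_numpy(np.array(full_img, dtype=np.float32) / 255.0)
    if img.ndim == 2:              # grayscale: add a channel dimension
        img = img.unsqueeze(0)
    else:                          # HWC -> CHW
        img = img.permute(2, 0, 1)
    img = img.unsqueeze(0).to(device=device, dtype=torch.float32)

    net.eval()
    with torch.no_grad():
        output = net(img)                    # raw logits
        probs = torch.sigmoid(output)        # single-class probability map
        mask = probs.squeeze().cpu().numpy() > threshold
    return mask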
Code Example #11
    config.read('config.ini')
    patch_width = int(config['Data']['patch_size_x'])
    patch_height = int(config['Data']['patch_size_y'])
    batch_size = int(config['Predict']['batch_size'])
    stride_width = int(config['Predict']['stride_width'])
    stride_height = int(config['Predict']['stride_height'])

    # 'SegNet', 'WNet', 'UNet'
    network = 'WNet'

    # You can replace it by your own path
    path_to_images = './Training_data/org/'
    path_to_save = './predicted/'

    # Path to the trained model produced by training.py
    path_to_trained_model = f'./Trained_model/{network}/model_Fold_0.json'
    path_to_trained_model_weights = f'./Trained_model/{network}/model_weights_Fold_0.h5'

    # Load model
    model = model_from_json(open(path_to_trained_model).read())
    model.load_weights(path_to_trained_model_weights)

    for im_path in glob(str(Path(path_to_images) / '*')):
        im_name = os.path.split(im_path)[-1]
        save_name = f'{path_to_save}/{im_name}'
        os.makedirs(os.path.split(path_to_save)[0], exist_ok=True)

        prediction = predict_img(im_path, model, patch_height, patch_width,
                                 stride_height, stride_width, batch_size)
        io.imsave(save_name, prediction)
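
Example #11 starts mid-script: config is a configparser.ConfigParser created before the config.read('config.ini') call, and config.ini must provide the sections and keys the snippet reads. A sketch of that setup; the numeric values are placeholders:

import configparser

config = configparser.ConfigParser()
# Sections and keys matching the reads above; the values are placeholders.
config['Data'] = {'patch_size_x': '128', 'patch_size_y': '128'}
config['Predict'] = {'batch_size': '8',
                     'stride_width': '64',
                     'stride_height': '64'}
with open('config.ini', 'w') as f:
    config.write(f)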