Example #1
def execute(rgb_image):
    # Segments an RGB frame with the global `net` and stores the per-pixel
    # class map in the module-level `result` variable.
    global result
    with torch.no_grad():
        print('python taken!')               # debug marker
        cv2.imwrite("test.png", rgb_image)   # debug dump of the input frame
        orig_size = rgb_image.shape[:2][::-1]
        img_inp = torch.tensor(prepare_img(rgb_image).transpose(2, 0, 1)[None]).float()
        img_inp = img_inp.cuda()
        segm = net(img_inp)[0].data.cpu().numpy().transpose(1, 2, 0)
        segm = cv2.resize(segm, orig_size, interpolation=cv2.INTER_CUBIC)
        segm = segm.argmax(axis=2).astype(np.uint8)
        print(np.max(segm))                  # highest class index present
        result = segm
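
All of the examples on this page rely on a prepare_img helper from the Light-Weight RefineNet codebase. A minimal sketch of what such a helper typically does (scale to [0, 1], then ImageNet mean/std normalisation) is shown below; the exact constants are an assumption, not taken from this page.

import numpy as np

# Assumed ImageNet statistics; the real prepare_img may differ.
IMG_MEAN = np.array([0.485, 0.456, 0.406], dtype=np.float32)
IMG_STD = np.array([0.229, 0.224, 0.225], dtype=np.float32)

def prepare_img(img):
    # HxWx3 uint8 RGB -> normalised float32 array, ready for the segmenter.
    return (img.astype(np.float32) / 255.0 - IMG_MEAN) / IMG_STD
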
Example #2

    def do_segmentation(self, img):
        # Runs self.net on one RGB image and returns a colour-mapped
        # segmentation at the original resolution.

        with torch.no_grad():
            # img = np.array(Image.open(img_path))
            orig_size = img.shape[:2][::-1]

            img_inp = torch.tensor(prepare_img(img).transpose(
                2, 0, 1)[None]).float()
            if self.has_cuda:
                img_inp = img_inp.cuda()

            segm = self.net(img_inp)[0].data.cpu().numpy().transpose(1, 2, 0)
            segm = cv2.resize(segm, orig_size, interpolation=cv2.INTER_CUBIC)
            segm = self.cmap[segm.argmax(axis=2).astype(np.uint8)]

            return segm
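
The self.cmap lookup in the last line converts each predicted class index into an RGB colour. A minimal illustration of that palette decode, assuming cmap is an (n_classes, 3) uint8 array like the cmap.npy used elsewhere on this page:

# Hedged illustration of the colour-map decode; `raw` stands in for the
# resized network output (HxWxC) and is an assumption for this sketch.
class_map = raw.argmax(axis=2).astype(np.uint8)   # HxW class indices
colour_mask = cmap[class_map]                     # HxWx3 uint8 colour image
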
Example #3
def single_24(filepath):
    cmap = np.load('../utils/cmap.npy')
    image = np.array(Image.open(filepath))
    orig_size = image.shape[:2][::-1]
    image = torch.tensor(prepare_img(image).transpose(2, 0, 1)[None]).float()
    # Run the segmenter
    segm = segmenter(image)[0].data.cpu().numpy().transpose(1, 2, 0)
    segm = cv2.resize(segm, orig_size, interpolation=cv2.INTER_CUBIC)
    segm = cmap[segm.argmax(axis=2).astype(np.uint8)]
    segm = transforms.ToPILImage()(segm)

    name = os.path.basename(filepath)
    print(RESULT_DIR + name)
    segm.save(RESULT_DIR + name)
    # Extract edges from the saved segmentation with Canny
    img = cv2.imread(RESULT_DIR + name)
    img = cv2.Canny(img, 80, 150)
    cv2.imwrite(EDGE_DIR + name, img, [int(cv2.IMWRITE_JPEG_QUALITY), 80])
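
single_24 depends on module-level globals that are not shown in this excerpt (segmenter, transforms, RESULT_DIR, EDGE_DIR). A hedged driver sketch with those globals filled in as assumptions:

# Hypothetical driver for single_24; directory names are assumptions.
import glob
import os

RESULT_DIR = 'results/'
EDGE_DIR = 'edges/'
os.makedirs(RESULT_DIR, exist_ok=True)
os.makedirs(EDGE_DIR, exist_ok=True)

for path in glob.glob('images/*.jpg'):
    single_24(path)
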
Example #4
def segment(framepath,frames):
    cmap = np.load('LightWeight Semantic Segmentation/utils/cmap.npy')
    has_cuda = torch.cuda.is_available()
    n_classes = 60
    
    # Initialise models
    model_inits = { 
    'rf_lw152_context'   : rf_lw152,
    }

    models = dict()
    for key,fun in six.iteritems(model_inits):
        net = fun(n_classes, pretrained=True).eval()
        if has_cuda:
            net = net.cuda()
        models[key] = net
        
    dataset_root = framepath
    output_root = join(framepath,'Seg')
    if not os.path.exists(output_root):
        os.mkdir(output_root)
        
    
    with torch.no_grad():
        dir_frames= join(dataset_root,'Frames')
        dir_out_OF= output_root
        if not os.path.exists(dir_out_OF):
            os.mkdir(dir_out_OF)
        for j in range(0,frames):
            img = np.array(cv2.imread(join(dir_frames, '{:d}'.format(j) + '.jpg'))[:, :, ::-1])  # BGR -> RGB
            orig_size = img.shape[:2][::-1]
            
            img_inp = torch.tensor(prepare_img(img).transpose(2, 0, 1)[None]).float()
            if has_cuda:
                img_inp = img_inp.cuda() 
            for mname, mnet in six.iteritems(models):
                segm = mnet(img_inp)[0].data.cpu().numpy().transpose(1, 2, 0)
                segm = cv2.resize(segm, orig_size, interpolation=cv2.INTER_CUBIC)
                segm = cmap[segm.argmax(axis=2).astype(np.uint8)]

            print(j)
            np.savez_compressed(join(dir_out_OF, '{}'.format(j) + '.npz'), segm)
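
Each frame's colour-mapped mask is written with np.savez_compressed under the default array key. A brief hedged sketch of reading one saved frame back:

# Hedged example of loading one saved mask; the path mirrors the code above.
import numpy as np
from os.path import join

data = np.load(join(output_root, '0.npz'))
mask = data['arr_0']           # default key for a positional savez_compressed argument
print(mask.shape, mask.dtype)  # HxWx3 colour-mapped segmentation
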
Example #5
thresh = 0.7

if has_cuda:
    net = net.cuda()
with torch.no_grad():
    for img_path in imgs:
        # img = np.array(Image.open(img_path))
        img = cv2.imread(img_path)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        orig_size = img.shape[:2][::-1]

        img_inp = torch.tensor(prepare_img(img).transpose(2, 0,
                                                          1)[None]).float()
        if has_cuda:
            img_inp = img_inp.cuda()

        segm = net(img_inp)[0].data.cpu().numpy().transpose(1, 2, 0)
        # print(segm.dtype)
        segm = softmax(segm, axis=2)  # per-pixel class probabilities (softmax over channels)

        # print(segm)

        segm = cv2.resize(segm, orig_size, interpolation=cv2.INTER_CUBIC)
        # segm = cmap[segm.argmax(axis=2).astype(np.uint8)]
        # print(segm.shape)
        for channel in range(3):
            grid = segm[:, :, channel]
            grid[grid >= thresh] = 1.0
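
The excerpt stops after the thresholding line; a vectorised, hedged equivalent of what the loop appears to be building (binary masks for the first three class channels) is:

# Hedged sketch; `probs` stands in for the resized softmax map computed above,
# and only the first three channels are thresholded, as in the loop above.
masks = (probs[:, :, :3] >= thresh).astype(np.uint8)   # HxWx3 binary masks
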
Example #6
def img_path_to_tensor(img_path):
    img = prepare_img(np.array(Image.open(img_path)))
    return torch.tensor(img.transpose(2, 0, 1)[None]).float().to(DEVICE)
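
A hedged usage example for the helper above; net and the decode step are assumptions consistent with the other snippets on this page:

# Hypothetical usage of img_path_to_tensor with a segmentation network.
with torch.no_grad():
    inp = img_path_to_tensor('example.jpg')
    out = net(inp)[0].cpu().numpy().transpose(1, 2, 0)   # HxWxC scores
    classes = out.argmax(axis=2)                         # HxW predicted class ids
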
Example #7
for img in os.listdir(img_dir):
    if img.endswith('.jpg'):
        imgs.append(os.path.join(img_dir, img))

n_rows = len(imgs)

plt.figure(figsize=(16, 12))
idx = 1

with torch.no_grad():
    for img_path in sorted(imgs):
        img = np.array(Image.open(img_path))
        msk = color_map[np.array(Image.open(img_path.replace('.jpg', '.png')))]
        orig_size = img.shape[:2][::-1]

        img_inp = torch.from_numpy(prepare_img(img).transpose(
            2, 0, 1)[None]).float()

        plt.subplot(n_rows, 3, idx)
        plt.imshow(img)
        plt.title('img')
        plt.axis('off')
        idx += 1

        plt.subplot(n_rows, 3, idx)
        plt.imshow(msk)
        plt.title('gt')
        plt.axis('off')
        idx += 1

        segm = net(img_inp)[0].data.cpu().numpy().transpose(1, 2, 0)
        segm = cv2.resize(segm, orig_size, interpolation=cv2.INTER_CUBIC)
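        # Hedged continuation (not in the original excerpt): decode the
        # prediction with the same colour map and draw it as the third column.
        segm = color_map[segm.argmax(axis=2).astype(np.uint8)]

        plt.subplot(n_rows, 3, idx)
        plt.imshow(segm)
        plt.title('pred')
        plt.axis('off')
        idx += 1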
Example #8
def main():
    # Set-up experiment
    args = get_arguments()
    logger = logging.getLogger(__name__)
    exp_name = time.strftime('%H_%M_%S')
    # dir_name = '{}/{}'.format(args.summary_dir, exp_name)
    # if not os.path.exists(dir_name):
    #     os.makedirs(dir_name)
    # arch_writer = open('{}/genotypes.out'.format(dir_name), 'w')
    logger.info(" Running Experiment {}".format(exp_name))
    args.num_tasks = len(NUM_CLASSES[args.dataset_type])
    segm_crit = nn.NLLLoss2d(ignore_index=255).cuda()  # nn.NLLLoss also handles 4-D input in recent PyTorch
    # Set-up random seeds
    torch.manual_seed(args.random_seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.random_seed)
    np.random.seed(args.random_seed)
    random.seed(args.random_seed)

    # Create dataloaders
    train_loader, val_loader, do_search = create_loaders(args)

    def create_segmenter(encoder, decoder_config):
        with torch.no_grad():
            decoder = Decoder(
                inp_sizes=encoder.out_sizes,
                num_classes=NUM_CLASSES[args.dataset_type][0],
                config=decoder_config,
                agg_size=48,  # hard-coded instead of args.agg_cell_size
                aux_cell=True,  #args.aux_cell,
                repeats=1)  #args.sep_repeats)

        # Fuse encoder and decoder
        segmenter = nn.DataParallel(Segmenter(encoder, decoder)).cuda()
        logger.info(" Created Segmenter, #PARAMS (Total, No AUX)={}".format(
            compute_params(segmenter)))
        return segmenter  #, entropy, log_prob

    for decoder_config in decoder_config_arry:
        # Initialise encoder
        encoder = create_encoder()
        logger.info(" Loaded Encoder with #TOTAL PARAMS={:3.2f}M".format(
            compute_params(encoder)[0] / 1e6))
        # Sample first configuration
        segmenter = create_segmenter(encoder, decoder_config)
        del encoder

        logger.info(" Loaded Encoder with #TOTAL PARAMS={:3.2f}M".format(
            compute_params(segmenter)[0] / 1e6))

        # Saver: keeping checkpoint with best validation score (a.k.a best reward)
        now = datetime.datetime.now()

        snapshot_dir = args.snapshot_dir + '_train_' + args.dataset_type + "_{:%Y%m%dT%H%M}".format(
            now)
        seg_saver = seg_Saver(ckpt_dir=snapshot_dir)

        arch_writer = open('{}/genotypes.out'.format(snapshot_dir), 'w')
        arch_writer.write('genotype: {}\n'.format(decoder_config))
        arch_writer.flush()

        logger.info(" Pre-computing data for task0")
        kd_net = None  # stub the kd

        logger.info(" Training Process Starts")
        for task_idx in range(args.num_tasks):  #0,1
            if task_idx == 0:
                continue
            torch.cuda.empty_cache()
            # Change dataloader
            train_loader.batch_sampler.batch_size = BATCH_SIZE[
                args.dataset_type][task_idx]

            logger.info(" Training Task {}".format(str(task_idx)))
            # Optimisers
            optim_enc, optim_dec = create_optimisers(
                args.optim_enc, args.optim_dec, args.lr_enc[task_idx],
                args.lr_dec[task_idx], args.mom_enc[task_idx],
                args.mom_dec[task_idx], args.wd_enc[task_idx],
                args.wd_dec[task_idx], segmenter.module.encoder.parameters(),
                segmenter.module.decoder.parameters())
            kd_crit = None  #stub the kd
            for epoch_segm in range(TRAIN_EPOCH_NUM[args.dataset_type]
                                    [task_idx]):  # [5,1] [20,8]
                final_loss = train_segmenter(
                    segmenter,  # train the segmenter end to end once
                    train_loader,
                    optim_enc,
                    optim_dec,
                    epoch_segm,
                    segm_crit,
                    args.freeze_bn[1],
                    args.enc_grad_clip,
                    args.dec_grad_clip,
                    args.do_polyak,
                    args.print_every,
                    aux_weight=args.dec_aux_weight,
                    # avg_param=avg_param,
                    polyak_decay=0.99)
        seg_saver.save(final_loss, segmenter.state_dict(), logger)  #stub to 1
        # validate
        segmenter.eval()
        data_file = dataset_dirs[args.dataset_type]['VAL_LIST']
        data_dir = dataset_dirs[args.dataset_type]['VAL_DIR']
        with open(data_file, 'rb') as f:
            datalist = f.readlines()
        try:
            datalist = [
                (k, v) for k, v, _ in \
                map(lambda x: x.decode('utf-8').strip('\n').split('\t'), datalist)]
        except ValueError:  # Adhoc for test.
            datalist = [
                (k, k)
                for k in map(lambda x: x.decode('utf-8').strip('\n'), datalist)
            ]
        imgs_all = [
            os.path.join(data_dir, datalist[i][0])
            for i in range(0, len(datalist))
        ]
        msks_all = [
            os.path.join(data_dir, datalist[i][1])
            for i in range(0, len(datalist))
        ]
        validate_output_dir = os.path.join(
            dataset_dirs[args.dataset_type]['VAL_DIR'], 'validate_output')
        validate_gt_dir = os.path.join(
            dataset_dirs[args.dataset_type]['VAL_DIR'], 'validate_gt')
        if not os.path.exists(validate_output_dir):
            os.makedirs(validate_output_dir)
        else:
            shutil.rmtree(validate_output_dir)
            os.makedirs(validate_output_dir)

        if not os.path.exists(validate_gt_dir):
            os.makedirs(validate_gt_dir)
        else:
            shutil.rmtree(validate_gt_dir)
            os.makedirs(validate_gt_dir)
        # validate_color_dir = os.path.join(dataset_dirs[args.dataset_type]['VAL_DIR'], 'validate_output_color')
        for i, img_path in enumerate(imgs_all):
            # logger.info("Testing image:{}".format(img_path))
            img = np.array(Image.open(img_path))
            msk = np.array(Image.open(msks_all[i]))
            orig_size = img.shape[:2][::-1]

            img_inp = torch.tensor(prepare_img(img).transpose(
                2, 0, 1)[None]).float().to(device)
            segm = segmenter(
                img_inp)[0].squeeze().data.cpu().numpy().transpose(
                    (1, 2, 0))  # 47*63*21
            if args.dataset_type == 'celebA':
                # msk = cv2.resize(msk,segm.shape[0:2],interpolation=cv2.INTER_NEAREST)
                segm = cv2.resize(segm,
                                  orig_size,
                                  interpolation=cv2.INTER_CUBIC)  # 375*500*21
            else:
                segm = cv2.resize(segm,
                                  orig_size,
                                  interpolation=cv2.INTER_CUBIC)  # 375*500*21
            segm = segm.argmax(axis=2).astype(np.uint8)

            image_name = img_path.split('/')[-1].split('.')[0]
            # image_name = val_loader.dataset.datalist[i][0].split('/')[1].split('.')[0]
            # cv2.imwrite(os.path.join(validate_color_dir, "{}.png".format(image_name)), color_array[segm])
            # cv2.imwrite(os.path.join(validate_gt_dir, "{}.png".format(image_name)), color_array[msk])
            cv2.imwrite(
                os.path.join(validate_output_dir, "{}.png".format(image_name)),
                segm)
            cv2.imwrite(
                os.path.join(validate_gt_dir, "{}.png".format(image_name)),
                msk)

        if args.dataset_type == 'celebA':
            cal_f1_score_celebA(validate_gt_dir, validate_output_dir,
                                arch_writer)  # temp comment
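
Since segm_crit above is an NLLLoss-style criterion, the segmenter is expected to emit log-probabilities (or to have log_softmax applied before the loss). A minimal hedged sketch of how the criterion would typically be applied inside train_segmenter; segmenter, images and labels are placeholder names, not taken from this code:

# Hedged sketch of a single loss computation with the NLLLoss criterion.
import torch.nn.functional as F

logits = segmenter(images)                              # N x C x h x w
logits = F.interpolate(logits, size=labels.shape[1:],
                       mode='bilinear', align_corners=False)
loss = segm_crit(F.log_softmax(logits, dim=1), labels.long())
loss.backward()
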