Code example #1
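The excerpts below omit their module-level imports. The following is a sketch inferred from the names used in the code: the third-party imports are the usual suspects (word_tokenize is assumed to come from NLTK), while the project-local imports are only indicated as comments because their module paths are not shown in these excerpts.

import os.path as osp

import cv2
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.utils.data import DataLoader
from nltk.tokenize import word_tokenize

# Project-local helpers and classes used below; module paths are assumptions:
# from ... import abstract_scene, image_normalize, indices2onehots, tensor_to_img
# from ... import SupervisedTrainer, TextEncoder, ImageEncoder, WhatDecoder, WhereDecoder
# from ... import DrawModel, simulator, evaluator, scene_graph, eval_info
# from ... import json_load, maybe_create, get_n_params, prepare_data, further_token_process
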
def train_model(config):
    """Train the supervised model on the abstract scene dataset splits."""
    transformer = image_normalize('background')
    train_db = abstract_scene(config, split='train', transform=transformer)
    val_db = abstract_scene(config, split='val', transform=transformer)
    test_db = abstract_scene(config, split='test', transform=transformer)

    trainer = SupervisedTrainer(train_db)
    trainer.train(train_db, val_db, test_db)
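
How train_model gets its config is not shown in these excerpts; a minimal driver might look like the sketch below, where get_config is a hypothetical placeholder for the project's actual configuration loader.

if __name__ == '__main__':
    # get_config() is hypothetical; substitute however the project builds its config.
    config = get_config()
    train_model(config)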
Code example #2
def test_loader(config):
    """Pull one batch from the DataLoader and print the shapes of its tensors."""
    from torch.utils.data import DataLoader
    transformer = image_normalize('background')

    # traindb = abstract_scene(config, 'train', transform=transformer)
    # print('traindb', len(traindb))
    # valdb   = abstract_scene(config, 'val', transform=transformer)
    # print('valdb', len(valdb))
    # testdb  = abstract_scene(config, 'test', transform=transformer)
    # print('testdb', len(testdb))

    # print(testdb.scenedb[0]['scene_idx'], testdb.scenedb[-1]['scene_idx'])

    db = abstract_scene(config, 'val', transform=transformer)
    
    loader = DataLoader(db, 
        batch_size=config.batch_size, 
        shuffle=True, 
        num_workers=config.num_workers)
    for cnt, batched in enumerate(loader):
        print(batched['scene_idx'].size())
        print(batched['word_inds'].size())
        print(batched['word_lens'].size())
        print(batched['word_inds'][0])
        print(batched['word_lens'][0])
        print(batched['background'].size())
        print(batched['out_inds'].size())
        print(batched['out_msks'].size())
        print(batched['fg_inds'].size())
        print(batched['hmaps'].size())
        print(batched['out_inds'][0,:,0])
        print(batched['fg_inds'][0])
        break
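
The block of print statements above can also be written as a generic loop over the collated batch; a small sketch, assuming every entry of interest is a torch.Tensor:

import torch

def print_batch_shapes(batched):
    # Print the shape of every tensor-valued entry in a collated batch dict.
    for key, value in batched.items():
        if torch.is_tensor(value):
            print(key, tuple(value.shape))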
Code example #3
def test_dataset(config):
    """Render a few random scenes step by step and save the panels next to the ground-truth image."""
    db = abstract_scene(config, 'train')
    plt.switch_backend('agg')
    output_dir = osp.join(config.model_dir, 'test_abstract_scene')
    maybe_create(output_dir)

    indices = np.random.permutation(range(len(db)))
    indices = indices[:config.n_samples]

    for i in indices:
        entry = db[i]
        scene = db.scenedb[i]
        # print('cls_inds: ', scene['cls_inds'])
        imgs = db.render_scene_as_input(scene, True, True)
        name = osp.splitext(osp.basename(entry['color_path']))[0]
        out_path = osp.join(output_dir, name+'.png')
        fig = plt.figure(figsize=(16, 8))
        plt.suptitle(entry['sentence'])

        for j in range(min(len(imgs), 11)):
            plt.subplot(4, 3, j+1)
            plt.imshow(imgs[j,:,:,::-1].astype(np.uint8))
            plt.axis('off')

        target = cv2.imread(entry['color_path'], cv2.IMREAD_COLOR)
        plt.subplot(4, 3, 12)
        plt.imshow(target[:,:,::-1])
        plt.axis('off')
        
        fig.savefig(out_path, bbox_inches='tight')
        plt.close(fig)
Code example #4
def test_lang_vocab(config):
    """Print sentence-length and per-scene object-count statistics, plus the vocabulary size."""
    db = abstract_scene(config, 'train')
    scenedb = db.scenedb
    lang_vocab = db.lang_vocab

    sent_lens = []
    for i in range(len(scenedb)):
        group_sents = scenedb[i]['scene_sentences']
        for j in range(len(group_sents)):
            triplet = group_sents[j]
            for k in range(len(triplet)):
                sentence = triplet[k]
                tokens = word_tokenize(sentence.lower())
                tokens = further_token_process(tokens)
                word_inds = [lang_vocab.word_to_index(w) for w in tokens]
                # word_inds = [wi for wi in word_inds if wi > config.EOS_idx] 
                sent_lens.append(len(word_inds))

    print('sent len: ', np.median(sent_lens), np.amax(sent_lens), np.amin(sent_lens))
    ps = [80.0, 90.0, 95.0, 99.0]
    for p in ps:
        print('p %d/100: '%(int(p)), np.percentile(sent_lens, p))
    # 10.0 50 6

    print("vocab size: ", len(lang_vocab.index2word))
    print("vocab: ", lang_vocab.index2word[:10])

    obj_lens = []
    for i in range(len(scenedb)):
        clses = scenedb[i]['clips']
        obj_lens.append(len(clses))
    print('obj len: ', np.median(obj_lens), np.amax(obj_lens), np.amin(obj_lens))
    for p in ps:
        print('p %d/100: '%(int(p)), np.percentile(obj_lens, p))
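
The median/max/min and percentile reporting is repeated for sentence lengths and object counts; a small helper (hypothetical, not part of the project) would keep it in one place:

import numpy as np

def print_length_stats(name, lengths, percentiles=(80.0, 90.0, 95.0, 99.0)):
    # Summarize a list of lengths the same way test_lang_vocab does by hand.
    print('%s len: ' % name, np.median(lengths), np.amax(lengths), np.amin(lengths))
    for p in percentiles:
        print('p %d/100: ' % int(p), np.percentile(lengths, p))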
Code example #5
def test_clip_and_triplet(config):
    """Check that converting a clip index to a triplet and back is a lossless round trip."""
    db = abstract_scene(config)
    clip_vocab = db.clip_vocab

    for i in range(len(clip_vocab.index2word)):
        o, p, e = db.clip_to_triplet(i)
        w = clip_vocab.index2word[i]
        print(i, o, p, e, w)
        j = db.triplet_to_clip([o, p, e])
        assert(i == j)
Code example #6
def test_decoder(config):
    """Push one batch through the text/image encoders and the what/where decoders, printing shapes."""
    transformer = image_normalize('background')
    db = abstract_scene(config, 'train', transform=transformer)

    text_encoder = TextEncoder(db)
    img_encoder = ImageEncoder(config)
    what_decoder = WhatDecoder(config)
    where_decoder = WhereDecoder(config)

    # print(where_decoder)

    loader = DataLoader(db,
                        batch_size=4,
                        shuffle=False,
                        num_workers=config.num_workers)

    for cnt, batched in enumerate(loader):
        word_inds = batched['word_inds'].long()
        word_lens = batched['word_lens'].long()
        bg_imgs = batched['background'].float()

        encoder_states = text_encoder(word_inds, word_lens)
        bg_feats = img_encoder(bg_imgs)

        prev_bgfs = bg_feats[:, 0].unsqueeze(1)
        prev_states = {}
        prev_states['bgfs'] = prev_bgfs
        what_outs = what_decoder(prev_states, encoder_states)

        print('------------------------------------------')
        print('obj_logits', what_outs['obj_logits'].size())
        print('rnn_outs', what_outs['rnn_outs'][0].size())
        print('hids', what_outs['hids'][0].size())
        print('attn_ctx', what_outs['attn_ctx'].size())
        print('attn_wei', what_outs['attn_wei'].size())

        obj_logits = what_outs['obj_logits']
        # print('obj_logits ', obj_logits.size())
        _, obj_inds = torch.max(obj_logits + 1.0, dim=-1)
        curr_fgfs = indices2onehots(obj_inds.cpu().data,
                                    config.output_cls_size)
        # curr_fgfs = curr_fgfs.unsqueeze(1)
        if config.cuda:
            curr_fgfs = curr_fgfs.cuda()
        curr_fgfs = curr_fgfs.float()
        what_outs['fgfs'] = curr_fgfs

        where_outs = where_decoder(what_outs, encoder_states)

        print('coord_logits ', where_outs['coord_logits'].size())
        print('attri_logits ', where_outs['attri_logits'].size())
        print('attn_ctx', where_outs['attn_ctx'].size())
        print('attn_wei', where_outs['attn_wei'].size())

        break
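
indices2onehots is a project helper whose implementation is not shown here; torch.nn.functional.one_hot does the same kind of conversion, so a plausible stand-in (an assumption, not the project's code) is:

import torch
import torch.nn.functional as F

def to_onehots(indices, num_classes):
    # Convert a LongTensor of class indices into a float one-hot tensor,
    # presumably what indices2onehots does above.
    return F.one_hot(indices, num_classes=num_classes).float()

# e.g. to_onehots(torch.tensor([1, 3]), 5) has shape (2, 5)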
Code example #7
def abstract_demo(config, input_app):
    """Sample demo scenes for the sentences passed in via input_app."""
    transformer = image_normalize('background')
    train_db = abstract_scene(config, split='train', transform=transformer)
    trainer = SupervisedTrainer(train_db)
    # Note: input_sentences is loaded but not used below; the demo runs on
    # input_app instead.
    input_sentences = json_load('examples/abstract_samples.json')
    #print(type(input_sentences))
    #print(input_sentences[0])
    #input_sentences_2 = prepare_data('Mike talks to the dog.,Jenny kicks the soccer ball.,The duck wants to play.')
    #print(input_sentences_2)
    input_app = prepare_data(input_app)
    #print(type(input_sentences_2))
    #print(input_app)
    #print(type(input_app))
    trainer.sample_demo(input_app)
Code example #8
def test_img_encoder(config):
    """Print the image encoder's parameter count and its output size for one batch of backgrounds."""
    transformer = image_normalize('background')
    db = abstract_scene(config, 'train', transform=transformer)
    img_encoder = ImageEncoder(config)
    print(get_n_params(img_encoder))

    loader = DataLoader(db,
                        batch_size=config.batch_size,
                        shuffle=False,
                        num_workers=config.num_workers)

    for cnt, batched in enumerate(loader):
        bg_imgs = batched['background'].float()
        y = img_encoder(bg_imgs)
        print(y.size())
        break
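
get_n_params is another project helper not shown in these excerpts; counting the parameters of a torch.nn.Module is usually a one-liner, so a plausible stand-in (an assumption) is:

def count_params(model):
    # Total number of scalar parameters in a torch.nn.Module.
    return sum(p.numel() for p in model.parameters())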
Code example #9
def test_txt_encoder_abstract(config):
    """Check the text encoder's output shapes and that its embedding reproduces the vocab vectors."""
    transformer = image_normalize('background')
    db = abstract_scene(config, 'train', transform=transformer)
    net = TextEncoder(db)

    loader = DataLoader(db,
                        batch_size=config.batch_size,
                        shuffle=False,
                        num_workers=config.num_workers)

    for cnt, batched in enumerate(loader):
        input_inds = batched['word_inds'].long()
        input_lens = batched['word_lens'].long()

        print('Checking the output shapes')
        out = net(input_inds, input_lens)
        out_rfts = out['rfts']
        out_embs = out['embs']
        out_msks = out['msks']
        out_hids = out['hids']
        print(out_rfts.size(), out_embs.size(), out_msks.size())
        if isinstance(out_hids[0], tuple):
            print(out_hids[0][0].size())
        else:
            print(out_hids[0].size())
        print('m: ', out_msks[-1])

        print('Checking the embedding')
        embeded = net.embedding(input_inds[:, 0, :])
        v1 = embeded[0, 0]
        idx = input_inds[0, 0, 0].data.item()
        v2 = db.lang_vocab.vectors[idx]
        diff = v2 - v1
        print('Diff: (should be zero)', torch.sum(diff.abs_()))

        break
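
The hand-rolled difference printed at the end of the loop can be expressed with torch.allclose; the helper below is a sketch of the same check, with a cast in case the vocab vectors are stored at a different float precision:

import torch

def embedding_matches_vocab(net, db, input_inds):
    # Same check as above: the embedding of the first token of the first
    # caption should equal the corresponding pretrained vocab vector.
    v1 = net.embedding(input_inds[:, 0, :])[0, 0]
    v2 = db.lang_vocab.vectors[input_inds[0, 0, 0].data.item()]
    return torch.allclose(v1, v2.to(v1.dtype))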
Code example #10
def test_model(config):
    """Load a pretrained DrawModel, run teacher forcing and scheduled sampling
    on one batch, and save the redrawn scenes."""
    transformer = image_normalize('background')
    db = abstract_scene(config, 'val', transform=transformer)
    net = DrawModel(db)

    plt.switch_backend('agg')
    output_dir = osp.join(config.model_dir, 'test_model')
    maybe_create(output_dir)

    pretrained_path = osp.join(
        '../data/caches/abstract_ckpts/supervised_abstract_top1.pkl')
    assert osp.exists(pretrained_path)
    if config.cuda:
        states = torch.load(pretrained_path)
    else:
        states = torch.load(pretrained_path,
                            map_location=lambda storage, loc: storage)
    net.load_state_dict(states)

    loader = DataLoader(db,
                        batch_size=config.batch_size,
                        shuffle=False,
                        num_workers=config.num_workers)

    net.eval()
    for cnt, batched in enumerate(loader):
        word_inds = batched['word_inds'].long()
        word_lens = batched['word_lens'].long()
        bg_images = batched['background'].float()

        fg_inds = batched['fg_inds'].long()
        gt_inds = batched['out_inds'].long()
        gt_msks = batched['out_msks'].float()
        hmaps = batched['hmaps'].float()

        fg_onehots = indices2onehots(fg_inds, config.output_cls_size)

        inf_outs = net.teacher_forcing(word_inds, word_lens, bg_images,
                                       fg_onehots, hmaps)
        print('teacher forcing')
        print('obj_logits ', inf_outs['obj_logits'].size())
        print('coord_logits ', inf_outs['coord_logits'].size())
        print('attri_logits ', inf_outs['attri_logits'].size())
        if config.what_attn:
            print('what_att_logits ', inf_outs['what_att_logits'].size())
        if config.where_attn > 0:
            print('where_att_logits ', inf_outs['where_att_logits'].size())
        print('----------------------')
        # inf_outs, env = net(word_inds, word_lens, -1, 0, 0, gt_inds)
        inf_outs, env = net(word_inds, word_lens, 0, 1, 0, None)
        print('scheduled sampling')
        print('obj_logits ', inf_outs['obj_logits'].size())
        print('coord_logits ', inf_outs['coord_logits'].size())
        print('attri_logits ', inf_outs['attri_logits'].size())
        if config.what_attn:
            print('what_att_logits ', inf_outs['what_att_logits'].size())
        if config.where_attn > 0:
            print('where_att_logits ', inf_outs['where_att_logits'].size())
        print('----------------------')

        pred_out_inds, pred_out_msks = env.get_batch_inds_and_masks()
        print('pred_out_inds', pred_out_inds[0, 1], pred_out_inds.shape)
        print('gt_inds', gt_inds[0, 1], gt_inds.size())
        print('pred_out_msks', pred_out_msks[0, 1], pred_out_msks.shape)
        print('gt_msks', gt_msks[0, 1], gt_msks.size())

        batch_frames = env.batch_redraw(True)
        scene_inds = batched['scene_idx']
        for i in range(len(scene_inds)):
            sid = scene_inds[i]
            entry = db[sid]
            name = osp.splitext(osp.basename(entry['color_path']))[0]
            imgs = batch_frames[i]
            out_path = osp.join(output_dir, name + '.png')
            fig = plt.figure(figsize=(16, 8))
            plt.suptitle(entry['sentence'])
            for j in range(len(imgs)):
                plt.subplot(4, 3, j + 1)
                plt.imshow(imgs[j, :, :, ::-1].astype(np.uint8))
                plt.axis('off')

            target = cv2.imread(entry['color_path'], cv2.IMREAD_COLOR)
            plt.subplot(4, 3, 12)
            plt.imshow(target[:, :, ::-1])
            plt.axis('off')

            fig.savefig(out_path, bbox_inches='tight')
            plt.close(fig)
        break
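
As a side note on the checkpoint loading at the top of test_model, map_location='cpu' is an equivalent and shorter way to express the storage lambda; a sketch:

import torch

def load_checkpoint(path, use_cuda):
    # map_location='cpu' does the same thing as the
    # `lambda storage, loc: storage` used in test_model.
    if use_cuda:
        return torch.load(path)
    return torch.load(path, map_location='cpu')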
Code example #11
def test_simulator(config):
    """Replay ground-truth index sequences through the simulator and save the rendered frames."""
    plt.switch_backend('agg')

    output_dir = osp.join(config.model_dir, 'simulator')
    maybe_create(output_dir)

    transformer = image_normalize('background')
    db = abstract_scene(config, 'val', transform=transformer)

    loader = DataLoader(db,
                        batch_size=config.batch_size,
                        shuffle=False,
                        num_workers=config.num_workers)

    env = simulator(db, config.batch_size)
    env.reset()

    for cnt, batched in enumerate(loader):
        out_inds = batched['out_inds'].long().numpy()
        gt_paths = batched['color_path']
        img_inds = batched['image_idx']
        sents = batched['sentence']

        sequences = []
        masks = []
        for i in range(out_inds.shape[1]):
            frames = env.batch_render_to_pytorch(out_inds[:, i])
            frames = tensor_to_img(frames)
            msks = env.batch_location_maps(out_inds[:, i, 3])
            for j in range(len(frames)):
                frame = frames[j]
                # cv2.resize expects dsize as (width, height); pass the
                # interpolation mode as a keyword argument.
                msk = cv2.resize(msks[j], (frame.shape[1], frame.shape[0]),
                                 interpolation=cv2.INTER_NEAREST)
                frames[j] = frame * (1.0 - msk[..., None])
            sequences.append(frames)
        seqs1 = np.stack(sequences, 1)
        print('seqs1', seqs1.shape)
        seqs2 = env.batch_redraw(return_sequence=True)

        seqs = seqs2
        for i in range(len(seqs)):
            imgs = seqs[i]
            image_idx = img_inds[i]
            # int() keeps the zero padding correct whether image_idx is a
            # plain int or a 0-d tensor from the default collate.
            name = '%03d_%09d' % (i, int(image_idx))
            out_path = osp.join(output_dir, name + '.png')
            color = cv2.imread(gt_paths[i], cv2.IMREAD_COLOR)
            # color, _, _ = create_squared_image(color)

            fig = plt.figure(figsize=(32, 16))
            plt.suptitle(sents[i])

            for j in range(len(imgs)):
                plt.subplot(3, 5, j + 1)
                plt.imshow(imgs[j, :, :, ::-1])
                plt.axis('off')

            plt.subplot(3, 5, 15)
            plt.imshow(color[:, :, ::-1])
            plt.axis('off')

            fig.savefig(out_path, bbox_inches='tight')
            plt.close(fig)

        break
Code example #12
def test_evaluator(config):
    """Build scene graphs for pairs of scenes, score them with the evaluator,
    and save annotated side-by-side visualizations."""
    transformer = image_normalize('background')
    db = abstract_scene(config, 'train', transform=transformer)
    output_dir = osp.join(db.cfg.model_dir, 'test_evaluator')
    maybe_create(output_dir)

    ev = evaluator(db)
    for i in range(0, len(db), 2):
        # print('--------------------------------------')
        entry_1 = db[i]
        entry_2 = db[i + 1]
        scene_1 = db.scenedb[i]
        scene_2 = db.scenedb[i + 1]
        name_1 = osp.splitext(osp.basename(entry_1['color_path']))[0]
        name_2 = osp.splitext(osp.basename(entry_2['color_path']))[0]

        graph_1 = scene_graph(db, scene_1, entry_1['out_inds'], True)
        graph_2 = scene_graph(db, scene_2, entry_2['out_inds'], False)

        color_1 = cv2.imread(entry_1['color_path'], cv2.IMREAD_COLOR)
        color_2 = cv2.imread(entry_2['color_path'], cv2.IMREAD_COLOR)

        color_1 = visualize_unigram(config, color_1, graph_1.unigrams,
                                    (225, 0, 0))
        color_2 = visualize_unigram(config, color_2, graph_2.unigrams,
                                    (225, 0, 0))
        color_1 = visualize_bigram(config, color_1, graph_1.bigrams,
                                   (0, 255, 255))
        color_2 = visualize_bigram(config, color_2, graph_2.bigrams,
                                   (0, 255, 255))

        scores = ev.evaluate_graph(graph_1, graph_2)

        color_1 = visualize_unigram(config, color_1, ev.common_pred_unigrams,
                                    (0, 225, 0))
        color_2 = visualize_unigram(config, color_2, ev.common_gt_unigrams,
                                    (0, 225, 0))
        color_1 = visualize_bigram(config, color_1, ev.common_pred_bigrams,
                                   (0, 0, 255))
        color_2 = visualize_bigram(config, color_2, ev.common_gt_bigrams,
                                   (0, 0, 255))

        info = eval_info(config, scores[None, ...])

        plt.switch_backend('agg')
        fig = plt.figure(figsize=(16, 10))
        title = entry_1['sentence'] + '\n' + entry_2['sentence'] + '\n'
        title += 'unigram f3: %f, bigram f3: %f, bigram sim: %f\n' % (
            info.unigram_F3()[0], info.bigram_F3()[0], info.bigram_coord()[0])
        title += 'pose: %f, expr: %f, scale: %f, flip: %f, coord: %f \n' % (
            info.pose()[0], info.expr()[0], info.scale()[0], info.flip()[0],
            info.unigram_coord()[0])

        plt.suptitle(title)
        plt.subplot(1, 2, 1)
        plt.imshow(color_1[:, :, ::-1])
        plt.axis('off')
        plt.subplot(1, 2, 2)
        plt.imshow(color_2[:, :, ::-1])
        plt.axis('off')

        out_path = osp.join(output_dir, name_1 + '_' + name_2 + '.png')
        fig.savefig(out_path, bbox_inches='tight')
        plt.close(fig)

        if i > 40:
            break
Code example #13
def abstract_demo(config):
    """Variant of the abstract_demo in code example #7: reads its sentences from
    examples/abstract_samples.json instead of taking them as an argument."""
    transformer = image_normalize('background')
    train_db = abstract_scene(config, split='val', transform=transformer)
    trainer = SupervisedTrainer(train_db)
    input_sentences = json_load('examples/abstract_samples.json')
    trainer.sample_demo(input_sentences)