Code Example #1
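Excerpt of a sampling script: it creates the output directories under `results_dir/whereimage/_sample` for the MS-COCO test split and joins the tokenized reference captions back into sentence strings. The `random` and `PIL.Image` imports are presumably used by the image-sampling and resizing code that follows this excerpt.
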
import random
import PIL.Image

from framework import lib
from framework import data
from framework import config

sample_size = 200
full_img_max_side = 500
thumb_img_max_side = 100

lib.create_dir(config.results_dir + '/whereimage')
lib.create_dir(config.results_dir + '/whereimage/_sample')

for dataset_name in ['mscoco']:
    print(dataset_name)

    lib.create_dir(config.results_dir + '/whereimage/_sample/' + dataset_name)
    lib.create_dir(config.results_dir + '/whereimage/_sample/' + dataset_name +
                   '/full')
    lib.create_dir(config.results_dir + '/whereimage/_sample/' + dataset_name +
                   '/thumb')

    datasources = data.load_datasources(dataset_name)

    images = datasources['test'].get_filenames()

    caps = dict()
    caps['human'] = [[
        ' '.join(sent) for sent in group
    ] for group in datasources['test'].tokenize_sents().get_text_sent_groups()]
Code Example #2
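Main section of a langmodtrans hyperparameter-tuning script: it reads the corpus list from the command line, records the caption-generation training-set size, and skips any corpus whose tuning results already exist. The import lines are re-added here for completeness; in the full script they precede the omitted class definitions.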

import sys

from framework import lib
from framework import data
from framework import config

########################################################################################
if len(sys.argv) == 1:
    corpora = 'lm1b,mscoco,flickr8k'.split(',')
else:
    corpora = sys.argv[1].split(',')

datasources = data.load_datasources(config.langmodtrans_capgen_dataset)
capgen_size = datasources['train'].size
capgen_test = data.load_datasources('mscoco')['test'].shuffle(0).take(
    datasources['test'].num_groups, whole_groups=True
)  # MSCOCO test is never used in the langmodtrans experiments, so we can validate on it
del datasources

lib.create_dir(config.hyperpar_dir + '/langmodtrans')

for corpus in corpora:
    lib.create_dir(config.hyperpar_dir + '/langmodtrans/' + corpus)

    print('=' * 100)
    print(lib.formatted_clock())
    print(corpus, '1 (language model)')
    print()

    if lib.file_exists(config.hyperpar_dir + '/langmodtrans/' + corpus +
                       '/2_best.txt'):
        print('Found ready')
        print()
        continue
Code Example #3
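Excerpt of a foil-selection script for the imageimportance experiments: for each test image it collects the set of nouns (NLTK universal tagset) appearing in its captions, then searches for a foil image with minimal keyword overlap. The snippet is cut off mid-expression inside the `min(...)` key function.
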
import nltk
from scipy.spatial import distance

from framework import lib
from framework import data
from framework import config

lib.create_dir(config.results_dir + '/imageimportance')

for dataset_name in ['flickr8k', 'flickr30k', 'mscoco']:
    print(dataset_name)
    datasources = data.load_datasources(dataset_name)
    datasources['test'].tokenize_sents()

    image_keywords = [{
        token
        for sent in sent_group
        for (token, tag) in nltk.pos_tag(sent, tagset='universal')
        if tag == 'NOUN'
    } for sent_group in datasources['test'].get_text_sent_groups()]

    prog = lib.ProgressBar(len(image_keywords), 5)
    with open(config.results_dir + '/imageimportance/foils_' + dataset_name +
              '.txt',
              'w',
              encoding='utf-8') as f:
        for (i, (curr_img, curr_keywords)) in enumerate(
                zip(datasources['test'].images, image_keywords)):
            index = min(range(len(image_keywords)),
                        key=lambda j:
                        (image_keywords[j] & curr_keywords, -distance.cosine(
Code Example #4
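Header-writing section of an imageimportance results script: it silences TensorFlow's C++ log output, creates the results directory, and begins writing the tab-separated header of `results_langmod.txt`. The `print(...)` call is truncated before its closing parenthesis.
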
import os
import numpy as np
import sys

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

from framework import lib
from framework import model_neural_trad
from framework import evaluation
from framework import data
from framework import config

########################################################################################
lib.create_dir(config.results_dir + '/imageimportance')

architecture = 'langmod'
lib.create_dir(config.results_dir + '/imageimportance/' + architecture)
if not lib.file_exists(config.results_dir + '/imageimportance/' +
                       architecture + '/results_langmod.txt'):
    with open(config.results_dir + '/imageimportance/' + architecture +
              '/results_langmod.txt',
              'w',
              encoding='utf-8') as f:
        print('dataset',
              'run',
              'sent_len',
              'token_index',
              'gradient_wrt_prefix_next',
              'gradient_wrt_prefix_max',
              'gradient_wrt_prevtoken_next',
              'gradient_wrt_prevtoken_max',
Code Example #5
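Tail of a VGG16 feature-extraction helper followed by the start of the dataset loop; `tensorflow` (as `tf`), the `vgg16` wrapper class, and the framework imports are assumed from the omitted top of the file. `close()` flushes any images still buffered in `self.images` through the network; the `dataset = {` literal at the end is truncated.
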
                self.feature_container[fname] = feature.reshape(
                    self.feature_layer_shape)
            self.next_pos = 0

    def close(self):
        if self.next_pos > 0:
            features = self.sess.run(
                self.feature_layer,
                feed_dict={self.vgg.imgs: self.images[:self.next_pos]})
            for (feature, fname) in zip(features, self.fnames):
                self.feature_container[fname] = feature.reshape(
                    self.feature_layer_shape)
            self.next_pos = 0


lib.create_dir(config.dataset_dir)

#####################################################
# Image caption datasets
#####################################################

with tf.Graph().as_default():
    sess = tf.Session()
    imgs = tf.placeholder(tf.float32, [None, 224, 224, 3])
    vgg = vgg16(imgs, config.vgg16_dir + '/vgg16_weights.npz', sess)

for dataset_name in ['flickr8k', 'flickr30k', 'mscoco']:
    print(dataset_name)

    features = dict()
    dataset = {
Code Example #6
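End of an epoch-logging callback followed by the main section of the langmodtrans results script; `sys` and the framework imports are assumed from the omitted top of the file. The header `print(...)` at the end is truncated.
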
              sep='\t')
        self.training_prog = None


########################################################################################
if len(sys.argv) == 1:
    corpora = 'lm1b,mscoco,flickr8k'.split(',')
else:
    corpora = sys.argv[1].split(',')

datasources = data.load_datasources(config.langmodtrans_capgen_dataset)
capgen_size = datasources['train'].size
capgen_test = datasources['test']
del datasources

lib.create_dir(config.results_dir + '/langmodtrans')

for corpus in corpora:
    lib.create_dir(config.results_dir + '/langmodtrans/' + corpus)
    if not lib.file_exists(config.results_dir + '/langmodtrans/' + corpus +
                           '/results.txt'):
        with open(config.results_dir + '/langmodtrans/' + corpus +
                  '/results.txt',
                  'w',
                  encoding='utf-8') as f:
            print('corpus',
                  'frozen_prefix',
                  'corpus_size_factor_exponent',
                  'run',
                  'corpus_size',
                  'langmod_vocab_size',
Code Example #7
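Dataset-preparation section of a whereimage hyperparameter-tuning script: it builds the vocabulary from the tokenized training sentences, assembles a `data.Dataset` that uses a shuffled slice of the MS-COCO validation split as a stand-in test set, and skips architectures that already have a `best.txt`. `datasources` and `architectures` are defined in the omitted part of the file.
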
vocab = datasources['train'].tokenize_sents().text_sents.get_vocab(
    config.min_token_freq)
dataset = data.Dataset(
    vocab=vocab,
    train_datasource=datasources['train'],
    val_datasource=datasources['val'],
    test_datasource=data.load_datasources('mscoco')['val'].shuffle(0).take(
        datasources['test'].num_groups, whole_groups=True),
)
dataset.compile_sents()

test_images = dataset.test.get_images()
test_sents = dataset.test.get_text_sent_groups()

lib.create_dir(config.hyperpar_dir + '/whereimage')

for architecture in architectures:
    lib.create_dir(config.hyperpar_dir + '/whereimage/' + architecture)

    print('=' * 100)
    print(lib.formatted_clock())
    print(architecture)
    print()

    if lib.file_exists(config.hyperpar_dir + '/whereimage/' + architecture +
                       '/best.txt'):
        print('Found ready')
        print()
        continue
Code Example #8
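End of the same epoch-logging callback followed by the main section of the partialtraining script, which writes the header of `results1_earlystop.txt` per corpus. Imports are assumed from the omitted top of the file; the header `print(...)` is truncated.
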
        print(round(train_logpplx, 3),
              round(val_logpplx, 3),
              lib.format_duration(self.epoch_timer.get_duration()),
              sep='\t')
        self.training_prog = None


########################################################################################
if len(sys.argv) == 1:
    corpora = 'flickr8k,mscoco,lm1b'.split(',')
else:
    corpora = sys.argv[1].split(',')

datasources = data.load_datasources(config.langmodtrans_capgen_dataset)
capgen_size = datasources['train'].size
capgen_test = datasources['test']
del datasources

lib.create_dir(config.results_dir + '/partialtraining')

for corpus in corpora:
    lib.create_dir(config.results_dir + '/partialtraining/' + corpus)
    if not lib.file_exists(config.results_dir + '/partialtraining/' + corpus +
                           '/results1_earlystop.txt'):
        with open(config.results_dir + '/partialtraining/' + corpus +
                  '/results1_earlystop.txt',
                  'w',
                  encoding='utf-8') as f:
            print('corpus',
                  'frozen_prefix',
                  'max_epochs',
                  'run',
                  'corpus_size',
                  'langmod_vocab_size',
                  'langmod_num_params',
                  'langmod_mean_prob',
                  'langmod_median_prob',
Code Example #9
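The `epoch_ready` callback, which prints per-epoch training and validation log-perplexity with the epoch duration, followed by the main section of the randomrnn script (run here on `flickr8k` only). The header `print(...)` is truncated.
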
    def epoch_ready(self, model, epoch_num, train_logpplx, val_logpplx):
        if epoch_num == 0:
            print(' ' * lib.ProgressBar.width(5), end=' | \t')
        else:
            print(' | ', end='\t')
        print(round(train_logpplx, 3),
              round(val_logpplx, 3),
              lib.format_duration(self.epoch_timer.get_duration()),
              sep='\t')
        self.training_prog = None


########################################################################################
datasources = data.load_datasources(config.langmodtrans_capgen_dataset)
capgen_size = datasources['train'].size
capgen_test = datasources['test']
del datasources

lib.create_dir(config.results_dir + '/randomrnn')

corpus = 'flickr8k'
lib.create_dir(config.results_dir + '/randomrnn/' + corpus)
if not lib.file_exists(config.results_dir + '/randomrnn/' + corpus +
                       '/results.txt'):
    with open(config.results_dir + '/randomrnn/' + corpus + '/results.txt',
              'w',
              encoding='utf-8') as f:
        print('corpus',
              'frozen_prefix',
              'max_epochs',
              'run',
              'corpus_size',
              'langmod_vocab_size',
              'langmod_num_params',
              'langmod_mean_prob',
              'langmod_median_prob',
Code Example #10
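End of the epoch-logging callback followed by the main section of the whereimage results script: it reads the architecture list from the command line and begins writing the tab-separated results header. The final `print(...)` is truncated.
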
        else:
            print(' | ', end='\t')
        print(round(train_logpplx, 3),
              round(val_logpplx, 3),
              lib.format_duration(self.epoch_timer.get_duration()),
              sep='\t')
        self.training_prog = None


########################################################################################
if len(sys.argv) == 1:
    architectures = 'ceiling,merge,par,pre,init,merge-ext'.split(',')
else:
    architectures = sys.argv[1].split(',')

lib.create_dir(config.results_dir + '/whereimage')

for architecture in architectures:
    lib.create_dir(config.results_dir + '/whereimage/' + architecture)
    if not lib.file_exists(config.results_dir + '/whereimage/' + architecture +
                           '/results.txt'):
        with open(config.results_dir + '/whereimage/' + architecture +
                  '/results.txt',
                  'w',
                  encoding='utf-8') as f:
            print('architecture',
                  'dataset',
                  'run',
                  'vocab_size',
                  'num_params',
                  'mean_prob',