def load_model(self): """ Load everything we need for generating """ # Image-sentence embedding print 'Loading image-sentence embedding...' vse = embedding.load_model(self.config.vsemodel) # VGG-19 print 'Loading and initializing ConvNet...' net = self.build_convnet(self.config.vgg) # Captions print 'Loading captions...' cap = [] with open(self.config.captions, 'rb') as f: for line in f: cap.append(line.strip()) # Caption embeddings print 'Embedding captions...' cvec = embedding.encode_sentences(vse, cap, verbose=False) # Pack up z = {} z['vse'] = vse z['net'] = net z['cap'] = cap z['cvec'] = cvec return z
def load_all(): """ Load everything we need for generating """ print config.paths['decmodel'] # Skip-thoughts print 'Loading skip-thoughts...' stv = skipthoughts.load_model(config.paths['skmodels'], config.paths['sktables']) # Decoder print 'Loading decoder...' dec = decoder.load_model(config.paths['decmodel'], config.paths['dictionary']) # Image-sentence embedding print 'Loading image-sentence embedding...' vse = embedding.load_model(config.paths['vsemodel']) # VGG-19 print 'Loading and initializing ConvNet...' if config.FLAG_CPU_MODE: sys.path.insert(0, config.paths['pycaffe']) import caffe caffe.set_mode_cpu() net = caffe.Net(config.paths['vgg_proto_caffe'], config.paths['vgg_model_caffe'], caffe.TEST) else: net = build_convnet(config.paths['vgg']) # Captions print 'Loading captions...' cap = [] with open(config.paths['captions'], 'rb') as f: for line in f: cap.append(line.strip()) # Caption embeddings print 'Embedding captions...' cvec = embedding.encode_sentences(vse, cap, verbose=False) # Biases print 'Loading biases...' bneg = numpy.load(config.paths['negbias']) bpos = numpy.load(config.paths['posbias']) # Pack up z = {} z['stv'] = stv z['dec'] = dec z['vse'] = vse z['net'] = net z['cap'] = cap z['cvec'] = cvec z['bneg'] = bneg z['bpos'] = bpos return z
def load_all(): """ Load everything we need for generating """ print config.paths['decmodel'] # Skip-thoughts print 'Loading skip-thoughts...' stv = skipthoughts.load_model(config.paths['skmodels'], config.paths['sktables']) # Decoder print 'Loading decoder...' dec = decoder.load_model(config.paths['decmodel'], config.paths['dictionary']) # Image-sentence embedding print 'Loading image-sentence embedding...' vse = embedding.load_model(config.paths['vsemodel']) # Captions print 'Loading captions...' cap = [] with open(config.paths['captions'], 'rb') as f: for line in f: cap.append(line.strip()) # Caption embeddings print 'Embedding captions...' cvec = embedding.encode_sentences(vse, cap, verbose=False) # Biases print 'Loading biases...' bneg = numpy.load(config.paths['negbias']) bpos = numpy.load(config.paths['posbias']) # VGG Net. net = load_vgg() # Pack up z = {} z['stv'] = stv z['dec'] = dec z['vse'] = vse z['net'] = net z['cap'] = cap z['cvec'] = cvec z['bneg'] = bneg z['bpos'] = bpos return z
def load_all(): """ Load everything we need for generating """ print path_to_decmodel # Skip-thoughts print 'Loading skip-thoughts...' stv = skipthoughts.load_model(path_to_skmodels, path_to_sktables) # Decoder print 'Loading decoder...' dec = decoder.load_model(path_to_decmodel, path_to_dictionary) # Image-sentence embedding print 'Loading image-sentence embedding...' vse = embedding.load_model(path_to_vsemodel) # VGG-19 print 'Loading and initializing ConvNet...' net = build_convnet(path_to_vgg) # Captions print 'Loading captions...' cap = [] with open(path_to_captions, 'rb') as f: for line in f: cap.append(line.strip()) # Caption embeddings print 'Embedding captions...' cvec = embedding.encode_sentences(vse, cap, verbose=False) # Biases print 'Loading biases...' bneg = numpy.load(path_to_negbias) bpos = numpy.load(path_to_posbias) # Pack up z = {} z['stv'] = stv z['dec'] = dec z['vse'] = vse z['net'] = net z['cap'] = cap z['cvec'] = cvec z['bneg'] = bneg z['bpos'] = bpos return z
def load_all(c, conn):
    """
    Load everything we need for generating

    c: sqlite3 cursor over a table named `neural` (columns name, value)
    conn: the matching sqlite3 connection (used to commit cache inserts)
    Returns a dict with keys stv, dec, vse, net, cap, cvec, bneg, bpos.
    """
    # Cache helper: fetch a pickled field from the `neural` table, or build
    # it with create_field() and persist it on a miss.
    # NOTE(review): this helper is defined but never called below — every
    # field is loaded/created directly. Either wire it in or delete it.
    # NOTE(review): pkl.loads on DB content deserializes arbitrary pickle
    # data; only safe if the database is fully trusted.
    def load(field_name, create_field):
        c.execute("SELECT value FROM neural WHERE name = ?", (field_name, ))
        cache_field = c.fetchone()
        if not cache_field:
            # Cache miss: build the field, store its pickle, and return it.
            print 'Creating field'
            field = create_field()
            c.execute("INSERT INTO neural VALUES (?, ?)", (field_name, pkl.dumps(field)))
            conn.commit()
            return field
        return pkl.loads(str(cache_field[0]))
    print config.paths['decmodel']
    z = {}
    # Skip-thoughts sentence encoder
    print 'Loading skip-thoughts...'
    z['stv'] = skipthoughts.load_model(config.paths['skmodels'], config.paths['sktables'])
    # Caption decoder
    print 'Loading decoder...'
    z['dec'] = decoder.load_model(config.paths['decmodel'], config.paths['dictionary'])
    # Image-sentence embedding
    print 'Loading image-sentence embedding...'
    z['vse'] = embedding.load_model(config.paths['vsemodel'])
    # VGG-19 convnet (helper defined elsewhere; spelling 'covnet' is as declared)
    print 'Loading and initializing ConvNet (VGG-19)...'
    z['net'] = create_covnet()
    # Captions (helper defined elsewhere)
    print 'Loading captions...'
    z['cap'] = create_captions()
    # Caption embeddings for the loaded captions
    print 'Embedding captions...'
    z['cvec'] = embedding.encode_sentences(z['vse'], z['cap'], verbose=False)
    # Style-shift bias vectors
    print 'Loading biases...'
    z['bneg'] = numpy.load(config.paths['negbias'])
    z['bpos'] = numpy.load(config.paths['posbias'])
    return z
def load_caption(): # Image-sentence embedding print 'Loading image-sentence embedding...' vse = embedding.load_model(config.paths['vsemodel']) # Captions print 'Loading captions...' cap = [] with open(config.paths['captions'], 'rb') as f: for line in f: cap.append(line.strip()) # cap = cap[:100] # Caption embeddings print 'Embedding captions...' cvec = embedding.encode_sentences(vse, cap, verbose=False) return {'cap': cap, 'cvec': cvec, 'vse': vse}