batch_size=batch_size, shuffle=False, num_workers=0) #################################################################################### # Build the Model #################################################################################### vocab_size = dataset_val.vocab_size ques_length = dataset_val.ques_length ans_length = dataset_val.ans_length + 1 his_length = dataset_val.ans_length + dataset_val.ques_length itow = dataset_val.itow img_feat_size = 512 print('init Generative model...') netG = _netG(opt.model, vocab_size, opt.ninp, opt.nhid, opt.nlayers, opt.dropout, False) netE_g = _netE(opt.model, opt.ninp, opt.nhid, opt.nlayers, opt.dropout, img_feat_size) netW_g = model._netW(vocab_size, opt.ninp, opt.dropout) sampler = model.gumbel_sampler() critG = model.G_loss(opt.ninp) critLM = model.LMCriterion() if opt.evalall == False: # opt.model_path_D != '' and opt.model_path_G != '': print('Loading Generative model...') netW_g.load_state_dict(checkpoint['netW']) netE_g.load_state_dict(checkpoint['netE']) netG.load_state_dict(checkpoint['netG']) else: print('Loading Generative model...') netW_g.load_state_dict(checkpoint['netW_g'])
####################################################################################
# Build the Model
####################################################################################
# Sequence lengths and vocabulary come straight from the training dataset.
vocab_size = dataset.vocab_size
ques_length = dataset.ques_length
ans_length = dataset.ans_length + 1  # presumably +1 for an end token — confirm
his_length = dataset.ques_length + dataset.ans_length
itow = dataset.itow                  # index-to-word vocabulary mapping
img_feat_size = opt.conv_feat_size

# Instantiate the encoder, word embedding, and generator networks, together
# with the language-model criterion and the Gumbel sampler.
netE = _netE(opt.model, opt.ninp, opt.nhid, opt.nlayers, opt.dropout, img_feat_size)
netW = model._netW(vocab_size, opt.ninp, opt.dropout)
netG = _netG(opt.model, vocab_size, opt.ninp, opt.nhid, opt.nlayers, opt.dropout)
critG = model.LMCriterion()
sampler = model.gumbel_sampler()

if opt.cuda:
    # Move every module onto the GPU in a single pass.
    for module in (netW, netE, netG, critG, sampler):
        module.cuda()

if opt.model_path != '':
    # Resume from a saved checkpoint when a model path was supplied.
    netW.load_state_dict(checkpoint['netW'])
    netE.load_state_dict(checkpoint['netE'])
    netG.load_state_dict(checkpoint['netG'])
num_workers=int(opt.workers)) #################################################################################### # Build the Model #################################################################################### n_words = dataset_val.vocab_size ques_length = dataset_val.ques_length ans_length = dataset_val.ans_length + 1 his_length = ques_length + dataset_val.ans_length itow = dataset_val.itow img_feat_size = 512 netE = _netE(opt.model, opt.ninp, opt.nhid, opt.nlayers, opt.dropout, img_feat_size) netW = model._netW(n_words, opt.ninp, opt.dropout) netG = _netG(opt.model, n_words, opt.ninp, opt.nhid, opt.nlayers, opt.dropout) critG = model.LMCriterion() sampler = model.gumbel_sampler() if opt.cuda: netW.cpu() netE.cpu() netG.cpu() critG.cpu() sampler.cpu() if opt.model_path != '': netW.load_state_dict(checkpoint['netW_g']) netE.load_state_dict(checkpoint['netE_g']) netG.load_state_dict(checkpoint['netG']) print('Loading model Success!')
# Validation loader: fixed order (no shuffling) for reproducible evaluation.
dataloader_val = torch.utils.data.DataLoader(
    dataset_val,
    batch_size=opt.batchSize,
    shuffle=False,
    num_workers=int(opt.workers),
)

####################################################################################
# Build the Model
####################################################################################
# Sequence lengths and vocabulary come straight from the validation dataset.
n_words = dataset_val.vocab_size
ques_length = dataset_val.ques_length
ans_length = dataset_val.ans_length + 1  # presumably +1 for an end token — confirm
his_length = ques_length + dataset_val.ans_length
itow = dataset_val.itow                  # index-to-word vocabulary mapping
img_feat_size = 512

# Instantiate the encoder, word embedding, and generator networks, together
# with the language-model criterion and the Gumbel sampler.
netE = _netE(opt.model, opt.ninp, opt.nhid, opt.nlayers, opt.dropout, img_feat_size)
netW = model._netW(n_words, opt.ninp, opt.dropout)
netG = _netG(opt.model, n_words, opt.ninp, opt.nhid, opt.nlayers, opt.dropout)
critG = model.LMCriterion()
sampler = model.gumbel_sampler()

if opt.cuda:
    # Move every module onto the GPU in a single pass.
    for module in (netW, netE, netG, critG, sampler):
        module.cuda()

if opt.model_path != '':
    # Restore generator-side weights ('_g'-suffixed keys, except 'netG').
    netW.load_state_dict(checkpoint['netW_g'])
    netE.load_state_dict(checkpoint['netE_g'])
    netG.load_state_dict(checkpoint['netG'])