def validate(dev, gen_test, beam_size, hypo_len, samples, noise_size, glove,
             cmodel=None, adverse=False, diverse=False):
    vgen = val_generator(dev, gen_test, beam_size, hypo_len, noise_size)
    p = Progbar(samples)
    batchez = []
    while p.seen_so_far < samples:
        batch = next(vgen)
        # batch[2] holds per-example losses; perplexity is the mean of 2 ** loss
        perplexity = np.mean(np.power(2, batch[2]))
        loss = np.mean(batch[2])
        losses = [('hypo_loss', loss), ('perplexity', perplexity)]
        if cmodel is not None:
            # Score the classifier on (premise, hypothesis) inputs against the target labels
            ceval = cmodel.evaluate([batch[0], batch[1]], batch[4], verbose=0)
            losses += [('class_loss', ceval[0]), ('class_acc', ceval[1])]
            probs = cmodel.predict([batch[0], batch[1]], verbose=0)
            losses += [('class_entropy', np.mean(-np.sum(probs * np.log(probs), axis=1)))]
        p.add(len(batch[0]), losses)
        batchez.append(batch)
    batchez = merge_result_batches(batchez)

    res = {}
    if adverse:
        val_loss = adverse_validation(dev, batchez, glove)
        print 'adverse_loss:', val_loss
        res['adverse_loss'] = val_loss
    if diverse:
        div, _, _, _ = diversity(dev, gen_test, beam_size, hypo_len, noise_size, 64, 32)
        res['diversity'] = div
    print
    # Average every metric accumulated by the progress bar into the result dict
    for val in p.unique_values:
        arr = p.sum_values[val]
        res[val] = arr[0] / arr[1]
    return res
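
# Hypothetical usage sketch (not part of the original code): `dev_data`,
# `gen_test`, `glove_embeds` and `class_model` are placeholder names assumed
# to be loaded elsewhere in this project, and the parameter values below are
# illustrative only.
def _example_validate_run(dev_data, gen_test, glove_embeds, class_model):
    metrics = validate(dev_data, gen_test, beam_size=1, hypo_len=12,
                       samples=1024, noise_size=50, glove=glove_embeds,
                       cmodel=class_model, adverse=True, diverse=True)
    print 'dev hypo_loss:', metrics['hypo_loss']
    print 'dev perplexity:', metrics['perplexity']
    return metrics
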
def new_generate_dataset(dataset, samples, gen_test, beam_size, hypo_len, noise_size, cmodel):
    vgen = val_generator(dataset, gen_test, beam_size, hypo_len, noise_size)
    p = Progbar(samples)
    batchez = []
    while p.seen_so_far < samples:
        batch = next(vgen)
        # Attach the classifier's probabilities for each generated example to the batch
        probs = cmodel.predict([batch[0], batch[1]], verbose=0)
        batch += (probs,)
        p.add(len(batch[0]))
        batchez.append(batch)
    return merge_result_batches(batchez)
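
# Hypothetical usage sketch (not part of the original code): builds a synthetic
# dataset of `samples` generated examples, each tagged with classifier
# probabilities. `train_data`, `gen_test` and `class_model` are placeholder
# names assumed to be prepared elsewhere; the sizes are illustrative only.
def _example_generate_dataset(train_data, gen_test, class_model):
    synthetic = new_generate_dataset(train_data, samples=100000,
                                     gen_test=gen_test, beam_size=1,
                                     hypo_len=12, noise_size=50,
                                     cmodel=class_model)
    return synthetic
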