# NOTE(review): recovered from a line-collapsed source. The nesting below
# (per-batch updates inside the loop; evaluation every 5 epochs after it)
# follows the alternating-update GAN training pattern — confirm against the
# original file.
for imb, ymb in tqdm(iter_data(trX, trY, size=nbatch), total=ntrain // nbatch):
    # Preprocess the image batch and one-hot encode its labels.
    imb = transform(imb)
    ymb = floatX(OneHot(ymb, ny))
    # Fresh uniform noise vectors, one per image in the batch.
    zmb = floatX(np_rng.uniform(-1.0, 1.0, size=(len(imb), nz)))
    # Alternate updates: one generator step for every k discriminator steps.
    if n_updates % (k + 1) == 0:
        cost = _train_g(imb, zmb, ymb)
    else:
        cost = _train_d(imb, zmb, ymb)
    n_updates += 1
    n_examples += len(imb)
if (epoch - 1) % 5 == 0:
    # Periodic evaluation: nearest-neighbour classifier accuracy (nnc_score)
    # and nearest-neighbour distance (nnd_score) of generated samples against
    # the validation set, at 1k/10k/100k sample budgets.
    g_cost = float(cost[0])
    d_cost = float(cost[1])
    gX, gY = gen_samples(100000)
    gX = gX.reshape(len(gX), -1)
    va_nnc_acc_1k = nnc_score(gX[:1000], gY[:1000], vaX, vaY, metric="euclidean")
    va_nnc_acc_10k = nnc_score(gX[:10000], gY[:10000], vaX, vaY, metric="euclidean")
    va_nnc_acc_100k = nnc_score(gX[:100000], gY[:100000], vaX, vaY, metric="euclidean")
    va_nnd_1k = nnd_score(gX[:1000], vaX, metric="euclidean")
    va_nnd_10k = nnd_score(gX[:10000], vaX, metric="euclidean")
    va_nnd_100k = nnd_score(gX[:100000], vaX, metric="euclidean")
    log = [
        n_epochs,
        n_updates,
        n_examples,
        time() - t,
        va_nnc_acc_1k,
        va_nnc_acc_10k,
        va_nnc_acc_100k,
        va_nnd_1k,
        va_nnd_10k,
        # NOTE(review): source was truncated after va_nnd_10k; the tail below
        # is reconstructed from the metrics computed above — confirm.
        va_nnd_100k,
    ]
def _nnc(inputs, labels, f=None, **kwargs):
    """Nearest-neighbour classifier score for a (validation, train) split.

    Parameters
    ----------
    inputs : pair ``(vaX, trX)`` of sample arrays.
    labels : pair ``(vaY, trY)`` of matching label arrays.
    f : optional feature extractor; when given, both input sets are mapped
        through ``_get_feats(f, ...)`` before scoring.
    **kwargs : forwarded to ``nnc_score`` (e.g. ``metric=...``).

    Bug fix: the original body forwarded ``**kwargs`` but the signature did
    not declare it, so every call raised ``NameError``.  Adding ``**kwargs``
    (default empty) is backward-compatible.
    """
    assert len(inputs) == len(labels) == 2
    if f is not None:
        # Lazily map both input sets through the feature extractor.
        inputs = (_get_feats(f, x) for x in inputs)
    # Unpack (inputs, labels) pairwise: validation first, train second.
    (vaX, trX), (vaY, trY) = inputs, labels
    return nnc_score(flat(trX), trY, flat(vaX), vaY, **kwargs)
# One-hot encode the label batch; draw a fresh uniform noise batch.
ymb = floatX(OneHot(ymb, ny))
zmb = floatX(np_rng.uniform(-1., 1., size=(len(imb), nz)))
# Alternate generator/discriminator updates: one generator step per k
# discriminator steps.
if n_updates % (k + 1) == 0:
    cost = _train_g(imb, zmb, ymb)
else:
    cost = _train_d(imb, zmb, ymb)
n_updates += 1
n_examples += len(imb)
# NOTE(review): recovered from a line-collapsed source; the indentation of
# this every-5-epochs evaluation relative to the surrounding batch loop could
# not be recovered — confirm against the original file.
if (epoch - 1) % 5 == 0:
    g_cost = float(cost[0])
    d_cost = float(cost[1])
    # Score 100k generated samples against the validation set with
    # nearest-neighbour classifier accuracy and nearest-neighbour distance.
    gX, gY = gen_samples(100000)
    gX = gX.reshape(len(gX), -1)
    va_nnc_acc_1k = nnc_score(gX[:1000], gY[:1000], vaX, vaY, metric='euclidean')
    va_nnc_acc_10k = nnc_score(gX[:10000], gY[:10000], vaX, vaY, metric='euclidean')
    va_nnc_acc_100k = nnc_score(gX[:100000], gY[:100000], vaX, vaY, metric='euclidean')
    va_nnd_1k = nnd_score(gX[:1000], vaX, metric='euclidean')
    va_nnd_10k = nnd_score(gX[:10000], vaX, metric='euclidean')
    va_nnd_100k = nnd_score(gX[:100000], vaX, metric='euclidean')