def forward(self, input):
    """Run the input through every level, optionally AdaIN-normalizing.

    With ``self.has_adain`` set, each level's output is normalized by its
    own per-channel (mean, std) from ``compute_stats``; the collected
    statistics are mapped through ``self.adain_map`` and concatenated to
    the FC output.  Without AdaIN this is a plain level stack + FC head.
    """
    out = input
    collected = [] if self.has_adain else None
    for layer in self.levels:
        out = layer(out)
        if self.has_adain:
            # Normalize this level's activations and remember the stats
            # (mean then std, per level) for the AdaIN embedding below.
            mean, std = compute_stats(out)
            collected.extend((mean, std))
            out = (out - mean.unsqueeze(2).unsqueeze(3)) \
                / std.unsqueeze(2).unsqueeze(3)
    flattened = out.contiguous().view(input.size(0), self.top_size)
    out = self.fc(flattened)
    if self.has_adain:
        embedding = self.adain_map(torch.cat(collected, dim=1))
        out = torch.cat((out, embedding), dim=1)
    return out
def forward(self, input):
    """Decode *input* through the generator levels and squash with tanh.

    The starting feature map is either an FC projection of the input
    (``has_fc``) or the learned constant ``self.top_value`` broadcast over
    the batch.  With ``has_adain``, each level's input is renormalized
    with a per-level scale/bias predicted from ``self.adain_init(input)``.
    """
    if self.has_adain:
        adain_out = self.adain_init(input)
    if self.has_fc:
        last = self.fc(input).contiguous().view(
            input.size(0), *self.top_size)
    else:
        # BUG FIX: expand_as() takes a *tensor* argument; broadcasting
        # the learned constant over the batch requires expand() with
        # explicit sizes.  The original expand_as(sizes...) raised a
        # TypeError whenever this branch ran.
        last = self.top_value.unsqueeze(0).expand(
            input.size(0), *self.top_value.size())
    for i in range(len(self.levels)):
        if self.has_adain:
            adain_param = self.adain_maps[i](adain_out)
            half = adain_param.size(1) // 2
            # First half of the params -> positive scale (softplus),
            # second half -> bias; both broadcast over spatial dims.
            scale = F.softplus(adain_param[:, :half]).unsqueeze(2).unsqueeze(3)
            bias = adain_param[:, half:].unsqueeze(2).unsqueeze(3)
            mean, std = compute_stats(last)
            last = last.sub(mean.unsqueeze(2).unsqueeze(3)).div(
                std.unsqueeze(2).unsqueeze(3)).mul(scale).add(bias)
        last = self.levels[i](last)
    return last.tanh()
# Plot a 3D scatter of per-player stats against team heights/weights.
# Usage: <script> SCHOOL STAT PLAYER_NAME [PLAYER_NAME ...]
# NOTE(review): relies on `sys`, `utils`, and `mpatches` being imported
# earlier in the file (not visible in this chunk).
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa F401
import numpy as np

school = sys.argv[1]
stat = sys.argv[2].upper()
pnames = sys.argv[3:]

# Color/marker palettes; cycled when there are more players than entries.
colors = ['r', 'g', 'b', 'c', 'y', 'm', 'k']
markers = ['o', '*', 'h', 'x', 'D', '+', '^']
patches = []  # legend handles — presumably consumed later in the file

# NOTE(review): `load_scheulde` is the (misspelled) utils API name used
# consistently elsewhere in this file; do not "correct" it here alone.
school_roster = utils.load_roster(utils.roster_file_path(school))
school_schedule = utils.load_scheulde(utils.schedule_file_path(school))
stats, heights, weights = utils.compute_stats(school, stat=stat)

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for i, pname in enumerate(pnames):
    color = colors[i % len(colors)]
    marker = markers[i % len(markers)]
    patches.append(mpatches.Patch(color=color, label=pname))
    # One scatter series per player; the z-axis carries that player's stat.
    ax.scatter(np.asarray(heights), np.asarray(weights),
               zs=np.asarray(stats[pname]), c=color, marker=marker)
plt.title('%s Player Stats (%s)' % (school.upper(), stat))
# NOTE(review): this chunk starts mid-function — the `def` of the closure
# whose tail appears first is outside this view, and the statements after
# `return loss_fn` are presumably inside the surrounding training loop
# (indentation reconstructed; confirm against the full file).
    # Accumulate the full-batch loss over ghost batches so memory stays
    # bounded; each term is weighted by its share of the batch so the sum
    # equals the mean cross-entropy over the whole batch.
    loss_fn = torch.tensor(0, dtype=torch.float)
    for subsmpl in np.array_split(Sk, max(int(batch_size / ghost_batch), 1)):
        ops = opfun(X_train[subsmpl])
        if cuda:
            tgts = torch.from_numpy(y_train[subsmpl]).cuda().long().squeeze()
        else:
            tgts = torch.from_numpy(y_train[subsmpl]).long().squeeze()
        loss_fn += F.cross_entropy(ops, tgts) * (len(subsmpl) / batch_size)
    return loss_fn

# perform line search step
options = {'closure': closure, 'current_loss': obj}
obj, grad, lr, _, _, _, _, _ = optimizer.step(p, grad, options=options)

# curvature update
optimizer.curvature_update(grad)

# compute statistics (evaluated in eval mode so e.g. dropout is disabled)
model.eval()
train_loss, test_loss, test_acc = compute_stats(X_train, y_train, X_test,
                                                y_test, opfun, accfun,
                                                ghost_batch=128)

# print data
print('Iter:', n_iter + 1, 'lr:', lr, 'Training Loss:', train_loss,
      'Test Loss:', test_loss, 'Test Accuracy:', test_acc)
def project_matchup(school1, school2):
    """Print the projected score and winner of *school1* vs *school2*.

    Per school, linear models mapping roster (weight, height) to points
    scored and points allowed are fit, then cross-evaluated on the
    opponent's average physiology.  Each projected score is the mean of a
    team's predicted offense and the opponent's predicted defense.  Ties
    (equal integer scores) are broken by the higher weight-to-height
    ratio.  Failures are reported and the exception re-raised.
    """
    print('Computing projected winner of %s vs %s...'
          % (school1.upper(), school2.upper()))
    try:
        with suppress_stdout():
            # NOTE: `load_scheulde` is the utils API's (misspelled) name.
            school1_schedule = utils.load_scheulde(
                utils.schedule_file_path(school1))
            school1_pts = school1_schedule['Team Points'].values
            school1_def = school1_schedule['Opponent Points'].values
            school2_schedule = utils.load_scheulde(
                utils.schedule_file_path(school2))
            school2_pts = school2_schedule['Team Points'].values
            school2_def = school2_schedule['Opponent Points'].values
            school1_avg_w, school1_avg_h = utils.avg_physiology(school1)
            school2_avg_w, school2_avg_h = utils.avg_physiology(school2)
            # assumes weight/height arrays align 1:1 with schedule rows —
            # TODO confirm against utils.compute_stats
            _, school1_ws, school1_hs = utils.compute_stats(school1)
            _, school2_ws, school2_hs = utils.compute_stats(school2)
            # Offense models: (weight, height) -> team points.
            school1_pts_clf = linear_model.LinearRegression()
            school2_pts_clf = linear_model.LinearRegression()
            school1_pts_clf.fit([[school1_ws[i], school1_hs[i]]
                                 for i in range(len(school1_pts))],
                                school1_pts)
            school2_pts_clf.fit([[school2_ws[i], school2_hs[i]]
                                 for i in range(len(school2_pts))],
                                school2_pts)
            # Defense models: (weight, height) -> opponent points.
            school1_def_clf = linear_model.LinearRegression()
            school2_def_clf = linear_model.LinearRegression()
            school1_def_clf.fit([[school1_ws[i], school1_hs[i]]
                                 for i in range(len(school1_def))],
                                school1_def)
            school2_def_clf.fit([[school2_ws[i], school2_hs[i]]
                                 for i in range(len(school2_def))],
                                school2_def)
            # Cross-predict on the opponent's average physiology.
            p1 = school1_pts_clf.predict([[school2_avg_w, school2_avg_h]])
            p2 = school2_pts_clf.predict([[school1_avg_w, school1_avg_h]])
            def1 = school1_def_clf.predict([[school2_avg_w, school2_avg_h]])
            def2 = school2_def_clf.predict([[school1_avg_w, school1_avg_h]])
        # Projected score: mean of own offense and opponent's defense.
        pts1 = (p1 + def2) / 2.0
        pts2 = (p2 + def1) / 2.0
        if int(pts1) != int(pts2):
            print('%d - %d, %s projected to win'
                  % (pts1, pts2,
                     school1.upper() if pts1 > pts2 else school2.upper()))
        else:
            print('%d - %d TIE, computing tiebreaker...' % (pts1, pts2))
            print('%s projected to win'
                  % (school1.upper()
                     if school1_avg_w / school1_avg_h
                     > school2_avg_w / school2_avg_h
                     else school2.upper()))
    except Exception as e:
        print('Failed to get projection with error:', e)
        # BUG FIX: bare `raise` preserves the original traceback;
        # `raise e` re-anchored it to this line, hiding the real source.
        raise
    return
# Per-epoch progress line.
# NOTE(review): `i`, `run_time`, the `log_*` arrays, `plt`, `sns`,
# `compute_stats`, `n_unique_example`, `trial_length`, and `t_noise_off`
# are all defined earlier in the file (outside this chunk); this print
# likely sits inside the training loop — confirm indentation in the
# full file.
print(
    'Epoch %3d | return = %.2f | loss: val = %.2f, pol = %.2f | time = %.2f'
    % (i, log_return[i], log_loss_value[i], log_loss_policy[i], run_time))

'''learning curve'''
# Left panel: return per epoch; right panel: value loss per epoch.
f, axes = plt.subplots(1, 2, figsize=(8, 3))
axes[0].plot(log_return)
axes[0].set_ylabel('Return')
axes[0].set_xlabel('Epoch')
axes[1].plot(log_loss_value)
axes[1].set_ylabel('Value loss')
axes[1].set_xlabel('Epoch')
sns.despine()
f.tight_layout()

'''show behavior'''
# Accuracy at the final logged epoch, split into the first
# `n_unique_example` trials (labelled "w/o memory") vs. the rest
# (labelled "w/ memory").
corrects = log_Y_hat[-1] == log_Y[-1]
acc_mu_no_memory, acc_se_no_memory = compute_stats(
    corrects[:n_unique_example])
acc_mu_has_memory, acc_se_has_memory = compute_stats(
    corrects[n_unique_example:])
n_se = 2  # error bars span +/- 2 standard errors
f, ax = plt.subplots(1, 1, figsize=(7, 4))
ax.errorbar(range(trial_length), y=acc_mu_no_memory,
            yerr=acc_se_no_memory * n_se, label='w/o memory')
ax.errorbar(range(trial_length), y=acc_mu_has_memory,
            yerr=acc_se_has_memory * n_se, label='w/ memory')
# Mark the time step at which input noise was switched off.
ax.axvline(t_noise_off, label='turn off noise', color='grey', linestyle='--')
ax.set_xlabel('Time')