import numpy as np
import torch

# distance, wasserstein, mmd and knn are the metric helpers defined elsewhere in
# this codebase (pairwise distance matrix, EMD, MMD and the 1-NN two-sample test).

def collapse_exp_1(r_feat_val, r_feat, c_feat, pred):
    # Columns of `scores`: emd, mmd, acc_t, acc_f
    n_mode = c_feat.size(0)
    c_feat_repeat = c_feat[pred]                      # mode center assigned to each sample
    scores = np.zeros((n_mode, 4))
    t_feat = r_feat.clone()
    index = torch.arange(0, r_feat.size(0)).long()
    collapsed_order = torch.randperm(n_mode).long()
    Mxx = distance(r_feat_val, r_feat_val, sqrt=False)
    for i in range(n_mode):
        # Compute scores on the current (partially collapsed) feature set
        Mxy = distance(r_feat_val, t_feat, sqrt=False)
        Myy = distance(t_feat, t_feat, sqrt=False)
        scores[i, 0] = wasserstein(Mxy, True)
        scores[i, 1] = mmd(Mxx, Mxy, Myy, 1)
        s = knn(Mxx, Mxy, Myy, 1, True)
        scores[i, 2], scores[i, 3] = s.acc_t, s.acc_f
        # Collapse one more mode: replace its samples with that mode's center
        c = collapsed_order[i]
        cidx = index[pred.eq(c)]
        t_feat[cidx] = c_feat_repeat[cidx]
    return scores
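# Illustrative only (not part of the original code): a minimal sketch of how
# `c_feat` (mode centers) and `pred` (per-sample mode assignments) could be
# produced by clustering the real features. The use of scikit-learn's KMeans,
# the helper name `make_modes` and the choice of 10 modes are assumptions made
# for this example; the original pipeline may construct modes differently.
from sklearn.cluster import KMeans

def make_modes(r_feat, n_mode=10):
    km = KMeans(n_clusters=n_mode, n_init=10).fit(r_feat.cpu().numpy())
    c_feat = torch.from_numpy(km.cluster_centers_).float()  # (n_mode, d) mode centers
    pred = torch.from_numpy(km.labels_).long()               # (N,) mode index per sample
    return c_feat, pred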
def drop_exp_1(r_feat_val, r_feat_train, pred):
    # Columns of `scores`: emd, mmd, acc_t, acc_f
    n_mode = int(pred.unique().numel())               # number of distinct modes
    scores = np.zeros((n_mode, 4))
    t_feat = r_feat_train.clone()
    collapsed_order = torch.randperm(n_mode).long()
    index = torch.arange(0, r_feat_train.size(0)).long()
    collapsed = torch.zeros(r_feat_train.size(0), dtype=torch.bool)
    Mxx = distance(r_feat_val, r_feat_val, sqrt=True)
    for i in range(n_mode):
        # Compute scores on the current (partially dropped) feature set
        Mxy = distance(r_feat_val, t_feat, sqrt=True)
        Myy = distance(t_feat, t_feat, sqrt=True)
        scores[i, 0] = wasserstein(Mxy, False)
        scores[i, 1] = mmd(Mxx, Mxy, Myy, 1)
        s = knn(Mxx, Mxy, Myy, 1, True)
        scores[i, 2], scores[i, 3] = s.acc_t, s.acc_f
        # Drop one more mode -- fill its slots with copies of remaining samples
        c = collapsed_order[i]
        collapsed[pred.eq(c)] = True
        cidx = index[collapsed]                       # samples whose mode has been dropped
        ncidx = index[~collapsed]                     # samples from surviving modes
        if cidx.numel() == 0 or ncidx.numel() == 0:
            continue
        for j in cidx:
            copy_idx = np.random.randint(0, ncidx.size(0))
            t_feat[j] = t_feat[ncidx[copy_idx]]
    return scores
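# Illustrative only: a small usage sketch with synthetic features so the two
# simulations above can be run end to end. Real experiments would use features
# extracted from images by a pretrained network; the shapes below are arbitrary
# assumptions, `make_modes` is the helper sketched above, and the metric helpers
# (distance, wasserstein, mmd, knn) are assumed to be importable from this codebase.
def _demo_collapse_and_drop():
    torch.manual_seed(0)
    r_feat_val = torch.randn(2000, 64)
    r_feat_train = torch.randn(2000, 64)
    c_feat, pred = make_modes(r_feat_train, n_mode=10)
    collapse_scores = collapse_exp_1(r_feat_val, r_feat_train, c_feat, pred)
    drop_scores = drop_exp_1(r_feat_val, r_feat_train, pred)
    print(collapse_scores.shape, drop_scores.shape)  # (10, 4) each: emd, mmd, acc_t, acc_f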
def overfit_exp_1(r_feat_val, r_feat_train, step=200):
    # Columns of `scores`: emd, mmd, acc_t, acc_f
    n_mode = r_feat_train.size(0) // step
    scores = np.zeros((n_mode + 1, 4))
    t_feat = r_feat_train.clone()
    Mxx = distance(r_feat_val, r_feat_val, sqrt=True)
    for i in range(n_mode + 1):
        # Compute scores on the current (partially memorized) feature set
        Mxy = distance(r_feat_val, t_feat, sqrt=True)
        Myy = distance(t_feat, t_feat, sqrt=True)
        scores[i, 0] = wasserstein(Mxy, False)
        scores[i, 1] = mmd(Mxx, Mxy, Myy, 1)
        s = knn(Mxx, Mxy, Myy, 1, True)
        scores[i, 2], scores[i, 3] = s.acc_t, s.acc_f
        # Simulate overfitting: copy one more block of validation features
        if i == n_mode:
            break
        t_feat[i*step:(i+1)*step] = r_feat_val[i*step:(i+1)*step]
    return scores
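# Illustrative only: running the overfitting simulation on synthetic features and
# tracing how each score moves as more validation features are copied into the
# "generated" set. The random-noise inputs and step size are assumptions for the
# demo; with real features one would expect distance-based scores (emd, mmd) to
# shrink toward zero as exact copies accumulate, since duplicated samples sit at
# zero distance from their validation counterparts.
def _demo_overfit():
    torch.manual_seed(0)
    r_feat_val = torch.randn(2000, 64)
    r_feat_train = torch.randn(2000, 64)
    scores = overfit_exp_1(r_feat_val, r_feat_train, step=200)
    for i, (emd, mmd_val, acc_t, acc_f) in enumerate(scores):
        print(f"{i*200:5d} copied  emd={emd:.4f}  mmd={mmd_val:.4f}  "
              f"acc_t={acc_t:.4f}  acc_f={acc_f:.4f}")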