Example #1
# Relies on project-local helpers: gnp (graph sampler), utils,
# l2_exp_weight, and l2_lin_weight.
def graphEval(X, truth_spec, true_sp, true_cc, true_dd, true_bc):
    sp_emd_cur = []
    cc_emd_cur = []
    dd_emd_cur = []
    assorts_cur = []
    spec_l2_cur = []
    spec_l2_lin_cur = []
    bc_emd_cur = []

    for j in range(20):
        A = gnp(X)
        G = nx.from_numpy_array(A)  # from_numpy_matrix was removed in networkx 3.x
        print(nx.is_connected(G))
        if not nx.is_connected(G):
            # connected_component_subgraphs was removed in networkx 2.4
            Gc = G.subgraph(max(nx.connected_components(G), key=len))
            print(len(Gc.nodes()))
        sp = utils.sp(A)
        cc = utils.cc(A)
        dd = utils.degree_sequence(A)
        spec_weight_l2 = l2_exp_weight(truth_spec, utils.spectrum(A))
        spec_weight_l2_lin = l2_lin_weight(truth_spec, utils.spectrum(A))
        bc = sorted(nx.betweenness_centrality(G).values())

        sp_emd_cur.append(utils.emd(sp, true_sp))
        cc_emd_cur.append(utils.emd(cc, true_cc))
        dd_emd_cur.append(utils.emd(dd, true_dd))
        assorts_cur.append(nx.degree_assortativity_coefficient(G))
        spec_l2_cur.append(spec_weight_l2)
        spec_l2_lin_cur.append(spec_weight_l2_lin)
        bc_emd_cur.append(utils.emd(bc, true_bc))

    return (np.mean(sp_emd_cur), np.mean(cc_emd_cur), np.mean(dd_emd_cur),
            np.mean(assorts_cur), np.mean(spec_l2_cur), np.mean(bc_emd_cur),
            np.mean(spec_l2_lin_cur))
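
graphEval and the snippets that follow assume a project-local sampler gnp that draws a random graph from a matrix X of edge probabilities. A minimal sketch of what such a Bernoulli sampler could look like, assuming X is symmetric with entries in [0, 1] (an assumption; the repository's gnp may differ):

import numpy as np

def gnp(X):
    # Sample each edge (u, v), u < v, independently with probability X[u, v],
    # then mirror the upper triangle so the adjacency matrix is symmetric
    # with a zero diagonal. Assumed semantics, not the repository's code.
    n = X.shape[0]
    upper = np.triu(np.random.rand(n, n) < X, k=1)
    return (upper | upper.T).astype(float)
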
def global_comp(A, X, sps_truth, spec):
    sp_emds = []
    specs = []
    ccs = []
    for i in range(100):
        sampled_graph = gnp(X)
        sps_gen = utils.sp(sampled_graph)
        sp_emds.append(utils.emd(sps_truth, sps_gen))
        specs.append((spec - utils.specGap(sampled_graph))**2)
        ccs.append(3 * utils.statistics_triangle_count(sampled_graph) /
                   utils.statistics_claw_count(sampled_graph))
    stats = {}
    stats['shortest path emds'] = sp_emds
    stats['spec gaps'] = specs
    stats['clustering coefficients'] = ccs
    print('Shortest path')
    print(np.mean(sp_emds))
    print(np.std(sp_emds))
    print('Specs')
    print(np.mean(specs))
    print(np.std(specs))
    print('Clustering Coefficient')
    print(np.mean(ccs))
    print(np.std(ccs))
    print('True CC')
    tc = 3 * utils.statistics_triangle_count(
        np.array(A)) / utils.statistics_claw_count(np.array(A))
    print(tc)
    print('ABS diff')
    print(tc - np.mean(ccs))
    return stats

# A variant of global_comp that returns summary statistics
# instead of printing them.
def global_comp(A, X, sps_truth, spec):
    sp_emds = []
    specs = []
    for i in range(100):
        sampled_graph = gnp(X)
        sps_gen = utils.sp(sampled_graph)
        sp_emds.append(utils.emd(sps_truth, sps_gen))
        specs.append((spec - utils.specGap(sampled_graph))**2)
    return np.mean(sp_emds), np.std(sp_emds), np.mean(specs), np.std(specs)
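
Both global_comp variants score samples with utils.emd, whose implementation is not shown here. For 1-D statistics like shortest-path or clustering-coefficient samples, a plausible stand-in is SciPy's earth mover's distance:

from scipy.stats import wasserstein_distance

def emd(u_values, v_values):
    # 1-D earth mover's distance between two empirical samples; a plausible
    # stand-in for utils.emd, which may bin or weight its inputs differently.
    return wasserstein_distance(u_values, v_values)
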
Example #4
def print_evaluate(A_matrix, sampled_graph):
    stats = utils.compute_graph_statistics(sampled_graph)

    sps_truth = utils.sp(A_matrix)
    sps_gen = utils.sp(sampled_graph)

    stats['sp_emd'] = utils.emd(sps_truth, sps_gen)
    s = utils.specGap(A_matrix)
    stats['spec_gap'] = (s - utils.specGap(sampled_graph))**2

    print(stats)
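
print_evaluate, like the samplers above, compares spectral gaps via utils.specGap. One common definition is the gap between the two largest adjacency eigenvalues, sketched below; this is an assumption, and the repository may use the Laplacian or transition matrix instead:

import numpy as np

def spec_gap(A):
    # Eigenvalues of the symmetric adjacency matrix, in ascending order.
    eigs = np.linalg.eigvalsh(A)
    # Gap between the largest and second-largest eigenvalue.
    return eigs[-1] - eigs[-2]
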
def gen_graphs(X_from_walks, target):
    X = genExpected_fromWalks(X_from_walks, target.sum())
    print(X)
    sps_truth = utils.sp(target)
    sg_truth = utils.specGap(target)  # hoisted out of the loop; target is fixed
    sp_emds = []
    sgl2s = []
    for i in range(20):
        sampled_graph = gnp(X)
        sps_gen = utils.sp(sampled_graph)
        sp_emd = utils.emd(sps_truth, sps_gen)
        sgl2 = (sg_truth - utils.specGap(sampled_graph))**2
        sp_emds.append(sp_emd)
        sgl2s.append(sgl2)
    return sp_emds, sgl2s
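
A usage sketch for gen_graphs; target (the ground-truth adjacency matrix) and X_from_walks (the random-walk model output) are hypothetical inputs that must come from the surrounding experiment:

sp_emds, sgl2s = gen_graphs(X_from_walks, target)
print('sp EMD:   {:.4f} +/- {:.4f}'.format(np.mean(sp_emds), np.std(sp_emds)))
print('spec gap: {:.4f} +/- {:.4f}'.format(np.mean(sgl2s), np.std(sgl2s)))
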
        sp_emd_cur = []
        cc_emd_cur = []
        dd_emd_cur = []
        assorts_cur = []
        spec_l2_cur = []
        bc_emd_cur = []

        for j in range(20):
            A = gnp(X)
            G = nx.from_numpy_matrix(A)
            sp = utils.sp(A)
            cc = utils.cc(A)
            dd = utils.degree_sequence(A)
            spec_weight_l2 = l2_exp_weight(truth_spec, utils.spectrum(A))
            bc = sorted(nx.betweenness_centrality(G).values())

            sp_emd_cur.append(utils.emd(sp, true_sp))
            cc_emd_cur.append(utils.emd(cc, true_cc))
            dd_emd_cur.append(utils.emd(dd, true_dd))
            assorts_cur.append(nx.degree_assortativity_coefficient(G))
            spec_l2_cur.append(spec_weight_l2)
            bc_emd_cur.append(utils.emd(bc, true_bc))

        sp_emds.append(np.mean(sp_emd_cur))
        cc_emds.append(np.mean(cc_emd_cur))
        dd_emds.append(np.mean(dd_emd_cur))
        assorts.append(np.mean(assorts_cur))
        true_assorts.append(true_assort)
        spectrum_weighted_distances.append(np.mean(spec_l2_cur))
        bc_emds.append(np.mean(bc_emd_cur))

    index = [i * step for i in range(start, k + 1)]
Example #7
    def run(net, loader, optimizer, train=False, epoch=0, pool=None):
        writer = train_writer
        if train:
            net.train()
            prefix = "train"
            torch.set_grad_enabled(True)
        else:
            net.eval()
            prefix = "test"
            torch.set_grad_enabled(False)

        if args.export_dir:
            true_export = []
            pred_export = []

        iters_per_epoch = len(loader)
        loader = tqdm(
            loader,
            ncols=0,
            desc="{1} E{0:02d}".format(epoch, "train" if train else "test "),
        )

        for i, sample in enumerate(loader, start=epoch * iters_per_epoch):
            # input is either a set or an image
            input, target_set, target_mask = map(lambda x: x.cuda(), sample)

            # forward evaluation through the network
            (progress, masks, evals,
             gradn), (y_enc, y_label) = net(input, target_set, target_mask)

            progress_only = progress

            # if using mask as feature, concat mask feature into progress
            if args.mask_feature:
                target_set = torch.cat(
                    [target_set, target_mask.unsqueeze(dim=1)], dim=1)
                progress = [
                    torch.cat([p, m.unsqueeze(dim=1)], dim=1)
                    for p, m in zip(progress, masks)
                ]

            if args.loss == "chamfer":
                # dim 0 is over the inner iteration steps
                # target set is broadcasted over dim 0
                set_loss = utils.chamfer_loss(torch.stack(progress),
                                              target_set.unsqueeze(0))
            elif args.loss == "hungarian":
                set_loss = utils.hungarian_loss(progress[-1],
                                                target_set,
                                                thread_pool=pool).unsqueeze(0)
            elif args.loss == "emd":
                set_loss = utils.emd(progress[-1], target_set).unsqueeze(0)
            else:
                raise ValueError(f"unknown loss: {args.loss}")

            # Only use representation loss with DSPN and when doing general
            # supervised prediction, not when auto-encoding
            if args.supervised and not args.baseline:
                repr_loss = args.huber_repr * F.smooth_l1_loss(y_enc, y_label)
                loss = set_loss.mean() + repr_loss.mean()
            else:
                loss = set_loss.mean()

            # restore progress variable to not contain masks for correct
            # exporting
            progress = progress_only

            # Outer optim step
            if train:
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

            # Tensorboard tracking of metrics for debugging
            tracked_last = tracker.update(f"{prefix}_last",
                                          set_loss[-1].item())
            tracked_loss = tracker.update(f"{prefix}_loss", loss.item())
            if train:
                writer.add_scalar("metric/set-loss",
                                  loss.item(),
                                  global_step=i)

                writer.add_scalar("metric/set-last",
                                  set_loss[-1].mean().item(),
                                  global_step=i)

                if not args.baseline:
                    writer.add_scalar("metric/eval-first",
                                      evals[0].mean().item(),
                                      global_step=i)

                    writer.add_scalar("metric/eval-last",
                                      evals[-1].mean().item(),
                                      global_step=i)

                    writer.add_scalar("metric/max-inner-grad-norm",
                                      max(g.item() for g in gradn),
                                      global_step=i)

                    writer.add_scalar("metric/mean-inner-grad-norm",
                                      sum(g.item()
                                          for g in gradn) / len(gradn),
                                      global_step=i)

                    if args.supervised:
                        writer.add_scalar("metric/repr_loss",
                                          repr_loss.item(),
                                          global_step=i)

            # Print current progress to progress bar
            fmt = "{:.6f}".format
            loader.set_postfix(last=fmt(tracked_last),
                               loss=fmt(tracked_loss),
                               bad=fmt(evals[-1].detach().cpu().item() *
                                       1000) if not args.baseline else 0)

            if args.export_dir:
                # export last inner optim of each input as csv
                # (one input per row)
                if args.export_csv:
                    # the second-to-last element holds the last step of
                    # the inner optim
                    for batch_i, p in enumerate(progress[-2]):
                        img_id = i * args.batch_size + batch_i

                        names.append(loader.iterable.dataset.get_fname(img_id))

                        m = masks[-2][batch_i]
                        m = m.cpu().detach().numpy().astype(bool)

                        p = p.cpu().detach().numpy()
                        p = p[:, m]

                        sample_preds = [
                            p[k % 2, k // 2] for k in range(p.shape[1] * 2)
                        ]
                        # remove values according to the mask and pad with
                        # zeros at the end instead
                        sample_preds += [0] * (len(m) * 2 - len(sample_preds))
                        predictions.append(sample_preds)

                        true_mask = target_set[batch_i, 2, :].cpu().detach()
                        true_mask = true_mask.numpy().astype(bool)
                        trues = target_set[batch_i, :2, :]
                        trues = trues.cpu().detach().numpy()

                        t = trues[:, true_mask]

                        t = [t[k % 2, k // 2] for k in range(t.shape[1] * 2)]

                        t += [0] * (len(true_mask) * 2 - len(t))

                        export_targets.append(t)

                # Store predictions to be exported
                else:
                    if len(true_export) < args.export_n:
                        for p, m in zip(target_set, target_mask):
                            true_export.append(p.detach().cpu())
                        progress_steps = []
                        for pro, ms in zip(progress, masks):
                            # pro and ms are one step of the inner optim
                            # score boxes contains the list of predicted
                            # elements for one step
                            score_boxes = []
                            for p, m in zip(pro.cpu().detach(),
                                            ms.cpu().detach()):
                                score_box = torch.cat([m.unsqueeze(0), p],
                                                      dim=0)
                                score_boxes.append(score_box)
                            progress_steps.append(score_boxes)
                        for b in zip(*progress_steps):
                            pred_export.append(b)

            # Plot predictions in Tensorboard
            if args.show and epoch % args.show_skip == 0 and not train:
                name = f"set/epoch-{epoch}/img-{i}"
                # thresholded set
                progress.append(progress[-1])
                masks.append((masks[-1] > 0.5).float())
                # target set
                if args.mask_feature:
                    # target set is augmented with masks, so remove them
                    progress.append(target_set[:, :-1])
                else:
                    progress.append(target_set)
                masks.append(target_mask)
                # intermediate sets

                for j, (s, ms) in enumerate(zip(progress, masks)):
                    if args.dataset == "clevr-state":
                        continue

                    if args.dataset.startswith("clevr"):
                        threshold = 0.5
                    else:
                        threshold = None

                    s, ms = utils.scatter_masked(
                        s,
                        ms,
                        binned=args.dataset.startswith("clevr"),
                        threshold=threshold)

                    if j != len(progress) - 1:
                        tag_name = f"{name}"
                    else:
                        tag_name = f"{name}-target"

                    if args.dataset == "clevr-box":
                        img = input[0].detach().cpu()

                        writer.add_image_with_boxes(tag_name,
                                                    img,
                                                    s.transpose(0, 1),
                                                    global_step=j)
                    elif args.dataset in ("cats", "wflw", "merged"):

                        img = input[0].detach().cpu()

                        fig = plt.figure()
                        plt.scatter(s[0, :] * 128, s[1, :] * 128)

                        plt.imshow(np.transpose(img, (1, 2, 0)))

                        writer.add_figure(tag_name, fig, global_step=j)
                    else:  # mnist
                        fig = plt.figure()
                        y, x = s
                        y = 1 - y
                        ms = ms.numpy()
                        rgba_colors = np.zeros((ms.size, 4))
                        rgba_colors[:, 2] = 1.0
                        rgba_colors[:, 3] = ms
                        plt.scatter(x, y, color=rgba_colors)
                        plt.gca().set_aspect("equal")
                        plt.xlim(0, 1)
                        plt.ylim(0, 1)
                        writer.add_figure(tag_name, fig, global_step=j)

        # Export predictions
        if args.export_dir and not args.export_csv:
            os.makedirs(f"{args.export_dir}/groundtruths", exist_ok=True)
            os.makedirs(f"{args.export_dir}/detections", exist_ok=True)
            for i, (gt, dets) in enumerate(zip(true_export, pred_export)):
                export_groundtruths_path = os.path.join(
                    args.export_dir, "groundtruths", f"{i}.txt")

                with open(export_groundtruths_path, "w") as fd:
                    for box in gt.transpose(0, 1):
                        if (box == 0).all():
                            continue
                        s = "box " + " ".join(map(str, box.tolist()))
                        fd.write(s + "\n")

                if args.export_progress:
                    for step, det in enumerate(dets):
                        export_progress_path = os.path.join(
                            args.export_dir, "detections",
                            f"{i}-step{step}.txt")

                        with open(export_progress_path, "w") as fd:
                            for sbox in det.transpose(0, 1):
                                s = f"box " + " ".join(map(str, sbox.tolist()))
                                fd.write(s + "\n")

                export_path = os.path.join(args.export_dir, "detections",
                                           f"{i}.txt")
                with open(export_path, "w") as fd:
                    for sbox in dets[-1].transpose(0, 1):
                        s = f"box " + " ".join(map(str, sbox.tolist()))
                        fd.write(s + "\n")
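
The chamfer branch of run delegates to utils.chamfer_loss; a minimal batched Chamfer set loss in PyTorch is sketched below. The shapes and the mean reduction are assumptions, not necessarily what utils.chamfer_loss does:

import torch

def chamfer_loss(pred, target):
    # pred: (batch, dim, n) and target: (batch, dim, m) point sets.
    d = torch.cdist(pred.transpose(1, 2), target.transpose(1, 2))  # (batch, n, m)
    # Nearest-target distance for each prediction, and vice versa.
    return d.min(dim=2).values.mean(dim=1) + d.min(dim=1).values.mean(dim=1)
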
        .format(i * 100))
    X_r = np.loadtxt(
        'plots/barbell_sameDensity/barbell_walk_mixed/trainingIteration_{}_expectedGraph.txt'
        .format(i * 100))

    cc_emd_combo_iter = []
    cc_emd_fmm_iter = []
    cc_emd_reg_iter = []
    for j in range(100):
        sampled_graph_c = gnp(X_c)
        sampled_graph_f = gnp(X_f)
        sampled_graph_r = gnp(X_r)
        cc_c = utils.cc(sampled_graph_c)
        cc_f = utils.cc(sampled_graph_f)
        cc_r = utils.cc(sampled_graph_r)
        cc_emd_combo_iter.append(utils.emd(truth_cc, cc_c))
        cc_emd_fmm_iter.append(utils.emd(truth_cc, cc_f))
        cc_emd_reg_iter.append(utils.emd(truth_cc, cc_r))
    cc_emd_combo.append(np.mean(cc_emd_combo_iter))
    cc_emd_fmm.append(np.mean(cc_emd_fmm_iter))
    cc_emd_reg.append(np.mean(cc_emd_reg_iter))
    cc_emd_combo_std.append(np.std(cc_emd_combo_iter))
    cc_emd_fmm_std.append(np.std(cc_emd_fmm_iter))
    cc_emd_reg_std.append(np.std(cc_emd_reg_iter))

plt.errorbar(range(1, k), cc_emd_combo, yerr=cc_emd_combo_std, label='Combo')
plt.errorbar(range(1, k), cc_emd_fmm, yerr=cc_emd_fmm_std, label='FMM')
plt.errorbar(range(1, k), cc_emd_reg, yerr=cc_emd_reg_std, label='Reg Walk')
plt.legend(loc='best')
plt.savefig('plots/barbell_sameDensity/barbell_cc_comp.pdf')
plt.gcf().clear()