Example #1
0
        def __init__(self,
                     height=64,
                     width=64,
                     with_r=False,
                     with_boundary=False):
            """Precompute normalized coordinate maps in [-1, 1].

            Builds a (1, 2 or 3, height, width) buffer holding row/column
            coordinates (plus an optional radius channel when `with_r`)
            and moves it to the available device.  `with_boundary` is only
            stored here; presumably consumed elsewhere — confirm in caller.
            """
            super(AddCoordsTh, self).__init__()
            self.with_r = with_r
            self.with_boundary = with_boundary
            device = torch.device(
                'cuda' if torch.cuda.is_available() else 'cpu')

            with torch.no_grad():
                # Normalize indices to [-1, 1] first, then broadcast the
                # 1-D vectors onto the full (height, width) grid.
                row_vals = (torch.arange(height).float() / (height - 1)) * 2 - 1
                col_vals = (torch.arange(width).float() / (width - 1)) * 2 - 1
                x_coords = row_vals.unsqueeze(1).expand(height, width)
                y_coords = col_vals.unsqueeze(0).expand(height, width)
                coords = torch.stack([x_coords, y_coords],
                                     dim=0)  # (2, height, width)

                if self.with_r:
                    # Radius channel, rescaled so its maximum equals 1.
                    rr = torch.sqrt(x_coords ** 2 + y_coords ** 2)
                    rr = (rr / torch.max(rr)).unsqueeze(0)
                    coords = torch.cat([coords, rr], dim=0)

                self.coords = coords.unsqueeze(0).to(
                    device)  # (1, 2 or 3, height, width)
                self.x_coords = x_coords.to(device)
                self.y_coords = y_coords.to(device)
Example #2
0
 def __init__(self, loader, loader_ref=None, latent_dim=16, mode=''):
     """Store the data loaders and sampling configuration.

     Chooses CUDA as the working device whenever it is available,
     otherwise falls back to CPU.
     """
     self.loader = loader
     self.loader_ref = loader_ref
     self.latent_dim = latent_dim
     self.mode = mode
     # Prefer the GPU whenever one is visible to the runtime.
     use_cuda = porch.cuda.is_available()
     self.device = porch.device('cuda' if use_cuda else 'cpu')
Example #3
0
        def _load_lpips_weights(self):
            """Copy matching parameters from lpips_weights.ckpt into this model.

            The checkpoint is mapped onto the CPU; entries whose names do
            not exist in this module's state dict are silently skipped.
            """
            own_state_dict = self.state_dict()

            loaded = torch.load('lpips_weights.ckpt',
                                map_location=torch.device('cpu'))
            for name, param in loaded.items():
                if name not in own_state_dict:
                    continue
                # In-place copy keeps the existing parameter objects alive.
                own_state_dict[name].copy_(param)
 def load_pretrained_weights(self, fname):
     """Load weights from checkpoint `fname`, keeping only matching keys.

     The checkpoint is expected to store parameters under 'state_dict';
     map_location falls back to CPU when CUDA is unavailable.
     """
     if torch.cuda.is_available():
         checkpoint = torch.load(fname)
     else:
         checkpoint = torch.load(fname, map_location=torch.device('cpu'))
     model_weights = self.state_dict()
     pretrained = checkpoint['state_dict']
     # Keep only the keys this model actually has.
     overlap = {k: v for k, v in pretrained.items() if k in model_weights}
     model_weights.update(overlap)
     self.load_state_dict(model_weights)
Example #5
0
File: wing.py  Project: zzz2010/paddorch
 def __init__(self, fname_wing, fname_celeba_mean, output_size):
     """Set up the FAN landmark detector and the CelebA reference landmarks."""
     use_cuda = torch.cuda.is_available()
     self.device = torch.device('cuda' if use_cuda else 'cpu')
     self.output_size = output_size
     # Pretrained face-alignment network, used for inference only.
     fan = FAN(fname_pretrained=fname_wing)
     fan.eval()
     self.fan = fan
     # Reference landmarks were saved for a 256px canvas; rescale them to
     # the requested output resolution (integer multiple of 256 assumed).
     scale = output_size // 256
     mean_landmarks = np.float32(np.load(fname_celeba_mean)['mean'])
     self.CELEB_REF = mean_landmarks * scale
     self.xaxis_ref = landmarks2xaxis(self.CELEB_REF)
Example #6
0
 def load(self, step):
     """Restore every registered module from the checkpoint for `step`.

     A missing checkpoint file is reported and skipped; modules whose
     names are absent from the checkpoint are left untouched.
     """
     fname = self.fname_template.format(step)
     if not os.path.exists(fname):
         print(fname + ' does not exist!')
         return
     print('Loading checkpoint from %s...' % fname)
     # Map to CPU when no GPU is present so deserialization still works.
     if porch.cuda.is_available():
         saved = porch.load(fname)
     else:
         saved = porch.load(fname, map_location=porch.device('cpu'))
     for name, module in self.module_dict.items():
         if name not in saved:
             continue
         print(name,"loaded")
         module.load_state_dict(saved[name])
Example #7
0
def calculate_lpips_given_images(group_of_images):
    """Average pairwise LPIPS distance over a list of image batches.

    Each element of `group_of_images` is one batch of generated images;
    every unordered pair (i, j) with i < j contributes one distance.
    """
    # group_of_images = [porch.randn(N, C, H, W) for _ in range(10)]
    device = porch.device('cuda' if porch.cuda.is_available() else 'cpu')
    lpips = LPIPS(pretrained_weights_fn="./metrics/LPIPS_pretrained.pdparams")
    lpips.eval()
    num_rand_outputs = len(group_of_images)

    # calculate the average of pairwise distances among all random outputs
    lpips_values = [
        lpips(group_of_images[i], group_of_images[j])
        for i in range(num_rand_outputs - 1)
        for j in range(i + 1, num_rand_outputs)
    ]
    lpips_value = porch.mean(porch.stack(lpips_values, dim=0))
    return lpips_value.numpy()
Example #8
0
def calculate_fid_given_paths(paths, img_size=256, batch_size=50):
    """Compute the FID between the two image folders given in `paths`.

    Extracts InceptionV3 activations for each folder, fits a Gaussian
    (mean + covariance) per folder, and returns their Frechet distance.
    """
    print('Calculating FID given paths %s and %s...' % (paths[0], paths[1]))
    device = porch.device('cuda' if porch.cuda.is_available() else 'cpu')
    inception = InceptionV3("./metrics/inception_v3_pretrained.pdparams")
    inception.eval()
    loaders = [get_eval_loader(path, img_size, batch_size) for path in paths]

    mu, cov = [], []
    for loader in loaders:
        feats = []
        for x in tqdm(loader, total=len(loader)):
            batch = porch.varbase_to_tensor(x[0])
            feats.append(inception(batch))
        stacked = porch.cat(feats, dim=0).numpy()
        # Gaussian statistics of the activation distribution.
        mu.append(np.mean(stacked, axis=0))
        cov.append(np.cov(stacked, rowvar=False))
    fid_value = frechet_distance(mu[0], cov[0], mu[1], cov[1])
    return fid_value.astype(float)
Example #9
0
    def __init__(self, img_size=256, style_dim=64, max_conv_dim=512, w_hpf=1):
        """Build the encoder/decoder trunk of a StarGAN-v2-style generator.

        Args:
            img_size: input/output spatial resolution (assumed power of two).
            style_dim: dimensionality of the style code fed to AdaIN blocks.
            max_conv_dim: upper bound on any block's channel width.
            w_hpf: high-pass filter weight; > 0 adds one extra down/up stage
                and creates the HighPass module.

        Note: construction order matters — it fixes state_dict keys and the
        RNG-dependent weight initialization of each submodule.
        """
        super().__init__()
        # Channel width at full resolution; smaller images start wider.
        dim_in = 2**14 // img_size
        self.img_size = img_size
        self.from_rgb = nn.Conv2d(3, dim_in, 3, 1, 1)
        self.encode = nn.ModuleList()
        self.decode = nn.ModuleList()
        self.to_rgb = nn.Sequential(nn.InstanceNorm2d(dim_in, affine=True),
                                    nn.LeakyReLU(0.2),
                                    nn.Conv2d(dim_in, 3, 1, 1, 0))

        # down/up-sampling blocks
        # decode is filled via insert(0, ...) so that decode[i] mirrors
        # encode[i]: the deepest decoder block ends up first.
        repeat_num = int(np.log2(img_size)) - 4
        if w_hpf > 0:
            repeat_num += 1
        for _ in range(repeat_num):
            dim_out = min(dim_in * 2, max_conv_dim)
            self.encode.append(
                ResBlk(dim_in, dim_out, normalize=True, downsample=True))
            self.decode.insert(0,
                               AdainResBlk(dim_out,
                                           dim_in,
                                           style_dim,
                                           w_hpf=w_hpf,
                                           upsample=True))  # stack-like
            dim_in = dim_out

        # bottleneck blocks
        # dim_out carries over from the last loop iteration above
        # (requires repeat_num >= 1, i.e. img_size >= 32).
        for _ in range(2):
            self.encode.append(ResBlk(dim_out, dim_out, normalize=True))
            self.decode.insert(
                0, AdainResBlk(dim_out, dim_out, style_dim, w_hpf=w_hpf))

        if w_hpf > 0:
            device = porch.device(
                'cuda' if porch.cuda.is_available() else 'cpu')
            self.hpf = HighPass(w_hpf, device)
Example #10
0
def calculate_metrics(nets, args, step, mode):
    """Evaluate LPIPS (per-task diversity) and FID for all domain pairs.

    For every source->target domain task, generates
    `args.num_outs_per_domain` fake images per source image — from random
    latents ('latent' mode) or reference images ('reference' mode) —
    saves them under `args.eval_dir` for FID, records per-task LPIPS into
    a JSON report, and finally returns the FID results.

    Args:
        nets: network bundle (generator, mapping_network, style_encoder,
            fan, ...) supporting both attribute and key iteration access.
        args: run configuration (val_img_dir, eval_dir, img_size,
            val_batch_size, latent_dim, w_hpf, num_outs_per_domain).
        step: training step, used in the report filename.
        mode: 'latent' or 'reference'.
    """
    print('Calculating evaluation metrics...')
    assert mode in ['latent', 'reference']
    device = porch.device('cuda' if porch.cuda.is_available() else 'cpu')
    for name in nets:
        nets[name].eval()
    domains = os.listdir(args.val_img_dir)
    domains.sort()
    num_domains = len(domains)
    print('Number of domains: %d' % num_domains)
    enable_lpips = True  # save time to check FID result
    if enable_lpips:
        lpips_dict = OrderedDict()
        for trg_idx, trg_domain in enumerate(domains):
            src_domains = [x for x in domains if x != trg_domain]

            if mode == 'reference':
                path_ref = os.path.join(args.val_img_dir, trg_domain)
                loader_ref = get_eval_loader(root=path_ref,
                                             img_size=args.img_size,
                                             batch_size=args.val_batch_size,
                                             imagenet_normalize=False,
                                             drop_last=True)

            for src_idx, src_domain in enumerate(src_domains):
                path_src = os.path.join(args.val_img_dir, src_domain)
                loader_src = get_eval_loader(root=path_src,
                                             img_size=args.img_size,
                                             batch_size=args.val_batch_size,
                                             imagenet_normalize=False)

                task = '%s2%s' % (src_domain, trg_domain)
                path_fake = os.path.join(args.eval_dir, task)
                shutil.rmtree(path_fake, ignore_errors=True)
                os.makedirs(path_fake)

                lpips_values = []
                print('Generating images and calculating LPIPS for %s...' % task)
                for i, x_src in enumerate(tqdm(loader_src, total=len(loader_src))):
                    x_src = porch.varbase_to_tensor(x_src[0])
                    N = x_src.size(0)
                    y_trg = porch.tensor([trg_idx] * N)
                    masks = nets.fan.get_heatmap(x_src) if args.w_hpf > 0 else None

                    # generate 10 outputs from the same input
                    group_of_images = []
                    for j in range(args.num_outs_per_domain):
                        if mode == 'latent':
                            z_trg = porch.randn(N, args.latent_dim)
                            s_trg = nets.mapping_network(z_trg, y_trg)
                        else:
                            try:
                                x_ref = next(iter_ref)
                            except (NameError, StopIteration):
                                # iter_ref does not exist yet (first pass of a
                                # target domain) or is exhausted: (re)start it.
                                # Was a bare `except:` which hid real errors.
                                iter_ref = iter(loader_ref)
                                x_ref = next(iter_ref)
                            x_ref = porch.varbase_to_tensor(x_ref[0])
                            if x_ref.size(0) > N:
                                x_ref = x_ref[:N]
                            s_trg = nets.style_encoder(x_ref, y_trg)
                        x_fake = nets.generator(x_src, s_trg, masks=masks)

                        group_of_images.append(x_fake)

                        # save generated images to calculate FID later
                        for k in range(N):
                            filename = os.path.join(
                                path_fake,
                                '%.4i_%.2i.png' % (i*args.val_batch_size+(k+1), j+1))
                            utils.save_image(x_fake[k], ncol=1, filename=filename)

                    lpips_value = calculate_lpips_given_images(group_of_images)
                    lpips_values.append(lpips_value)

                # calculate LPIPS for each task (e.g. cat2dog, dog2cat)
                lpips_mean = np.array(lpips_values).mean().astype(float)
                lpips_dict['LPIPS_%s/%s' % (mode, task)] = lpips_mean

            # delete dataloaders
            del loader_src
            if mode == 'reference':
                del loader_ref
                del iter_ref

        # calculate the average LPIPS for all tasks
        lpips_mean = 0
        for _, value in lpips_dict.items():
            lpips_mean += value / len(lpips_dict)
        lpips_dict['LPIPS_%s/mean' % mode] = lpips_mean

        # report LPIPS values
        filename = os.path.join(args.eval_dir, 'LPIPS_%.5i_%s.json' % (step, mode))
        utils.save_json(lpips_dict, filename)

    # calculate and report fid values
    fid_result = calculate_fid_for_all_tasks(args, domains, step=step, mode=mode)
    # Restore training mode before returning. This loop was previously
    # placed after the return statement and therefore never executed.
    for name in nets:
        nets[name].train()
    return fid_result
Example #11
0
def main(args_test):
    """Load a pretrained MoCo checkpoint and dump graph embeddings.

    Loads the checkpoint at `args_test.load_path`, rebuilds the training
    configuration stored inside it, runs the encoder over the whole
    requested dataset in one batch, and saves the resulting embeddings as
    a .npy file in the checkpoint's model folder.

    Raises:
        FileNotFoundError: if `args_test.load_path` is not a file.
            (Previously execution only printed a message and then crashed
            with an unrelated NameError at `checkpoint["opt"]`.)
    """
    if not os.path.isfile(args_test.load_path):
        raise FileNotFoundError(
            "=> no checkpoint found at '{}'".format(args_test.load_path))
    print("=> loading checkpoint '{}'".format(args_test.load_path))
    checkpoint = torch.load(args_test.load_path, map_location="cpu")
    print(
        "=> loaded successfully '{}' (epoch {})".format(
            args_test.load_path, checkpoint["epoch"]
        )
    )
    # The checkpoint carries the full training-time configuration.
    args = checkpoint["opt"]

    assert args_test.gpu is None or torch.cuda.is_available()
    print("Use GPU: {} for generation".format(args_test.gpu))
    args.gpu = args_test.gpu
    args.device = torch.device("cpu") if args.gpu is None else torch.device(args.gpu)

    # Dataset kind is chosen from the *test* arguments; sampling
    # hyper-parameters come from the training-time config.
    if args_test.dataset in GRAPH_CLASSIFICATION_DSETS:
        train_dataset = GraphClassificationDataset(
            dataset=args_test.dataset,
            rw_hops=args.rw_hops,
            subgraph_size=args.subgraph_size,
            restart_prob=args.restart_prob,
            positional_embedding_size=args.positional_embedding_size,
        )
    else:
        train_dataset = NodeClassificationDataset(
            dataset=args_test.dataset,
            rw_hops=args.rw_hops,
            subgraph_size=args.subgraph_size,
            restart_prob=args.restart_prob,
            positional_embedding_size=args.positional_embedding_size,
        )
    # Single full-dataset batch so test_moco sees everything at once.
    args.batch_size = len(train_dataset)
    train_loader = torch.utils.data.DataLoader(
        dataset=train_dataset,
        batch_size=args.batch_size,
        collate_fn=batcher(),
        shuffle=False,
        num_workers=args.num_workers,
    )

    # create model and optimizer
    model = GraphEncoder(
        positional_embedding_size=args.positional_embedding_size,
        max_node_freq=args.max_node_freq,
        max_edge_freq=args.max_edge_freq,
        max_degree=args.max_degree,
        freq_embedding_size=args.freq_embedding_size,
        degree_embedding_size=args.degree_embedding_size,
        output_dim=args.hidden_size,
        node_hidden_dim=args.hidden_size,
        edge_hidden_dim=args.hidden_size,
        num_layers=args.num_layer,
        num_step_set2set=args.set2set_iter,
        num_layer_set2set=args.set2set_lstm_layer,
        gnn_model=args.model,
        norm=args.norm,
        degree_input=True,
    )

    model = model.to(args.device)
    model.load_state_dict(checkpoint["model"])

    # Free the checkpoint before running inference to reduce peak memory.
    del checkpoint

    emb = test_moco(train_loader, model, args)
    np.save(os.path.join(args.model_folder, args_test.dataset), emb.numpy())
Example #12
0
    def forward(self, g, return_all_outputs=False):
        """Predict molecule labels.

        Parameters
        ----------
        g : DGLGraph
            Input DGLGraph for molecule(s). Node data used here:
            "pos_undirected" (positional embeddings) and "seed"
            (seed-node indicator); node degrees are recomputed locally.
        return_all_outputs : bool
            If True, also return the per-layer GNN outputs.

        Returns
        -------
        res : Predicted labels (and per-layer outputs when requested).
        """

        # nfreq = g.ndata["nfreq"]
        if self.degree_input:
            device = g.ndata["seed"].device
            degrees = g.in_degrees()
            if device != torch.device("cpu"):
                # in_degrees() yields a CPU tensor; move it to the graph's
                # device before embedding. This branch was previously the
                # no-op `degrees = degrees`, which broke GPU runs.
                degrees = degrees.to(device)

            n_feat = torch.cat(
                (
                    g.ndata["pos_undirected"],
                    # Degrees are clamped into the embedding table's range.
                    self.degree_embedding(torch.clamp(degrees, 0, self.max_degree)),
                    g.ndata["seed"].unsqueeze(1).float(),
                ),
                dim=-1,
            )
        else:
            n_feat = torch.cat(
                (
                    g.ndata["pos_undirected"],
                    # g.ndata["pos_directed"],
                    # self.node_freq_embedding(nfreq.clamp(0, self.max_node_freq)),
                    # self.degree_embedding(degrees.clamp(0, self.max_degree)),
                    g.ndata["seed"].unsqueeze(1).float(),
                    # nfreq.unsqueeze(1).float() / self.max_node_freq,
                    # degrees.unsqueeze(1).float() / self.max_degree,
                ),
                dim=-1,
            )

        # Edge features are currently unused by the GNN backbones.
        e_feat = None
        if self.gnn_model == "gin":
            # GIN returns an already-pooled graph representation.
            x, all_outputs = self.gnn(g, n_feat, e_feat)
        else:
            x, all_outputs = self.gnn(g, n_feat, e_feat), None
            x = self.set2set(g, x)
            x = self.lin_readout(x)
        if self.norm:
            # L2-normalize the final representation.
            x = F.normalize(x, p=2, dim=-1, eps=1e-5)
        if return_all_outputs:
            return x, all_outputs
        else:
            return x
Example #13
0
def train_finetune(
    epoch,
    train_loader,
    model,
    output_layer,
    criterion,
    optimizer,
    output_layer_optimizer,
    sw,
    opt,
):
    """
    Run one fine-tuning epoch for MoCo: encode each graph batch with
    `model`, classify with `output_layer`, and step both optimizers.

    Parameters
    ----------
    epoch : current epoch index (used to derive the global step).
    train_loader : iterable of (batched graph, labels) pairs.
    model : graph encoder producing (batch_size, opt.hidden_size) features.
    output_layer : classification head applied to encoder output.
    criterion : loss function over (logits, labels).
    optimizer, output_layer_optimizer : optimizers for the two modules.
    sw : tensorboard summary writer.
    opt : config namespace (gpu, hidden_size, learning_rate, epochs,
        print_freq, tb_freq).

    Returns
    -------
    (epoch average loss, epoch average micro-F1).
    """
    n_batch = len(train_loader)
    model.train()
    output_layer.train()

    # Windowed meters (reset on each tensorboard flush) vs. epoch_* meters,
    # which are never reset and produce the returned epoch averages.
    batch_time = AverageMeter()
    data_time = AverageMeter()
    loss_meter = AverageMeter()
    f1_meter = AverageMeter()
    epoch_loss_meter = AverageMeter()
    epoch_f1_meter = AverageMeter()
    prob_meter = AverageMeter()
    graph_size = AverageMeter()
    max_num_nodes = 0
    max_num_edges = 0

    end = time.time()
    for idx, batch in enumerate(train_loader):
        data_time.update(time.time() - end)
        graph_q, y = batch

        # NOTE(review): return value ignored — assumes .to() moves the graph
        # in place; recent DGL versions return a new graph instead. Confirm.
        graph_q.to(torch.device(opt.gpu))
        y = y.to(torch.device(opt.gpu))

        bsz = graph_q.batch_size

        # ===================forward=====================

        feat_q = model(graph_q)

        assert feat_q.shape == (graph_q.batch_size, opt.hidden_size)
        out = output_layer(feat_q)

        # NOTE(review): convertTensor is not standard PyTorch — presumably a
        # paddorch compatibility shim wrapping the loss value; confirm.
        loss = torch.convertTensor(criterion(out, y))

        # ===================backward=====================
        optimizer.zero_grad()
        output_layer_optimizer.zero_grad()
        loss.backward()
        # torch.nn.utils.clip_grad_value_(model.parameters(), 1)
        # torch.nn.utils.clip_grad_value_(output_layer.parameters(), 1)
        global_step = epoch * n_batch + idx
        # Linear-warmup LR schedule; the set_lr calls below are commented
        # out, so lr_this_step is currently only logged, never applied.
        lr_this_step = opt.learning_rate * warmup_linear(
            global_step / (opt.epochs * n_batch), 0.1)
        # if lr_this_step is not None:
        #     optimizer.set_lr(lr_this_step)
        #     output_layer_optimizer.set_lr(lr_this_step)

        optimizer.step()
        output_layer_optimizer.step()

        preds = out.argmax(dim=1)
        f1 = f1_score(y.cpu().numpy(), preds.cpu().numpy(), average="micro")

        # ===================meters=====================
        f1_meter.update(f1, bsz)
        epoch_f1_meter.update(f1, bsz)
        loss_meter.update(loss.item(), bsz)
        epoch_loss_meter.update(loss.item(), bsz)
        graph_size.update(graph_q.number_of_nodes() / bsz, bsz)
        max_num_nodes = max(max_num_nodes, graph_q.number_of_nodes())
        max_num_edges = max(max_num_edges, graph_q.number_of_edges())

        batch_time.update(time.time() - end)
        end = time.time()

        # print info
        if (idx + 1) % opt.print_freq == 0:
            mem = psutil.virtual_memory()
            #  print(f'{idx:8} - {mem.percent:5} - {mem.free/1024**3:10.2f} - {mem.available/1024**3:10.2f} - {mem.used/1024**3:10.2f}')
            #  mem_used.append(mem.used/1024**3)
            print("Train: [{0}][{1}/{2}]\t"
                  "BT {batch_time.val:.3f} ({batch_time.avg:.3f})\t"
                  "DT {data_time.val:.3f} ({data_time.avg:.3f})\t"
                  "loss {loss.val:.3f} ({loss.avg:.3f})\t"
                  "f1 {f1.val:.3f} ({f1.avg:.3f})\t"
                  "GS {graph_size.val:.3f} ({graph_size.avg:.3f})\t"
                  "mem {mem:.3f}".format(
                      epoch,
                      idx + 1,
                      n_batch,
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=loss_meter,
                      f1=f1_meter,
                      graph_size=graph_size,
                      mem=mem.used / 1024**3,
                  ))
            #  print(out[0].abs().max())

        # tensorboard logger
        if (idx + 1) % opt.tb_freq == 0:
            sw.add_scalar("ft_loss", loss_meter.avg, global_step)
            sw.add_scalar("ft_f1", f1_meter.avg, global_step)
            sw.add_scalar("graph_size", graph_size.avg, global_step)
            sw.add_scalar("lr", lr_this_step, global_step)
            sw.add_scalar("graph_size/max", max_num_nodes, global_step)
            sw.add_scalar("graph_size/max_edges", max_num_edges, global_step)
            #  sw.add_scalar(
            #      "learning_rate", optimizer.param_groups[0]["lr"], global_step
            #  )
            # Reset the windowed meters and extrema for the next window.
            loss_meter.reset()
            f1_meter.reset()
            graph_size.reset()
            max_num_nodes, max_num_edges = 0, 0
    return epoch_loss_meter.avg, epoch_f1_meter.avg