Example no. 1
    def forward(self, inputs):
        x = self.down_layer(inputs)

        gap = porch.nn.functional.adaptive_avg_pool2d(x, 1)
        gap_logit = self.gap_fc(gap.view(x.shape[0], -1))
        gap_weight = porch.Tensor(list(self.gap_fc.parameters())[0]).permute(1,0)
        gap = x * gap_weight.unsqueeze(2).unsqueeze(3)

        gmp = porch.nn.functional.adaptive_max_pool2d(x, 1)
        gmp_logit = self.gmp_fc(gmp.view(x.shape[0], -1))
        gmp_weight = porch.Tensor(list(self.gmp_fc.parameters())[0]).permute(1,0)
        gmp = x * gmp_weight.unsqueeze(2).unsqueeze(3)


        cam_logit = porch.cat([gap_logit, gmp_logit], 1)
        x = porch.cat([gap, gmp], 1)
        x = self.relu(self.conv1x1(x))
        x = porch.Tensor(x)
        heatmap = porch.sum(x, dim=1, keepdim=True)
        if self.light:
            x_ = porch.nn.functional.adaptive_avg_pool2d(x, 1)
            x_ = self.fc(x_.view(x_.shape[0], -1))
        else:
            x_ = self.fc(x.view(x.shape[0], -1))

        gamma, beta = self.gamma(x_), self.beta(x_)

        for i in range(self.n_res):
            x = getattr(self, "ResNetAdaILNBlock_" + str(i + 1))(x, gamma, beta)
        out = self.up_layer(x)

        return out, cam_logit, heatmap
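For reference, the gap/gmp weighting above is plain channel attention: the single row of the one-output fc layer is broadcast over the feature map. A minimal PyTorch sketch of the same broadcast (hypothetical sizes; porch mirrors the torch API):

import torch

B, C, H, W = 2, 64, 8, 8                    # hypothetical sizes
x = torch.randn(B, C, H, W)
gap_fc = torch.nn.Linear(C, 1, bias=False)  # plays the role of self.gap_fc

w = list(gap_fc.parameters())[0]            # (1, C) in PyTorch layout
attended = x * w.unsqueeze(2).unsqueeze(3)  # (1, C, 1, 1) broadcasts over (B, C, H, W)
assert attended.shape == (B, C, H, W)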
Example no. 2
    def forward(self, input):
        x = self.DownBlock(input)

        gap = torch.nn.functional.adaptive_avg_pool2d(x, 1)
        gap_logit = self.gap_fc(gap.view(x.shape[0], -1))
        gap_weight = list(self.gap_fc.parameters())[0]
        gap = x * gap_weight.unsqueeze(2).unsqueeze(3)

        gmp = torch.nn.functional.adaptive_max_pool2d(x, 1)
        gmp_logit = self.gmp_fc(gmp.view(x.shape[0], -1))
        gmp_weight = list(self.gmp_fc.parameters())[0]
        gmp = x * gmp_weight.unsqueeze(2).unsqueeze(3)

        cam_logit = torch.cat([gap_logit, gmp_logit], 1)
        x = torch.cat([gap, gmp], 1)
        x = self.relu(self.conv1x1(x))

        heatmap = torch.sum(x, dim=1, keepdim=True)

        if self.light:
            x_ = torch.nn.functional.adaptive_avg_pool2d(x, 1)
            x_ = self.FC(x_.view(x_.shape[0], -1))
        else:
            x_ = self.FC(x.view(x.shape[0], -1))
        gamma, beta = self.gamma(x_), self.beta(x_)

        for i in range(self.n_blocks):
            x = getattr(self, 'UpBlock1_' + str(i + 1))(x, gamma, beta)
        out = self.UpBlock2(x)

        return out, cam_logit, heatmap
Example no. 3
def video_ref(nets, args, x_src, x_ref, y_ref, fname):
    x_ref.stop_gradient = True
    y_ref.stop_gradient = True
    x_src.stop_gradient = True

    video = []
    s_ref = nets.style_encoder(x_ref, y_ref)
    s_prev = None
    for data_next in tqdm(zip(x_ref, y_ref, s_ref), 'video_ref', len(x_ref)):
        x_next, y_next, s_next = [
            porch.varbase_to_tensor(d).unsqueeze(0) for d in data_next
        ]
        if s_prev is None:
            x_prev, y_prev, s_prev = x_next, y_next, s_next
            continue
        if y_prev != y_next:
            x_prev, y_prev, s_prev = x_next, y_next, s_next
            continue

        interpolated = interpolate(nets, args, x_src, s_prev, s_next)
        entries = [x_prev, x_next]
        slided = slide(entries)  # (T, C, 256*2, 256)
        frames = porch.cat([slided, interpolated],
                           dim=3).cpu()  # (T, C, 256*2, 256*(batch+1))
        video.append(frames)
        x_prev, y_prev, s_prev = x_next, y_next, s_next

    # append the last frame 10 times
    for _ in range(10):
        video.append(frames[-1:])
    video = tensor2ndarray255(porch.cat(video))
    save_video(fname, video)
Example no. 4
def accumulate_inception_activations(sample, net, num_inception_images=50000):
  pool, logits, labels = [], [], []
  while (torch.cat(logits, 0).shape[0] if len(logits) else 0) < num_inception_images:
    with torch.no_grad():
      images, labels_val = sample()
      pool_val, logits_val = net(images.astype("float32"))
      pool += [pool_val]
      logits += [F.softmax(logits_val, 1)]
      labels += [labels_val]
  return torch.cat(pool, 0), torch.cat(logits, 0), torch.cat(labels, 0)
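Note that the loop above re-concatenates logits on every iteration just to read the running count. A self-contained PyTorch sketch of the same accumulation, with stub sample/net callables and hypothetical shapes (not the real FID pipeline):

import torch
import torch.nn.functional as F

def sample():
    return torch.randn(8, 3, 32, 32), torch.randint(0, 10, (8,))

def net(images):
    n = images.shape[0]
    return torch.randn(n, 2048), torch.randn(n, 1000)  # pool, logits

pool, logits, labels = [], [], []
while (torch.cat(logits, 0).shape[0] if len(logits) else 0) < 32:
    with torch.no_grad():
        images, labels_val = sample()
        pool_val, logits_val = net(images)
        pool += [pool_val]
        logits += [F.softmax(logits_val, 1)]
        labels += [labels_val]
assert torch.cat(logits, 0).shape[0] >= 32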
Example no. 5
def video_latent(nets, args, x_src, y_list, z_list, psi, fname):
    x_src.stop_gradient = True

    latent_dim = z_list[0].size(1)
    s_list = []
    for i, y_trg in enumerate(y_list):
        z_many = porch.randn(10000, latent_dim)
        y_many = porch.LongTensor(10000).fill_(y_trg[0])
        s_many = nets.mapping_network(z_many, y_many)
        s_avg = porch.mean(s_many, dim=0, keepdim=True)
        s_avg = s_avg.repeat(x_src.size(0), 1)

        for z_trg in z_list:
            s_trg = nets.mapping_network(z_trg, y_trg)
            s_trg = porch.lerp(s_avg, s_trg, psi)
            s_list.append(s_trg)

    s_prev = None
    video = []
    # interpolate between consecutive style codes
    for idx_ref, s_next in enumerate(tqdm(s_list, 'video_latent',
                                          len(s_list))):
        if s_prev is None:
            s_prev = s_next
            continue
        if idx_ref % len(z_list) == 0:
            s_prev = s_next
            continue
        frames = interpolate(nets, args, x_src, s_prev, s_next).cpu()
        video.append(frames)
        s_prev = s_next
    # append the last frame 10 times
    for _ in range(10):
        video.append(frames[-1:])
    video = tensor2ndarray255(porch.cat(video))
    save_video(fname, video)
Example no. 6
def _rwr_trace_to_dgl_graph(g,
                            seed,
                            trace,
                            positional_embedding_size,
                            entire_graph=False):
    subv = torch.unique(torch.cat(trace)).detach().cpu().numpy().tolist()
    try:
        subv.remove(seed)
    except ValueError:
        pass
    subv = [seed] + subv
    if entire_graph:
        subg = g.subgraph(g.nodes())
    else:
        subg = g.subgraph(subv)

    subg = _add_undirected_graph_positional_embedding(
        subg, positional_embedding_size)

    subg.ndata["seed"] = torch.zeros(subg.number_of_nodes(), dtype=torch.long)
    if entire_graph:
        subg.ndata["seed"][seed] = 1
    else:
        subg.ndata["seed"][0] = 1
    return subg
Example no. 7
        def __init__(self,
                     height=64,
                     width=64,
                     with_r=False,
                     with_boundary=False):
            super(AddCoordsTh, self).__init__()
            self.with_r = with_r
            self.with_boundary = with_boundary
            device = torch.device(
                'cuda' if torch.cuda.is_available() else 'cpu')

            with torch.no_grad():
                x_coords = torch.arange(height).unsqueeze(1).expand(
                    height, width).float()
                y_coords = torch.arange(width).unsqueeze(0).expand(
                    height, width).float()
                x_coords = (x_coords / (height - 1)) * 2 - 1
                y_coords = (y_coords / (width - 1)) * 2 - 1
                coords = torch.stack([x_coords, y_coords],
                                     dim=0)  # (2, height, width)

                if self.with_r:
                    rr = torch.sqrt(
                        torch.pow(x_coords, 2) +
                        torch.pow(y_coords, 2))  # (height, width)
                    rr = (rr / torch.max(rr)).unsqueeze(0)
                    coords = torch.cat([coords, rr], dim=0)

                self.coords = coords.unsqueeze(0).to(
                    device)  # (1, 2 or 3, height, width)
                self.x_coords = x_coords.to(device)
                self.y_coords = y_coords.to(device)
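The same coordinate channels can be built standalone; a small PyTorch check of the normalization to [-1, 1], with a hypothetical 4x6 map:

import torch

height, width = 4, 6
xs = torch.arange(height).unsqueeze(1).expand(height, width).float()
ys = torch.arange(width).unsqueeze(0).expand(height, width).float()
xs = xs / (height - 1) * 2 - 1   # rows normalized to [-1, 1]
ys = ys / (width - 1) * 2 - 1    # cols normalized to [-1, 1]
coords = torch.stack([xs, ys], dim=0).unsqueeze(0)  # (1, 2, height, width)
assert coords.min().item() == -1.0 and coords.max().item() == 1.0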
Example no. 8
  def forward(self, z, y):
    # If hierarchical, concatenate zs and ys
    if self.hier:
      zs = torch.split(z, self.z_chunk_size, 1)
      z = zs[0]
      ys = [torch.cat([y, item], 1) for item in zs[1:]]
    else:
      ys = [y] * len(self.blocks)
      
    # First linear layer
    h = torch.Tensor(self.linear(z))


    # Reshape
    h = h.view(h.size(0), -1, self.bottom_width, self.bottom_width)
    
    # Loop over blocks
    for index, blocklist in enumerate(self.blocks):
      # Second inner loop in case block has multiple layers
      for block in blocklist:
        h = block(h, torch.Tensor(ys[index]))

        
    # Apply batchnorm-relu-conv-tanh at output
    return torch.tanh(self.output_layer(h))
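For reference, the hierarchical split above cuts z into equal chunks, feeds the first to the linear layer, and pairs each remaining chunk with y, one per block. A quick shape check with hypothetical BigGAN-like sizes:

import torch

dim_z, z_chunk_size, n_blocks, dim_y = 120, 20, 5, 128
z = torch.randn(4, dim_z)
y = torch.randn(4, dim_y)

zs = torch.split(z, z_chunk_size, 1)            # six chunks of width 20
z0 = zs[0]                                      # conditions the first linear layer
ys = [torch.cat([y, item], 1) for item in zs[1:]]
assert z0.shape == (4, z_chunk_size)
assert len(ys) == n_blocks
assert all(t.shape == (4, dim_y + z_chunk_size) for t in ys)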
Example no. 9
def translate_using_latent(nets, args, x_src, y_trg_list, z_trg_list, psi,
                           filename):
    n_images = 100
    x_src.stop_gradient = True
    N, C, H, W = x_src.shape
    latent_dim = z_trg_list[0].shape[1]
    x_concat = [x_src]
    masks = nets.fan.get_heatmap(x_src) if args.w_hpf > 0 else None

    for i, y_trg in enumerate(y_trg_list):
        z_many = porch.randn(n_images, latent_dim)
        # y_many = porch.LongTensor(10000).fill_(y_trg[0])
        y_many = np.empty([n_images])
        y_many.fill(y_trg[0].numpy()[0])
        y_many = to_variable(y_many)
        s_many = nets.mapping_network(z_many, y_many)
        s_avg = porch.mean(s_many, dim=0, keepdim=True)
        s_avg = s_avg.repeat(N, 1)

        for z_trg in z_trg_list:
            s_trg = nets.mapping_network(z_trg, y_trg)
            s_trg = porch.lerp(s_avg, s_trg, psi)
            x_fake = nets.generator(x_src, s_trg, masks=masks)
            x_concat += [x_fake]

    x_concat = porch.cat(x_concat, dim=0)
    save_image(x_concat, N, filename)
Example no. 10
    def forward(self,
                z,
                gy,
                x=None,
                dy=None,
                train_G=False,
                return_G_z=False,
                split_D=False):
        # If training G, enable grad tape
        if train_G:
            self.G.train()
        else:
            self.G.eval()
        # Get Generator output given noise

        G_z = self.G(z, self.G.shared(gy))
        # Cast as necessary

        # Split_D means to run D once with real data and once with fake,
        # rather than concatenating along the batch dimension.
        if split_D:
            D_fake = self.D(G_z, gy)
            if x is not None:
                D_real = self.D(x, dy)
                return D_fake, D_real
            else:
                if return_G_z:
                    return D_fake, G_z
                else:
                    return D_fake
        # If real data is provided, concatenate it with the Generator's output
        # along the batch dimension for improved efficiency.
        else:
            if x is not None and x.shape[-1] != G_z.shape[-1]:
                x = F.interpolate(x, size=G_z.shape[-2:])
            D_input = torch.cat([G_z, x], 0) if x is not None else G_z
            D_class = torch.cat([gy, dy], 0) if dy is not None else gy
            # Get Discriminator output
            D_out = self.D(D_input, D_class)
            if x is not None:
                return torch.split(
                    D_out, [G_z.shape[0], x.shape[0]])  # D_fake, D_real
            else:
                if return_G_z:
                    return D_out, G_z
                else:
                    return D_out
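For reference, the concatenated-batch path above runs D once on fake and real inputs together and splits the scores afterwards; a minimal sketch with hypothetical batch sizes:

import torch

G_z, x = torch.randn(6, 1), torch.randn(4, 1)             # fake and real scores
D_out = torch.cat([G_z, x], 0)                            # one pass over both
D_fake, D_real = torch.split(D_out, [G_z.shape[0], x.shape[0]])
assert D_fake.shape[0] == 6 and D_real.shape[0] == 4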
Example no. 11
        def forward(self, x, heatmap=None):
            """
            x: (batch, c, x_dim, y_dim)
            """
            coords = self.coords.repeat(x.size(0), 1, 1, 1)

            if self.with_boundary and heatmap is not None:
                boundary_channel = torch.clamp(heatmap[:, -1:, :, :], 0.0, 1.0)
                zero_tensor = torch.zeros_like(self.x_coords)
                xx_boundary_channel = torch.where(boundary_channel > 0.05, self.x_coords, zero_tensor).to(
                    zero_tensor.device)
                yy_boundary_channel = torch.where(boundary_channel > 0.05, self.y_coords, zero_tensor).to(
                    zero_tensor.device)
                coords = torch.cat([coords, xx_boundary_channel, yy_boundary_channel], dim=1)

            x_and_coords = torch.cat([x, coords], dim=1)
            return x_and_coords
Example no. 12
 def __iter__(self):
     degrees = torch.cat([g.in_degrees().double() ** 0.75 for g in self.graphs])
     prob = degrees / torch.sum(degrees)
     samples = np.random.choice(
         self.length, size=self.num_samples, replace=True, p=prob.numpy()
     )
     for idx in samples:
         yield self.__getitem__(idx)
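The sampler above draws indices with probability proportional to degree ** 0.75, the word2vec-style smoothing. A minimal check that the weights form a valid distribution, with hypothetical degrees:

import numpy as np
import torch

degrees = torch.tensor([1.0, 4.0, 9.0, 16.0], dtype=torch.float64) ** 0.75
prob = degrees / torch.sum(degrees)
samples = np.random.choice(len(prob), size=10, replace=True, p=prob.numpy())
assert len(samples) == 10 and abs(prob.sum().item() - 1.0) < 1e-9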
Example no. 13
def interpolate(nets, args, x_src, s_prev, s_next):
    ''' returns T x C x H x W '''
    B = x_src.shape[0]
    frames = []
    masks = nets.fan.get_heatmap(x_src) if args.w_hpf > 0 else None
    alphas = get_alphas()

    for alpha in alphas:
        s_ref = porch.lerp(s_prev, s_next, alpha)
        x_fake = nets.generator(x_src, s_ref, masks=masks)
        entries = porch.cat([x_src, x_fake], dim=2)
        frame = porchvision.utils.make_grid(entries,
                                            nrow=B,
                                            padding=0,
                                            pad_value=-1).unsqueeze(0)
        frames.append(frame)
    frames = porch.cat(frames)
    return frames
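porch.lerp above mirrors torch.lerp, a weighted blend (1 - alpha) * s_prev + alpha * s_next of the two style codes. get_alphas is not shown in this excerpt, so evenly spaced weights are assumed in this sketch:

import torch

s_prev, s_next = torch.randn(1, 64), torch.randn(1, 64)  # hypothetical style codes
frames = []
for alpha in torch.linspace(0, 1, steps=5):
    s_ref = torch.lerp(s_prev, s_next, alpha)  # blend of the two style codes
    frames.append(s_ref)
assert torch.allclose(frames[0], s_prev) and torch.allclose(frames[-1], s_next)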
Example no. 14
 def _transform_input(self, x):
     if self.transform_input:
         x_ch0 = torch.unsqueeze(x[:, 0],
                                 1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
         x_ch1 = torch.unsqueeze(x[:, 1],
                                 1) * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
         x_ch2 = torch.unsqueeze(x[:, 2],
                                 1) * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
         x = torch.cat((x_ch0, x_ch1, x_ch2), 1)
     return x
Example no. 15
 def get_landmark(self, x):
     ''' outputs landmarks of x.shape '''
     x.stop_gradient = True
     heatmaps = self.get_heatmap(x, b_preprocess=False)
     landmarks = []
     for i in range(x.size(0)):
         pred_landmarks = get_preds_fromhm(heatmaps[i].unsqueeze(0))
         landmarks.append(pred_landmarks)
     scale_factor = x.size(2) // heatmaps.size(2)
     landmarks = torch.cat(landmarks) * scale_factor
     return landmarks
Example no. 16
def translate_using_reference(nets, args, x_src, x_ref, y_ref, filename):
    x_ref.stop_gradient = True
    y_ref.stop_gradient = True
    x_src.stop_gradient = True

    N, C, H, W = x_src.shape
    wb = porch.ones(1, C, H, W)
    x_src_with_wb = porch.cat([wb, x_src], dim=0)

    masks = nets.fan.get_heatmap(x_src) if args.w_hpf > 0 else None
    s_ref = nets.style_encoder(x_ref, y_ref)
    s_ref_list = s_ref.unsqueeze(1).repeat(1, N, 1)
    x_concat = [x_src_with_wb]
    for i, s_ref in enumerate(s_ref_list):
        x_fake = nets.generator(x_src, s_ref, masks=masks)
        x_fake_with_ref = porch.cat([x_ref[i:i + 1], x_fake], dim=0)
        x_concat += [x_fake_with_ref]

    x_concat = porch.cat(x_concat, dim=0)
    save_image(x_concat, N + 1, filename)
    del x_concat
Example no. 17
    def forward(self, inputs):
        x = self.model(inputs)

        gap = porch.nn.functional.adaptive_avg_pool2d(x, 1)
        gap_logit = self.gap_fc(gap.view(x.shape[0], -1))
        gap_weight = list(self.gap_fc.parameters())[0]
        gap = x * porch.Tensor(gap_weight).unsqueeze(0).unsqueeze(3)

        gmp = porch.nn.functional.adaptive_max_pool2d(x, 1)
        gmp_logit = self.gmp_fc(gmp.view(x.shape[0], -1))
        gmp_weight = list(self.gmp_fc.parameters())[0]
        gmp = x * porch.Tensor(gmp_weight).unsqueeze(0).unsqueeze(3)

        cam_logit = porch.cat([gap_logit, gmp_logit], 1)
        x = porch.cat([gap, gmp], 1)
        x = self.leaky_relu(self.conv1x1(x))

        heatmap = porch.sum(x, dim=1, keepdim=True)
        x = self.pad(x)
        out = self.conv(x)

        return out, cam_logit, heatmap
Example no. 18
def preprocess(x):
    """Preprocess 98-dimensional heatmaps."""
    N, C, H, W = x.size()
    x = truncate(x)
    x = normalize(x)

    sw = H // 256
    operations = Munch(chin=OPPAIR(0, 3),
                       eyebrows=OPPAIR(-7 * sw, 2),
                       nostrils=OPPAIR(8 * sw, 4),
                       lipupper=OPPAIR(-8 * sw, 4),
                       liplower=OPPAIR(8 * sw, 4),
                       lipinner=OPPAIR(-2 * sw, 3))

    for part, ops in operations.items():
        start, end = index_map[part]
        x[:, start:end] = resize(shift(x[:, start:end], ops.shift), ops.resize)

    zero_out = porch.cat([
        porch.arange(0, index_map.chin.start),
        porch.arange(index_map.chin.end, 33),
        porch.LongTensor([
            index_map.eyebrowsedges.start, index_map.eyebrowsedges.end,
            index_map.lipedges.start, index_map.lipedges.end
        ])
    ])
    x[:, zero_out] = 0

    start, end = index_map.nose
    x[:, start + 1:end] = shift(x[:, start + 1:end], 4 * sw)
    x[:, start:end] = resize(x[:, start:end], 1)

    start, end = index_map.eyes
    x[:, start:end] = resize(x[:, start:end], 1)
    x[:, start:end] = resize(shift(x[:, start:end], -8), 3) + \
        shift(x[:, start:end], -24)

    # Second-level mask
    x2 = deepcopy(x)
    x2[:, index_map.chin.start:index_map.chin.end] = 0  # start:end was 0:33
    x2[:, index_map.lipedges.start:index_map.lipinner.end] = 0  # start:end was 76:96
    x2[:, index_map.eyebrows.start:index_map.eyebrows.end] = 0  # start:end was 33:51

    x = porch.sum(x, dim=1, keepdim=True)  # (N, 1, H, W)
    x2 = porch.sum(x2, dim=1, keepdim=True)  # mask without faceline and mouth

    x[x != x] = 0  # set nan to zero
    x2[x != x] = 0  # set nan to zero
    return x.clamp_(0, 1), x2.clamp_(0, 1)
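The x[x != x] = 0 idiom above exploits NaN != NaN to zero out NaN entries in place; a one-line check in PyTorch:

import torch

t = torch.tensor([1.0, float("nan"), 3.0])
t[t != t] = 0                      # only the NaN entry fails t == t
assert t.tolist() == [1.0, 0.0, 3.0]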
Example no. 19
    def _forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch3x3 = self.branch3x3_1(x)
        branch3x3 = [
            self.branch3x3_2a(branch3x3),
            self.branch3x3_2b(branch3x3),
        ]
        branch3x3 = torch.cat(branch3x3, 1)

        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = [
            self.branch3x3dbl_3a(branch3x3dbl),
            self.branch3x3dbl_3b(branch3x3dbl),
        ]
        branch3x3dbl = torch.cat(branch3x3dbl, 1)

        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
        return outputs
Example no. 20
 def forward(self, z, y):
   # If hierarchical, concatenate zs and ys
   if self.hier:
     z = torch.cat([y, z], 1)      
     y = z
   # First linear layer
   h = self.linear(z)
   # Reshape
   h = h.view(h.size(0), -1, self.bottom_width, self.bottom_width)    
   # Loop over blocks
   for index, blocklist in enumerate(self.blocks):
     # Second inner loop in case block has multiple layers
     for block in blocklist:
       h = block(h, y)
       
   # Apply batchnorm-relu-conv-tanh at output
   return torch.tanh(self.output_layer(h))
Example no. 21
def translate_and_reconstruct(nets, args, x_src, y_src, x_ref, y_ref,
                              filename):
    x_ref.stop_gradient = True
    y_ref.stop_gradient = True
    x_src.stop_gradient = True
    y_src.stop_gradient = True
    N, C, H, W = x_src.shape
    s_ref = nets.style_encoder(x_ref, y_ref)
    masks = nets.fan.get_heatmap(x_src) if args.w_hpf > 0 else None
    x_fake = nets.generator(x_src, s_ref, masks=masks)
    s_src = nets.style_encoder(x_src, y_src)
    masks = nets.fan.get_heatmap(x_fake) if args.w_hpf > 0 else None
    x_rec = nets.generator(x_fake, s_src, masks=masks)
    x_concat = [x_src, x_ref, x_fake, x_rec]
    x_concat = porch.cat(x_concat, dim=0)
    save_image(x_concat, N, filename)
    del x_concat
Example no. 22
def calculate_fid_given_paths(paths, img_size=256, batch_size=50):
    print('Calculating FID given paths %s and %s...' % (paths[0], paths[1]))
    device = porch.device('cuda' if porch.cuda.is_available() else 'cpu')
    inception = InceptionV3("./metrics/inception_v3_pretrained.pdparams")
    inception.eval()
    loaders = [get_eval_loader(path, img_size, batch_size) for path in paths]

    mu, cov = [], []
    for loader in loaders:
        actvs = []
        for x in tqdm(loader, total=len(loader)):
            x = porch.varbase_to_tensor(x[0])
            actv = inception(x)
            actvs.append(actv)
        actvs = porch.cat(actvs, dim=0).numpy()
        mu.append(np.mean(actvs, axis=0))
        cov.append(np.cov(actvs, rowvar=False))
    fid_value = frechet_distance(mu[0], cov[0], mu[1], cov[1])
    return fid_value.astype(float)
Example no. 23
def shift(x, N):
    """Shift N pixels up or down."""

    up = N >= 0
    N = abs(N)
    _, _, H, W = x.shape
    if N == 0:
        return x
    if up:
        head = torch.arange(H - N) + N
        tail = torch.arange(N)
    else:
        head = torch.arange(N) + (H - N)
        tail = torch.arange(H - N)

    # permutation indices
    perm = torch.cat([head, tail])
    out = torch.stack([x[:, :, int(a)] for a in perm.numpy()], dim=2)
    return out
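The row permutation above is equivalent to torch.roll along the height axis (shifting up by N is a roll by -N); a quick equivalence check with hypothetical sizes:

import torch

x = torch.randn(1, 1, 8, 5)
N = 3
perm = torch.cat([torch.arange(8 - N) + N, torch.arange(N)])
out = torch.stack([x[:, :, int(a)] for a in perm.numpy()], dim=2)
assert torch.equal(out, torch.roll(x, shifts=-N, dims=2))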
Example no. 24
def shift(x, N):
    """Shift N pixels up or down."""
    up = N >= 0
    N = abs(N)
    _, _, H, W = x.size()
    if N == 0:  # no shift; mirrors the early return in the torch version above
        return x

    if up:
        head = porch.arange(H - N) + N
        tail = porch.arange(N)
    else:
        head = porch.arange(N) + (H - N)
        tail = porch.arange(H - N)

    # permutation indices
    perm = porch.cat([head, tail]).to(x.device)
    out = x[:, :, perm, :]
    return out
Example no. 25
        def forward(self, x):
            residual = x

            out1 = self.bn1(x)
            out1 = F.relu(out1, True)
            out1 = self.conv1(out1)

            out2 = self.bn2(out1)
            out2 = F.relu(out2, True)
            out2 = self.conv2(out2)

            out3 = self.bn3(out2)
            out3 = F.relu(out3, True)
            out3 = self.conv3(out3)

            out3 = torch.cat((out1, out2, out3), 1)
            if self.downsample is not None:
                residual = self.downsample(residual)
            out3 += residual
            return out3
Example no. 26
def test_moco(train_loader, model, opt):
    """
    one epoch training for moco
    """

    model.eval()

    emb_list = []
    for idx, batch in enumerate(train_loader):
        graph_q, graph_k = batch
        bsz = graph_q.batch_size
        graph_q.to(opt.device)
        graph_k.to(opt.device)

        with torch.no_grad():
            feat_q = model(graph_q)
            feat_k = model(graph_k)

        assert feat_q.shape == (bsz, opt.hidden_size)
        emb_list.append(((feat_q + feat_k) / 2).detach().cpu())
    return torch.cat(emb_list)
Example no. 27
def slide(entries, margin=32):
    """Returns a sliding reference window.
    Args:
        entries: a list containing two reference images, x_prev and x_next,
                 each of which has shape (1, 3, 256, 256)
    Returns:
        canvas: output slide of shape (num_frames, 3, 256*2, 256+margin)
    """
    _, C, H, W = entries[0].shape
    alphas = get_alphas()
    T = len(alphas)  # number of frames

    canvas = -porch.ones(T, C, H * 2, W + margin)
    merged = porch.cat(entries, dim=2)  # (1, 3, 512, 256)
    for t, alpha in enumerate(alphas):
        top = int(H * (1 - alpha))  # top, bottom for canvas
        bottom = H * 2
        m_top = 0  # top, bottom for merged
        m_bottom = 2 * H - top
        canvas[t, :, top:bottom, :W] = merged[:, :, m_top:m_bottom, :]
    return canvas
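As a quick sanity check of the merge step above (dummy reference images; T would normally come from get_alphas()):

import torch

entries = [torch.randn(1, 3, 256, 256), torch.randn(1, 3, 256, 256)]
merged = torch.cat(entries, dim=2)             # two references stacked vertically
assert merged.shape == (1, 3, 512, 256)

T, margin = 5, 32                              # hypothetical frame count
canvas = -torch.ones(T, 3, 512, 256 + margin)  # blank slide, padded on the right
assert canvas.shape == (5, 3, 512, 288)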
Example no. 28
def make_grid(tensor,
              nrow=8,
              padding=2,
              normalize=False,
              range=None,
              scale_each=False,
              pad_value=0):
    """Make a grid of images.

    Args:
        tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)
            or a list of images all of the same size.
        nrow (int, optional): Number of images displayed in each row of the grid.
            The final grid size is ``(B / nrow, nrow)``. Default: ``8``.
        padding (int, optional): amount of padding. Default: ``2``.
        normalize (bool, optional): If True, shift the image to the range (0, 1),
            by the min and max values specified by :attr:`range`. Default: ``False``.
        range (tuple, optional): tuple (min, max) where min and max are numbers,
            then these numbers are used to normalize the image. By default, min and max
            are computed from the tensor.
        scale_each (bool, optional): If ``True``, scale each image in the batch of
            images separately rather than the (min, max) over all images. Default: ``False``.
        pad_value (float, optional): Value for the padded pixels. Default: ``0``.

    Example:
        See this notebook `here <https://gist.github.com/anonymous/bf16430f7750c023141c562f3e9f2a91>`_

    """

    if not (torch.is_tensor(tensor) or
            (isinstance(tensor, list)
             and all(torch.is_tensor(t) for t in tensor))):
        raise TypeError('tensor or list of tensors expected, got {}'.format(
            type(tensor)))

    # if list of tensors, convert to a 4D mini-batch Tensor
    if isinstance(tensor, list):
        tensor = torch.stack(tensor, dim=0)
    tensor = torch.Tensor(tensor)

    if tensor.dim() == 2:  # single image H x W
        tensor = tensor.unsqueeze(0)
    if tensor.dim() == 3:  # single image
        if tensor.size(0) == 1:  # if single-channel, convert to 3-channel
            tensor = torch.cat((tensor, tensor, tensor), 0)
        tensor = tensor.unsqueeze(0)

    if tensor.dim() == 4 and tensor.size(1) == 1:  # single-channel images
        tensor = torch.cat((tensor, tensor, tensor), 1)

    if normalize is True:
        tensor = tensor.clone()  # avoid modifying tensor in-place
        if range is not None:
            assert isinstance(range, tuple), \
                "range has to be a tuple (min, max) if specified. min and max are numbers"

        def norm_ip(img, min, max):
            img.clamp_(min=min, max=max)
            img.add_(-min).div_(max - min + 1e-5)

        def norm_range(t, range):
            if range is not None:
                norm_ip(t, range[0], range[1])
            else:
                norm_ip(t, float(t.min()), float(t.max()))

        if scale_each is True:
            for t in tensor:  # loop over mini-batch dimension
                norm_range(t, range)
        else:
            norm_range(tensor, range)

    if tensor.size(0) == 1:
        return tensor.squeeze(0)

    # make the mini-batch of images into a grid
    nmaps = tensor.size(0)
    xmaps = min(nrow, nmaps)
    ymaps = int(math.ceil(float(nmaps) / xmaps))
    height, width = int(tensor.size(2) +
                        padding), int(tensor.size(3) + padding)
    num_channels = tensor.size(1)
    grid = np.zeros((num_channels, height * ymaps + padding,
                     width * xmaps + padding)) + pad_value
    k = 0
    for y in irange(ymaps):
        for x in irange(xmaps):
            if k >= nmaps:
                break
            grid[:, (y * height + padding):(y * height + padding + height -
                                            padding),
                 (x * width + padding):(x * width + padding + width -
                                        padding)] = tensor[k].numpy()

            k = k + 1
    return torch.Tensor(grid)
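Spelled out, the grid geometry computed above for a hypothetical batch:

import math

B, C, H, W, nrow, padding = 10, 3, 32, 32, 4, 2
xmaps = min(nrow, B)                      # 4 columns
ymaps = int(math.ceil(float(B) / xmaps))  # 3 rows
grid_h = (H + padding) * ymaps + padding  # each cell padded, plus a leading pad
grid_w = (W + padding) * xmaps + padding
assert (xmaps, ymaps, grid_h, grid_w) == (4, 3, 104, 138)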
Example no. 29
 def shortcut(self, x):
   if self.downsample:
     x = self.downsample(x)
   if self.learnable_sc:
     x = torch.cat([x, self.conv_sc(x)], 1)    
   return x
Example no. 30
    def forward(self, g, return_all_outputs=False):
        """Predict molecule labels

        Parameters
        ----------
        g : DGLGraph
            Input DGLGraph for molecule(s)
        n_feat : tensor of dtype float32 and shape (B1, D1)
            Node features. B1 for number of nodes and D1 for
            the node feature size.
        e_feat : tensor of dtype float32 and shape (B2, D2)
            Edge features. B2 for number of edges and D2 for
            the edge feature size.

        Returns
        -------
        res : Predicted labels
        """

        # nfreq = g.ndata["nfreq"]
        if self.degree_input:
            device = g.ndata["seed"].device
            degrees = g.in_degrees()
            if device != torch.device("cpu"):
                degrees = degrees.to(device)

            n_feat = torch.cat(
                (
                    g.ndata["pos_undirected"],
                    self.degree_embedding(torch.clamp(degrees, 0, self.max_degree)),
                    g.ndata["seed"].unsqueeze(1).float(),
                ),
                dim=-1,
            )
        else:
            n_feat = torch.cat(
                (
                    g.ndata["pos_undirected"],
                    # g.ndata["pos_directed"],
                    # self.node_freq_embedding(nfreq.clamp(0, self.max_node_freq)),
                    # self.degree_embedding(degrees.clamp(0, self.max_degree)),
                    g.ndata["seed"].unsqueeze(1).float(),
                    # nfreq.unsqueeze(1).float() / self.max_node_freq,
                    # degrees.unsqueeze(1).float() / self.max_degree,
                ),
                dim=-1,
            )

        # efreq = g.edata["efreq"]
        # e_feat = torch.cat(
        #     (
        #         self.edge_freq_embedding(efreq.clamp(0, self.max_edge_freq)),
        #         efreq.unsqueeze(1).float() / self.max_edge_freq,
        #     ),
        #     dim=-1,
        # )
        e_feat = None
        if self.gnn_model == "gin":
            x, all_outputs = self.gnn(g, n_feat, e_feat)
        else:
            x, all_outputs = self.gnn(g, n_feat, e_feat), None
            x = self.set2set(g, x)
            x = self.lin_readout(x)
        if self.norm:
            x = F.normalize(x, p=2, dim=-1, eps=1e-5)
        if return_all_outputs:
            return x, all_outputs
        else:
            return x
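For reference, the degree-embedding lookup above clamps degrees into the table's index range before indexing; a minimal sketch with hypothetical sizes:

import torch

max_degree, emb_dim = 16, 8
degree_embedding = torch.nn.Embedding(max_degree + 1, emb_dim)
degrees = torch.tensor([0, 3, 250])                 # 250 exceeds the table
emb = degree_embedding(torch.clamp(degrees, 0, max_degree))
assert emb.shape == (3, emb_dim)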