def __init__(self, output_size, eps=1e-5, momentum=0.1,
             cross_replica=False, mybn=False):
    super(bn, self).__init__()
    self.output_size = output_size
    # Prepare gain and bias layers
    self.gain = torch.nn.Parameter(torch.ones(output_size))
    self.bias = torch.nn.Parameter(torch.zeros(output_size))
    # epsilon to avoid dividing by 0
    self.eps = eps
    # Momentum
    self.momentum = momentum
    # Use cross-replica batchnorm?
    self.cross_replica = cross_replica
    # Use my batchnorm?
    self.mybn = mybn

    if self.cross_replica:
        self.bn = SyncBN2d(output_size, eps=self.eps, momentum=self.momentum, affine=False)
    elif mybn:
        self.bn = myBN(output_size, self.eps, self.momentum)
    # Register running-stat tensors if neither of the above
    else:
        self.stored_mean = torch.nn.Parameter(torch.zeros(output_size))
        self.stored_var = torch.nn.Parameter(torch.ones(output_size))
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True,
             track_running_stats=True):
    super(_BatchNormBase, self).__init__()
    self.num_features = num_features
    self.eps = eps
    self.momentum = momentum
    self.affine = affine
    self.track_running_stats = track_running_stats
    if self.affine:
        self.weight = Parameter(torch.ones(num_features))
        self.bias = Parameter(torch.zeros(num_features))
    else:
        self.register_parameter('weight', None)
        self.register_parameter('bias', None)
    if self.track_running_stats:
        self.register_buffer('running_mean', torch.zeros(num_features))
        self.register_buffer('running_var', torch.ones(num_features))
        self.register_buffer('num_batches_tracked', torch.tensor(0, dtype=torch.long))
    else:
        self.register_parameter('running_mean', None)
        self.register_parameter('running_var', None)
        self.register_parameter('num_batches_tracked', None)
    self.reset_parameters()
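# For context, a minimal stand-alone sketch (plain PyTorch, not the class above) of how
# affine weight/bias parameters and running_mean/running_var buffers like the ones
# registered here are typically consumed through F.batch_norm: batch statistics in
# training mode, the stored running statistics in eval mode. The toy tensors below are
# illustrative stand-ins, not the module's own attributes.
import torch
import torch.nn.functional as F

num_features = 4
weight = torch.ones(num_features)          # affine scale, initialised to 1
bias = torch.zeros(num_features)           # affine shift, initialised to 0
running_mean = torch.zeros(num_features)   # buffer: running mean
running_var = torch.ones(num_features)     # buffer: running variance

x = torch.randn(8, num_features, 16, 16)

# Training-mode call: batch statistics are used and the running buffers
# are updated in place with the given momentum.
y_train = F.batch_norm(x, running_mean, running_var, weight, bias,
                       training=True, momentum=0.1, eps=1e-5)

# Eval-mode call: the stored running statistics are used instead.
y_eval = F.batch_norm(x, running_mean, running_var, weight, bias,
                      training=False, momentum=0.1, eps=1e-5)
print(y_train.shape, y_eval.shape)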
def __init__(self, num_channels, eps=1e-5, momentum=0.1):
    super(myBN, self).__init__()
    # momentum for updating running stats
    self.momentum = momentum
    # epsilon to avoid dividing by 0
    self.eps = eps
    # Running-stat tensors
    self.stored_mean = torch.nn.Parameter(torch.zeros(num_channels))
    self.stored_var = torch.nn.Parameter(torch.ones(num_channels))
    self.accumulation_counter = torch.nn.Parameter(torch.zeros(1))
    # Accumulate running means and vars
    self.accumulate_standing = False
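# The buffers above support two update modes. A minimal sketch of that technique,
# assuming the usual semantics of accumulate_standing (the class's own forward is not
# shown here); update_stats and its arguments are illustrative names.
import torch

def update_stats(stored_mean, stored_var, counter, batch_mean, batch_var,
                 momentum=0.1, accumulate_standing=False):
    """Illustrative update of the buffers above (not the class's own method).

    - accumulate_standing=False: exponential moving average with `momentum`.
    - accumulate_standing=True: plain sums plus a counter, so "standing"
      statistics can later be averaged as stored_mean / counter.
    """
    if accumulate_standing:
        stored_mean += batch_mean
        stored_var += batch_var
        counter += 1.0
    else:
        stored_mean.mul_(1 - momentum).add_(momentum * batch_mean)
        stored_var.mul_(1 - momentum).add_(momentum * batch_var)
    return stored_mean, stored_var, counter

mean = torch.zeros(8)
var = torch.ones(8)
count = torch.zeros(1)
x = torch.randn(32, 8)
update_stats(mean, var, count, x.mean(0), x.var(0, unbiased=False))
print(mean, var, count)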
def __init__(self, num_svs, num_itrs, num_outputs, transpose=False, eps=1e-12):
    # Number of power iterations per step
    self.num_itrs = num_itrs
    # Number of singular values
    self.num_svs = num_svs
    # Transposed?
    self.transpose = transpose
    # Epsilon value for avoiding divide-by-0
    self.eps = eps
    self.name = "%d_%d_%d" % (num_svs, num_itrs, num_outputs)
    # Register a singular vector and a singular value estimate for each sv
    for i in range(self.num_svs):
        self.__setattr__('u%d' % i, torch.nn.Parameter(torch.randn(1, num_outputs)))
        self.__setattr__('sv%d' % i, torch.nn.Parameter(torch.ones(1)))
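# The u%d / sv%d tensors registered above hold the state for spectral-norm power
# iteration. A minimal stand-alone sketch of that technique, assuming a 2-D weight of
# shape (num_outputs, -1); power_iteration_step, W and u are illustrative names, not
# part of the class.
import torch
import torch.nn.functional as F

def power_iteration_step(W, u, eps=1e-12):
    """One power-iteration step estimating the largest singular value of W.

    W: (out, in) weight matrix; u: (1, out) current left-singular-vector estimate.
    Returns the updated u, the right vector v, and the singular value estimate.
    """
    with torch.no_grad():
        v = F.normalize(torch.matmul(u, W), eps=eps)        # (1, in)
        u = F.normalize(torch.matmul(v, W.t()), eps=eps)    # (1, out)
    sigma = torch.sum(u @ W * v)                            # u W v^T
    return u, v, sigma

W = torch.randn(64, 128)
u = torch.randn(1, 64)
for _ in range(5):
    u, v, sigma = power_iteration_step(W, u)
print(sigma, torch.linalg.svdvals(W)[0])  # estimate vs. exact top singular value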
def slide(entries, margin=32):
    """Returns a sliding reference window.

    Args:
        entries: a list containing two reference images, x_prev and x_next,
                 each of shape (1, 3, 256, 256)
    Returns:
        canvas: output slide of shape (num_frames, 3, 256*2, 256+margin)
    """
    _, C, H, W = entries[0].shape
    alphas = get_alphas()
    T = len(alphas)  # number of frames

    canvas = -porch.ones(T, C, H * 2, W + margin)
    merged = porch.cat(entries, dim=2)  # (1, 3, 512, 256)
    for t, alpha in enumerate(alphas):
        top = int(H * (1 - alpha))  # top, bottom for canvas
        bottom = H * 2
        m_top = 0  # top, bottom for merged
        m_bottom = 2 * H - top
        canvas[t, :, top:bottom, :W] = merged[:, :, m_top:m_bottom, :]
    return canvas
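# A stand-alone illustration of the sliding-window logic above, using plain torch and a
# caller-supplied alpha schedule (the real code calls get_alphas(), which is not shown
# here; the linear schedule below is only an assumption for the demo).
import torch

def slide_demo(entries, alphas, margin=32):
    """Illustrative re-implementation of the cropping loop above."""
    _, C, H, W = entries[0].shape
    T = len(alphas)
    canvas = -torch.ones(T, C, H * 2, W + margin)  # background filled with -1
    merged = torch.cat(entries, dim=2)             # stack x_prev on top of x_next
    for t, alpha in enumerate(alphas):
        top = int(H * (1 - alpha))                 # window slides down as alpha goes 0 -> 1
        canvas[t, :, top:H * 2, :W] = merged[0, :, :2 * H - top, :]
    return canvas

x_prev = torch.rand(1, 3, 256, 256)
x_next = torch.rand(1, 3, 256, 256)
alphas = torch.linspace(0, 1, steps=8).tolist()    # assumed linear schedule for the demo
print(slide_demo([x_prev, x_next], alphas).shape)  # (8, 3, 512, 288)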
def translate_using_reference(nets, args, x_src, x_ref, y_ref, filename):
    x_ref.stop_gradient = True
    y_ref.stop_gradient = True
    x_src.stop_gradient = True
    N, C, H, W = x_src.shape
    wb = porch.ones(1, C, H, W)
    x_src_with_wb = porch.cat([wb, x_src], dim=0)

    masks = nets.fan.get_heatmap(x_src) if args.w_hpf > 0 else None
    s_ref = nets.style_encoder(x_ref, y_ref)
    s_ref_list = s_ref.unsqueeze(1).repeat(1, N, 1)
    x_concat = [x_src_with_wb]
    for i, s_ref in enumerate(s_ref_list):
        x_fake = nets.generator(x_src, s_ref, masks=masks)
        x_fake_with_ref = porch.cat([x_ref[i:i + 1], x_fake], dim=0)
        x_concat += [x_fake_with_ref]

    x_concat = porch.cat(x_concat, dim=0)
    save_image(x_concat, N + 1, filename)
    del x_concat
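# save_image above is the repository's own helper, whose definition is not shown here.
# A hedged sketch of the assumed behaviour: map images from [-1, 1] back to [0, 1] and
# write an (N+1)-column grid via torchvision; save_image_grid and the file name below
# are illustrative, not the repository's API.
import torch
import torchvision.utils as vutils

def save_image_grid(x, ncol, filename):
    """Assumed grid-saving behaviour: de-normalise and tile ncol images per row."""
    x = (x + 1) / 2          # [-1, 1] -> [0, 1]
    x = x.clamp_(0, 1)
    vutils.save_image(x.cpu(), filename, nrow=ncol, padding=0)

# e.g. an (N+1)-column grid for N source images and N reference images
fake = torch.rand(9, 3, 64, 64) * 2 - 1
save_image_grid(fake, ncol=3, filename="grid_demo.png")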
def __init__(self, output_size, input_size, which_linear, eps=1e-5,
             momentum=0.1, cross_replica=False, mybn=False, norm_style='bn'):
    super(ccbn, self).__init__()
    self.output_size, self.input_size = output_size, input_size
    # Prepare gain and bias layers
    self.gain = which_linear(input_size, output_size)
    self.bias = which_linear(input_size, output_size)
    # epsilon to avoid dividing by 0
    self.eps = eps
    # Momentum
    self.momentum = momentum
    # Use cross-replica batchnorm?
    self.cross_replica = cross_replica
    # Use my batchnorm?
    self.mybn = mybn
    # Norm style?
    self.norm_style = norm_style

    if self.cross_replica:
        self.bn = SyncBN2d(output_size, eps=self.eps, momentum=self.momentum, affine=False)
    elif self.mybn:
        self.bn = myBN(output_size, self.eps, self.momentum)
    elif self.norm_style in ['bn', 'in']:
        self.stored_mean = torch.nn.Parameter(torch.zeros(output_size))
        self.stored_var = torch.nn.Parameter(torch.ones(output_size))
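# A minimal stand-alone sketch of the class-conditional batchnorm technique this
# constructor sets up: gains and biases are predicted from a conditioning vector by
# linear layers, then applied to a batch-normalised input with no built-in affine
# transform. Shapes and layer names below are illustrative, not the class's own forward.
import torch
import torch.nn.functional as F

B, C, H, W, cond_dim = 8, 16, 32, 32, 128
gain_layer = torch.nn.Linear(cond_dim, C)   # plays the role of which_linear for gain
bias_layer = torch.nn.Linear(cond_dim, C)   # plays the role of which_linear for bias
stored_mean = torch.zeros(C)
stored_var = torch.ones(C)

x = torch.randn(B, C, H, W)
y = torch.randn(B, cond_dim)

gain = (1 + gain_layer(y)).view(B, C, 1, 1)  # centred at 1, so the net starts near identity
bias = bias_layer(y).view(B, C, 1, 1)
out = F.batch_norm(x, stored_mean, stored_var, None, None,
                   training=True, momentum=0.1, eps=1e-5)
out = out * gain + bias
print(out.shape)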
e_feat = None
if self.gnn_model == "gin":
    x, all_outputs = self.gnn(g, n_feat, e_feat)
else:
    x, all_outputs = self.gnn(g, n_feat, e_feat), None
    x = self.set2set(g, x)
    x = self.lin_readout(x)
if self.norm:
    x = F.normalize(x, p=2, dim=-1, eps=1e-5)
if return_all_outputs:
    return x, all_outputs
else:
    return x


if __name__ == "__main__":
    model = GraphEncoder(gnn_model="gin")
    print(model)
    g = dgl.DGLGraph()
    g.add_nodes(3)
    g.add_edges([0, 0, 1, 2], [1, 2, 2, 1])
    g.ndata["pos_directed"] = torch.rand(3, 16)
    g.ndata["pos_undirected"] = torch.rand(3, 16)
    g.ndata["seed"] = torch.zeros(3, dtype=torch.long)
    g.ndata["nfreq"] = torch.ones(3, dtype=torch.long)
    g.edata["efreq"] = torch.ones(4, dtype=torch.long)
    g = dgl.batch([g, g, g])
    y = model(g)
    print(y.shape)
    print(y)