Code example #1
def PytorchLinear(x,
                  input_shape,
                  hidden_size,
                  weights=[],
                  name="",
                  return_dims=False):
    need_reshape = len(input_shape) > 2
    if need_reshape:
        new_in_shape = (np.prod(input_shape[:-1]), input_shape[-1])
        x = lbann.Reshape(x, dims=str_list(new_in_shape))

    if len(input_shape) == 1:
        y = lbann.FullyConnected(x,
                                 num_neurons=hidden_size,
                                 weights=weights,
                                 name=name)
    else:
        y = lbann.ChannelwiseFullyConnected(x,
                                            output_channel_dims=[hidden_size],
                                            weights=weights,
                                            name=name)

    if need_reshape:
        new_out_shape = input_shape[:-1] + (hidden_size, )
        y = lbann.Reshape(y, dims=str_list(new_out_shape))
    else:
        # Note: assumes a 2D input here; a 1D input would produce (hidden_size,)
        new_out_shape = (input_shape[0], hidden_size)

    if return_dims:
        return y, new_out_shape
    return y
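A minimal usage sketch (hypothetical shapes and names; assumes lbann, numpy as np, and str_list are in scope, and that x is an existing LBANN layer carrying a (sequence, features) tensor):

seq_len, d_in, d_out = 32, 256, 512
y, out_shape = PytorchLinear(x, (seq_len, d_in), d_out,
                             name='proj', return_dims=True)
# out_shape == (32, 512); with a 2D input no reshape is needed and the
# ChannelwiseFullyConnected branch is taken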
Code example #2
File: fftshift.py Project: benson31/lbann
    def forward(self, x, dims):
        """Apply fftshift.

        Args:
            x (lbann.Layer): Input tensor
            dims (tuple of int): Dimensions of x (dim 0 corresponds to
                channel)

        Returns:
            Layer: Output tensor

        """

        # Get gather indices by applying fftshift to tensor filled with indices
        # Note: Independent fftshift for each channel (dim 0)
        spatial_size = np.prod(dims[1:])
        spatial_inds = np.arange(spatial_size).reshape(dims[1:])
        spatial_inds = np.fft.fftshift(spatial_inds)
        channel_offsets = np.arange(0, dims[0] * spatial_size, spatial_size)
        channel_offsets = channel_offsets.reshape([-1] +
                                                  [1] * spatial_inds.ndim)
        inds = np.expand_dims(spatial_inds, 0) + channel_offsets

        # Construct LBANN layer graph
        size = np.prod(dims)
        x = lbann.Reshape(x, dims=str_list([size]))
        inds = lbann.WeightsLayer(
            weights=lbann.Weights(
                lbann.ValueInitializer(values=str_list(inds.flatten())),
                optimizer=lbann.NoOptimizer(),
            ),
            dims=str_list([size]),
        )
        y = lbann.Gather(x, inds)
        return lbann.Reshape(y, dims=str_list(dims))
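The gather-index construction above can be checked in pure NumPy; a minimal sketch with arbitrary sizes:

import numpy as np

dims = (2, 4, 4)  # (channel, height, width)
spatial_size = np.prod(dims[1:])
spatial_inds = np.fft.fftshift(np.arange(spatial_size).reshape(dims[1:]))
channel_offsets = np.arange(0, dims[0] * spatial_size, spatial_size)
channel_offsets = channel_offsets.reshape([-1] + [1] * spatial_inds.ndim)
inds = np.expand_dims(spatial_inds, 0) + channel_offsets

x = np.arange(np.prod(dims)).reshape(dims)
# Gathering from the flattened tensor with these indices is exactly a
# per-channel fftshift
assert np.array_equal(x.flatten()[inds.flatten()].reshape(dims),
                      np.fft.fftshift(x, axes=(1, 2)))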
Code example #3
    def forward(self, img, z, mcr):
        '''
        Steps: 
        - Modify image if using mcr
        - D1 + imgs -> d1_real
        - G + noise -> gen_imgs
        - D1 + gen_imgs -> d1_fake
        - Adv (D2) + gen_imgs
        Return D outputs and gen_imgs
        '''

        print('MCR in forward', mcr)
        # Multi-channel rescaling: add an extra channel for real images.
        # Generated images are rescaled inside the generator.
        if mcr:
            linear_scale = 1 / self.linear_scaler
            ch2 = lbann.Tanh(
                lbann.WeightedSum(self.inv_transform(lbann.Identity(img)),
                                  scaling_factors=str(linear_scale)))
            y = lbann.Concatenation(lbann.Identity(img), ch2, axis=0)
            img = lbann.Reshape(y, dims='2 128 128')
        else:
            img = lbann.Reshape(img, dims='1 128 128')

        d1_real = self.forward_discriminator1(img)  # instance 1
        gen_img = self.forward_generator(z, mcr=mcr)

        d1_fake = self.forward_discriminator1(
            lbann.StopGradient(gen_img))  # instance 2
        d_adv = self.forward_discriminator2(
            gen_img)  # instance 3; needs to be frozen
        # The d1 instances share weights; d1 weights are copied to d_adv
        # (through the replace-weights callback) and frozen

        return d1_real, d1_fake, d_adv, gen_img, img
Code example #4
    def forward_generator(self, z, mcr):
        '''
        Build the Generator
        '''
        x = lbann.Relu(
            lbann.BatchNormalization(self.g_fc1(z),
                                     decay=0.9,
                                     scale_init=1.0,
                                     epsilon=1e-5))
        dims = '512 8 8'
        x = lbann.Reshape(x, dims=dims)  #channel first

        for lyr in self.g_convT:
            x = lbann.Relu(
                lbann.BatchNormalization(lyr(x),
                                         decay=0.9,
                                         scale_init=1.0,
                                         epsilon=1e-5))

        img = self.g_convT3(x)

        if mcr:  ### For multi-channel rescaling, add extra channel to output image
            linear_scale = 1 / self.linear_scaler
            #             linear_scale=lbann.Constant(value=0.001)
            ch2 = lbann.Tanh(
                lbann.WeightedSum(self.inv_transform(img),
                                  scaling_factors=str(linear_scale)))
            y = lbann.Concatenation(img, ch2, axis=0)
            img = lbann.Reshape(y, dims='2 128 128')
        else:
            img = lbann.Reshape(img, dims='1 128 128')

        return img
Code example #5
File: transformations.py Project: benson31/lbann
def Permute(x, dims, axes=None, name="", return_dims=False):
    global _permute_cache
    key = (dims, axes)
    size = np.prod(dims)
    if key not in _permute_cache:
        # Construct gather indices
        inds = np.arange(size).reshape(dims, order="C").transpose(axes)
        inds = lbann.Weights(
            initializer=lbann.ValueInitializer(values=str_list(
                np.nditer(inds, order="C")), ),
            optimizer=lbann.NoOptimizer(),
        )
        inds = lbann.WeightsLayer(dims=str_list([size]), weights=inds)
        _permute_cache[key] = inds

    # Apply transpose with gather
    inds = _permute_cache[key]
    if axes is None:
        new_dims = dims[::-1]
    else:
        new_dims = np.array(dims)[list(axes)]
    x = lbann.Reshape(x, dims=str_list([size]))
    y = lbann.Gather(x, inds)
    y = lbann.Reshape(y, dims=str_list(list(new_dims)), name=name)

    if return_dims:
        return y, tuple(new_dims)
    return y
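The same transpose-via-gather trick can be verified in pure NumPy; a minimal sketch with arbitrary shapes:

import numpy as np

dims, axes = (2, 3, 4), (2, 0, 1)
inds = np.arange(np.prod(dims)).reshape(dims, order="C").transpose(axes)
new_dims = np.array(dims)[list(axes)]

x = np.random.rand(*dims)
gathered = x.flatten()[inds.flatten()].reshape(new_dims)
assert np.allclose(gathered, x.transpose(axes))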
Code example #6
    def forward_discriminator2(self, y):
        ch2 = self.inv_transform(lbann.Identity(y))
        y = lbann.Concatenation(lbann.Identity(y), ch2, axis=0)
        img = lbann.Reshape(y, dims='2 128 128')
        x = lbann.LeakyRelu(self.d2_conv[0](img), negative_slope=0.2)
        x = lbann.LeakyRelu(self.d2_conv[1](x), negative_slope=0.2)
        x = lbann.LeakyRelu(self.d2_conv[2](x), negative_slope=0.2)
        x = lbann.LeakyRelu(self.d2_conv[3](x), negative_slope=0.2)
        return self.d2_fc(lbann.Reshape(x, dims='32768'))
Code example #7
    def forward(self, x, z):
        """Do the WAE forward step

        :param x: list of tensors of longs, embed representation of input
        :return: float, kl term component of loss
        :return: float, recon component of loss
        """

        x = lbann.Slice(x, slice_points=str_list([0, self.input_feature_dims]))
        x = lbann.Identity(x)
        x_emb = lbann.Embedding(x,
                                num_embeddings=self.dictionary_size,
                                embedding_dim=self.embedding_size,
                                name='emb',
                                weights=self.emb_weights)

        # Encoder: x -> z, kl_loss
        z_sample = self.forward_encoder(x_emb)

        eps = lbann.Gaussian(mean=self.gmean,
                             stdev=self.gstd,
                             hint_layer=z_sample)
        z_sample = lbann.Add([z_sample, eps])

        # Decoder: x, z -> recon_loss
        #pred = self.forward_decoder(x_emb, z_sample)
        pred, arg_max = self.forward_decoder(x_emb, z_sample)
        recon_loss = self.compute_loss(x, pred)

        # Hack to remove blocking GPU allreduce in evaluation layer
        #kl_loss = lbann.Identity(kl_loss, device='CPU')
        recon_loss = lbann.Identity(recon_loss, device='CPU')

        z_prior = lbann.Tessellate(
            lbann.Reshape(z, dims=str_list([1, self.zdim])),
            dims=str_list([self.input_feature_dims, self.zdim]),
        )

        d_real = self.discriminator0(
            lbann.Concatenation([x_emb, z_prior], axis=1))

        z_sample0 = lbann.Tessellate(
            lbann.Reshape(z_sample, dims=str_list([1, self.zdim])),
            dims=str_list([self.input_feature_dims, self.zdim]),
        )
        y_z_sample = lbann.Concatenation([x_emb, z_sample0], axis=1)

        d_fake = self.discriminator0(lbann.StopGradient(y_z_sample))
        d_adv = self.discriminator1(y_z_sample)  #freeze

        return recon_loss, d_real, d_fake, d_adv, arg_max
Code example #8
File: train_ras_classifier.py Project: oyamay/lbann
def construct_model():
    """Model description

    """
    import lbann
    import lbann.modules

    fc = lbann.modules.FullyConnectedModule
    conv = lbann.modules.Convolution2dModule

    conv1 = conv(20, 3, stride=1, padding=1, name='conv1')
    conv2 = conv(20, 3, stride=1, padding=1, name='conv2')
    fc1 = fc(100, name='fc1')
    fc2 = fc(20, name='fc2')
    fc3 = fc(num_classes, name='fc3')
    # Layer graph
    input = lbann.Input(name='inp_tensor', target_mode='classification')
    inp_slice = lbann.Slice(input,
                            axis=0,
                            slice_points=str_list([0, dims - 1, dims]),
                            name='inp_slice')
    xdata = lbann.Identity(inp_slice)
    ylabel = lbann.Identity(inp_slice, name='gt_y')
    #NHWC to NCHW
    x = lbann.Reshape(xdata, dims='14 13 13')
    x = conv2(conv1(x))
    x = lbann.Reshape(x, dims='3380')
    x = lbann.Dropout(lbann.Relu(fc1(x)), keep_prob=0.5)
    x = lbann.Dropout(fc2(x), keep_prob=0.5)
    pred = lbann.Softmax(fc3(x))
    gt_label = lbann.OneHot(ylabel, size=num_classes)
    loss = lbann.CrossEntropy([pred, gt_label], name='loss')
    acc = lbann.CategoricalAccuracy([pred, gt_label])

    layers = list(lbann.traverse_layer_graph(input))
    # Setup objective function
    weights = set()
    for l in layers:
        weights.update(l.weights)
    obj = lbann.ObjectiveFunction(loss)

    callbacks = [lbann.CallbackPrint(), lbann.CallbackTimer()]

    # Construct model
    num_epochs = 10
    return lbann.Model(num_epochs,
                       weights=weights,
                       layers=layers,
                       metrics=[lbann.Metric(acc, name='accuracy', unit='%')],
                       objective_function=obj,
                       callbacks=callbacks)
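The Slice/Identity pair above is the usual LBANN idiom for splitting a flat input into data and label: each Identity consumes the next slice. A rough NumPy analogy of what the slice points [0, dims - 1, dims] select, assuming dims = 14 * 13 * 13 + 1 to match the reshape to '14 13 13':

import numpy as np

dims = 14 * 13 * 13 + 1                      # image pixels plus one label entry
sample = np.random.rand(dims)
xdata, ylabel = sample[:dims - 1], sample[dims - 1:]
x = xdata.reshape(14, 13, 13)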
Code example #9
    def forward(self, image, dims, max_r):
        """Compute radial profile.

        Args:
            image (lbann.Layer): Image
            dims (tuple of int): Image dimensions (dim 0 corresponds
                to channel)
            max_r (int): Maximum radial distance. Pixels outside this
                distance are ignored.

        Returns:
            Layer: num_channels x max_r radial profile

        """

        # Bin spatial positions
        r, r_counts = self._find_radial_bins(dims[1:], max_r)

        # Reciprocal of bin counts
        # Note: If a count is 0, its reciprocal is 0.
        r_counts_recip = [0 if c == 0 else 1 / c for c in r_counts]

        # Get scatter indices and scaling factors
        # Note: Independent binning for each channel (dim 0)
        tile_dims = [dims[0]] + [1] * r.ndim
        inds_vals = np.tile(r, tile_dims)
        inds_vals += np.arange(0, dims[0] * max_r, max_r).reshape(tile_dims)
        inds_vals[:, r >= max_r] = -1
        inds_vals = inds_vals.flatten()
        # List repetition tiles the per-bin scaling factors across channels
        scales_vals = r_counts_recip * dims[0]

        # Construct LBANN layer graph
        image = lbann.Reshape(image, dims=str_list([np.prod(dims)]))
        inds = lbann.WeightsLayer(
            weights=lbann.Weights(
                lbann.ValueInitializer(values=str_list(inds_vals)),
                optimizer=lbann.NoOptimizer(),
            ),
            dims=str_list([len(inds_vals)]),
        )
        r_sums = lbann.Scatter(image, inds, dims=str_list([dims[0] * max_r]))
        scales = lbann.WeightsLayer(
            weights=lbann.Weights(
                lbann.ValueInitializer(values=str_list(scales_vals)),
                optimizer=lbann.NoOptimizer(),
            ),
            dims=str_list([len(scales_vals)]),
        )
        r_means = lbann.Multiply(scales, r_sums)
        return lbann.Reshape(r_means, dims=str_list([dims[0], max_r]))
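_find_radial_bins is not part of this excerpt; a plausible pure-NumPy stand-in (hypothetical, for illustration only) would bin each spatial position by its integer distance from the image center:

import numpy as np

def find_radial_bins(spatial_dims, max_r):
    # Integer radial distance of every spatial position from the center
    coords = np.indices(spatial_dims)
    center = [(d - 1) / 2 for d in spatial_dims]
    r = np.sqrt(sum((c - ctr) ** 2 for c, ctr in zip(coords, center)))
    r = r.astype(np.int64)
    # Count the positions that fall in each bin; anything at or beyond
    # max_r is ignored, matching the caller
    r_counts = np.bincount(r[r < max_r], minlength=max_r)
    return r, r_counts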
Code example #10
    def get_mat(self, cols=None):
        """Generate a matrix representation of the graph data.

        Args:
            cols (int): Number of columns in the reshaped matrix. If None,
                the stored shape of the graph data is used.
        """

        mat = lbann.Concatenation(self.layers)

        if cols:
            mat = lbann.Reshape(mat, dims=str_list([self.shape[0], cols]))
        else:
            mat = lbann.Reshape(mat, dims=str_list([self.shape[0], self.shape[1]]))

        return mat
Code example #11
    def decoder_cnn(self, z):
        x = self.dec_cnn_fc(z)
        sca = self.dec_fc_sca(lbann.Identity(x))
        img = lbann.Reshape(lbann.Identity(x),
                            dims="16 8 8",
                            name=self.name + 'dec_reshape0')
        img = self.dec_convT[2](lbann.Relu(self.dec_convT[1](lbann.Relu(
            self.dec_convT[0](img)))))
        # concat for common interface, slice in output
        img = lbann.Reshape(img,
                            dims=str(64 * 64 * 4),
                            name=self.name + 'dec_reshape1')  # ?? check tensor shape
        # todo: check that concat size == dec_out_dim
        return lbann.Concatenation([img, sca], axis=0)
Code example #12
    def matrix_to_graph(cls, mat_layer, num_vertices, num_features):
        """Given a 2D matrix of shape (num_vertices, num_features), return a
           GraphVertexData object with num_vertices nodes, each holding
           num_features features.
        """

        slice_points = str_list(range(0, num_vertices * num_features + 1, num_features))
        flattened_layer = lbann.Reshape(mat_layer, dims=str(num_vertices * num_features))
        sliced_mat_layer = lbann.Slice(flattened_layer, axis=0, slice_points=slice_points)

        list_of_layers = []
        for node in range(num_vertices):
            temp = lbann.Identity(sliced_mat_layer)
            list_of_layers.append(lbann.Reshape(temp, dims=str_list([1, num_features])))
        return cls(list_of_layers, num_features)
Code example #13
File: vae.py Project: oyamay/lbann
    def forward_decoder(self, x_emb, z):
        """Decoder step, emulating x ~ G(z)

        :param x_emb: (n_batch, len(x), d_z) of floats, embeddings for input sentence x
        :param z: (n_batch, d_z) of floats, latent vector z
        :return: float, recon component of loss
        :return: list of ints, reconstructed sentence
        """

        # z_0 = z.unsqueeze(1).repeat(1, x_emb.size(1), 1)
        # x_input = torch.cat([x_emb, z_0], dim=-1)
        z_0 = lbann.Tessellate(
            lbann.Reshape(z, dims=str_list([1, 128])),
            dims=str_list([self.input_feature_dims, 128]),
        )
        x_input = lbann.Concatenation(x_emb, z_0, axis=1)

        h_0 = self.decoder_lat(z)
        # h_0 = h_0.unsqueeze(0).repeat(self.decoder_rnn.num_layers, 1, 1)
        h_0 = lbann.Reshape(h_0, dims=str_list([1, 512]))
        h_0 = lbann.Tessellate(h_0, dims=str_list((3, 512)))

        # output, _ = self.decoder_rnn(x_input, h_0)
        output = self.decoder_rnn(x_input, h_0)

        # y = self.decoder_fc(output)
        y = lbann.ChannelwiseFullyConnected(
            output,
            output_channel_dims=self.dictionary_size,
            bias=True,
            name=self.decoder_fc.name,
            weights=self.decoder_fc.weights,
        )

        # Set datatype of layers
        # Note: Depth-first search from y to x_emb and z
        stack = [y]
        in_stack = {l: True for l in stack}
        while stack:
            l = stack.pop()
            if type(l) not in (lbann.Slice, lbann.Reshape, lbann.Tessellate):
                l.datatype = self.datatype
            for parent in l.parents:
                if parent not in in_stack and parent not in (x_emb, z):
                    stack.append(parent)
                    in_stack[parent] = True

        return y
Code example #14
File: GCNConv.py Project: benson31/lbann
    def forward(self, node_feature_mat, source_indices, target_indices):
        """Apply GCN

        Args:
            node_feature_mat (Layer): Node feature matrix with shape (num_nodes, input_channels)
            source_indices (Layer): Source node indices of the edges with shape (num_edges)
            target_indices (Layer): Target node indices of the edges with shape (num_edges)
        Returns:
            (Layer): The output after the kernel ops. The output can be passed
                     directly into another graph conv layer
        """

        self.instance += 1
        name = f"{self.name}_{self.instance}"
        new_features = self.nn(node_feature_mat)  # W \times node_feature_mat

        # If distconv is enabled, the output dimensions of the feature matrix are 3D.
        # We convert it to 2D for the graph expand and reduce operations.
        # Note: This check will be obsolete once distconv scatter-gather is supported
        if self.is_distconv:
            new_features = lbann.Reshape(
                new_features,
                dims=str_list([self.num_nodes, self.output_channel_size]),
                name=f"{name}_distconv_reshape")

        neighborhoods = GraphExpand(new_features, target_indices)
        reduced_features = GraphReduce(
            neighborhoods, source_indices,
            [self.num_nodes, self.output_channel_size])

        return reduced_features
Code example #15
    def forward_generator(self, z):
        x = lbann.Relu(lbann.BatchNormalization(
            self.g_fc1(z), decay=0.9, scale_init=1.0, epsilon=1e-5))
        x = lbann.Reshape(x, dims='512 8 8')  # channel first
        x = lbann.Relu(lbann.BatchNormalization(
            self.g_convT[0](x), decay=0.9, scale_init=1.0, epsilon=1e-5))
        x = lbann.Relu(lbann.BatchNormalization(
            self.g_convT[1](x), decay=0.9, scale_init=1.0, epsilon=1e-5))
        x = lbann.Relu(lbann.BatchNormalization(
            self.g_convT[2](x), decay=0.9, scale_init=1.0, epsilon=1e-5))
        return self.g_convT3(x)
Code example #16
File: NNConv.py Project: benson31/lbann
    def aggregate(self,
                  edge_values,
                  edge_indices):
        """Aggregate the messages from the neighbors of the nodes
        Args:
            edge_values (Layer): A layer of edge features of
                                   shape (num_edges, edge_features)
            edge_indices (Layer): A 1D layer of node features of
                                shape (num_edges).
                                The indices used for reduction
        Returns:
            (Layer): A 2D layer of updated node features
        """

        node_feature_dims = [self.num_nodes, self.output_channels]
        edge_feature_dims = [self.num_edges, self.output_channels]

        edge_values = lbann.Reshape(edge_values,
                                    dims=str_list(edge_feature_dims),
                                    name=self.name+"_neighbor_features")
        edge_reduce = lbann.Scatter(edge_values,
                                    edge_indices,
                                    dims=str_list(node_feature_dims),
                                    axis=0,
                                    name=self.name+"_aggregate")

        return edge_reduce
Code example #17
    def encoder_cnn(self, y):
        img_sca = lbann.Slice(y,
                              axis=0,
                              slice_points="0 16384 16399",
                              name=self.name + '_y_slice')
        # assume channel-first; is the data channel-first?
        img = lbann.Reshape(img_sca,
                            dims='4 64 64',
                            name=self.name + 'enc_reshape0')
        x = self.enc_conv[2](self.enc_conv[1](self.enc_conv[0](img)))
        x = lbann.Reshape(x,
                          dims=str(16 * 8 * 8),
                          name=self.name + 'enc_reshape1')
        h_stack = lbann.Concatenation([x, img_sca], axis=0)
        z = self.enc_out(h_stack)
        return z
Code example #18
File: random_projection.py Project: oyamay/lbann
def random_projection(indices, num_projections, projection_dim):

    # Expand input indices to get an index for each vector entry
    # Note: proj_indices(i) = index*projection_dim + i
    proj_indices = lbann.WeightedSum(
        indices,
        scaling_factors=utils.str_list(projection_dim),
    )
    iota = lbann.WeightsLayer(
        dims=utils.str_list(projection_dim),
        weights=lbann.Weights(
            initializer=lbann.ValueInitializer(
                values=utils.str_list(range(projection_dim))),
            optimizer=lbann.NoOptimizer(),
        ),
    )
    proj_indices = lbann.Sum(
        lbann.Tessellate(
            lbann.Reshape(proj_indices,
                          dims=utils.str_list([num_projections, 1])),
            dims=utils.str_list([num_projections, projection_dim]),
        ),
        lbann.Tessellate(
            lbann.Reshape(iota, dims=utils.str_list([1, projection_dim])),
            dims=utils.str_list([num_projections, projection_dim]),
        ),
    )

    # Apply hash function and convert to Gaussian distribution
    proj = lbann.UniformHash(proj_indices)
    ones = lbann.Constant(
        value=1,
        num_neurons=utils.str_list([num_projections, projection_dim]),
    )
    eps = 0.001
    proj = lbann.ErfInv(
        lbann.WeightedSum(
            proj,
            ones,
            scaling_factors=utils.str_list([2 * (1 - eps), -(1 - eps)]),
        ))
    proj = lbann.InstanceNorm(proj)
    proj = lbann.WeightedSum(
        proj,
        scaling_factors=utils.str_list(1 / projection_dim),
    )
    return proj
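A SciPy sketch of the uniform-to-Gaussian conversion used above (scipy.special.erfinv stands in for lbann.ErfInv; the eps margin keeps erfinv away from its singularities at +/-1):

import numpy as np
from scipy.special import erfinv

u = np.random.rand(100000)             # stand-in for lbann.UniformHash output
eps = 0.001
v = 2 * (1 - eps) * u - (1 - eps)      # map (0, 1) into (-1 + eps, 1 - eps)
g = erfinv(v)
# g is Gaussian up to a sqrt(2) scale factor; the InstanceNorm that follows
# in the LBANN graph standardizes it regardless
print(g.mean(), g.std() * np.sqrt(2))  # approximately 0 and 1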
Code example #19
 def transpose_for_scores(self, x, dims):
     new_x_shape = dims[:-1] + (self.num_attention_heads,
                                self.attention_head_size)
     x = lbann.Reshape(x, dims=lbann.util.str_list(new_x_shape))
     return lbann.modules.Permute(x,
                                  new_x_shape,
                                  axes=(0, 2, 1, 3),
                                  return_dims=True)
Code example #20
File: random_projection.py Project: oyamay/lbann
def mean_squared_error(
    data_dim,
    sequence_length,
    source_sequence,
    target_sequence,
    scale_decay=0.8,
):

    # Compute inner product between source and target vectors
    # Note: Inner products are computed for each (x,y) pair and a
    # weighted sum is computed. The scaling factors sum to 1 and decay
    # exponentially as x and y get further apart in the sequence.
    prods = lbann.MatMul(
        source_sequence,
        target_sequence,
        transpose_b=True,
    )
    scale_dims = (sequence_length, sequence_length)
    scales = np.zeros(scale_dims)
    for i in range(sequence_length):
        for j in range(sequence_length):
            if i != j:
                scales[i, j] = ((1 - scale_decay) / (2 * scale_decay) *
                                scale_decay**np.abs(j - i))
    scales = lbann.Weights(
        initializer=lbann.ValueInitializer(
            values=utils.str_list(np.nditer(scales))),
        optimizer=lbann.NoOptimizer(),
    )
    scales = lbann.WeightsLayer(dims=utils.str_list(scale_dims),
                                weights=scales)
    prods = lbann.MatMul(
        lbann.Reshape(prods, dims='1 -1'),
        lbann.Reshape(scales, dims='1 -1'),
        transpose_b=True,
    )
    prods = lbann.Reshape(prods, dims='1')

    # MSE(x,y) = ( norm(x)^2 + norm(y)^2 - 2*prod(x,y) ) / dim(x)
    scale = 1 / (data_dim * sequence_length)
    return lbann.WeightedSum(lbann.L2Norm2(source_sequence),
                             lbann.L2Norm2(target_sequence),
                             prods,
                             scaling_factors=utils.str_list(
                                 [scale, scale, -2 * scale]))
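The scaling matrix can be sanity-checked in NumPy: each row sums to just under 1, approaching 1 exactly in the infinite-sequence limit, consistent with the comment above:

import numpy as np

sequence_length, scale_decay = 16, 0.8
scales = np.zeros((sequence_length, sequence_length))
for i in range(sequence_length):
    for j in range(sequence_length):
        if i != j:
            scales[i, j] = ((1 - scale_decay) / (2 * scale_decay) *
                            scale_decay ** abs(j - i))
print(scales.sum(axis=1))  # rows near the middle of the sequence are closest to 1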
Code example #21
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
    ):

        if attention_mask is None:
            attention_mask = lbann.Constant(value=1,
                                            num_neurons=str_list(
                                                self.attn_mask_shape))

        if token_type_ids is None:
            token_type_ids = lbann.Constant(value=0,
                                            num_neurons=str_list(
                                                self.input_shape))

        if head_mask is None:
            head_mask = [None] * self.config.num_hidden_layers

        input_ids = lbann.Reshape(input_ids, dims=str_list(self.input_shape))
        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
        )
        embedding_output = lbann.Reshape(embedding_output,
                                         dims=str_list(self.input_shape +
                                                       (self.hidden_size, )))
        encoder_output = self.encoder(
            embedding_output,
            attention_mask=attention_mask,
            head_mask=head_mask,
        )
        pooled_output = self.pooler(
            encoder_output) if self.pooler is not None else None

        if pooled_output is not None:
            return pooled_output
        else:
            return encoder_output
Code example #22
def PytorchLayerNorm(x, epsilon, input_shape, weights=[], name=""):
    if len(input_shape) > 2:
        x = lbann.Reshape(x,
                          dims=str_list(
                              [np.prod(input_shape[:-1]), input_shape[-1]]))
    x = lbann.InstanceNorm(x, epsilon=epsilon)
    x = lbann.Reshape(x, dims=str_list(input_shape))
    # Apply the affine transform only when weights are provided
    if weights:
        x, new_x_shape = lbann.modules.Permute(x,
                                               input_shape,
                                               return_dims=True)
        x = lbann.ChannelwiseScaleBias(x, weights=weights)
        x, _ = lbann.modules.Permute(x,
                                     new_x_shape,
                                     return_dims=True,
                                     name=name)

    return x
Code example #23
File: GatedGraphConv.py Project: oyamay/lbann
    def forward(self, X, A):
        """Call the GatedGraphConv
        Args:
            X (GraphVertexData): LBANN Data object, which is a collection of Layers. Each Layer is of
                                 the shape (1,input_channels) 
            A (Layer): Adjacency matrix input with shape (num_nodes, num_nodes)
        Returns:
            LBANN_Data_Mat: The output after the gated graph kernel.
                        The output can be passed directly into another graph conv layer

        """

        input_features = X.size(1)
        num_nodes = X.size(0)

        if input_features < self.output_channels:
            for i in range(num_nodes):
                num_zeros = self.output_channels - input_features
                zeros = lbann.Constant(value=0,
                                       num_neurons=str_list([1, num_zeros]),
                                       name=self.name + '_zero_' + str(i))
                X[i] = lbann.Concatenation(X[i], zeros, axis=1)
        elif input_features > self.output_channels:
            raise ValueError(
                'The feature size of the nodes {} cannot be greater than the output dimension {}'
                .format(input_features, self.output_channels))

        X.update_num_features(self.output_channels)

        for layer in range(self.num_layers):
            X_mat = X.get_mat()
            messages = lbann.MatMul(X_mat, self.weights[layer])
            aggregate = lbann.MatMul(A, messages)

            M = GraphVertexData.matrix_to_graph(aggregate, num_nodes,
                                                self.output_channels)

            for i in range(num_nodes):
                X[i] = lbann.Reshape(X[i], dims=str(self.output_channels))
                X[i] = lbann.Reshape(self.rnn(M[i], X[i])[1],
                                     dims=str_list([1, self.output_channels]))

        return X
Code example #24
    def forward_generator(self, z, mcr):
        '''
        Build the Generator
        '''
        x = lbann.Relu(
            lbann.BatchNormalization(self.g_fc1(z),
                                     decay=0.9,
                                     scale_init=1.0,
                                     epsilon=1e-5))
        dims = '512 8 8'

        print("dims", dims)
        x = lbann.Reshape(x, dims=dims)  #channel first
        x = lbann.Relu(
            lbann.BatchNormalization(self.g_convT[0](x),
                                     decay=0.9,
                                     scale_init=1.0,
                                     epsilon=1e-5))
        x = lbann.Relu(
            lbann.BatchNormalization(self.g_convT[1](x),
                                     decay=0.9,
                                     scale_init=1.0,
                                     epsilon=1e-5))
        x = lbann.Relu(
            lbann.BatchNormalization(self.g_convT[2](x),
                                     decay=0.9,
                                     scale_init=1.0,
                                     epsilon=1e-5))
        img = self.g_convT3(x)

        if mcr:  ### For multi-channel rescaling, add extra channel to output image
            linear_scale = 1 / self.linear_scaler
            #ch2 = lbann.Tanh(self.inv_transform(img)/linear_scalar)
            ch2 = lbann.Tanh(
                lbann.WeightedSum(self.inv_transform(img),
                                  scaling_factors=str(linear_scale)))
            y = lbann.Concatenation(img, ch2, axis=0)
            img = lbann.Reshape(y, dims='2 128 128')
        else:
            img = lbann.Reshape(img, dims='1 128 128')

        print('Gen Img in GAN', img.__dict__)
        return img
Code example #25
File: NNConv.py Project: benson31/lbann
    def message(self,
                node_features,
                neighbor_features,
                edge_features):
        """Update node features and edge features. The Message stage of the
           convolution.
        Args:
            node_features (Layer): A 2D layer of node features of
                                   shape (num_nodes, input_channels)
            neighbor_features (Layer): A 3D layer of node features of
                                       shape (num_edges, 1, input_channels)
            edge_features (Layer): A 2D layer of edge features of
                                   shape (num_edges, edge_features)
        Returns:
            (Layer, Layer): Returns the updated node features and the messages
            for each node.
        """

        # These reshapes do not change the nn output but enable channelwise
        # partitioning for distconv channelwise FC natively

        node_features = lbann.Reshape(node_features, dims=str_list([self.num_nodes, 1, self.input_channels]))
        edge_features = lbann.Reshape(edge_features, dims=str_list([self.num_edges, 1, self.edge_input_channels]))

        updated_node_features = self.node_nn(node_features)

        edge_update = None
        for layer in self.edge_nn:

            if edge_update:
                edge_update = layer(edge_update)
            else:
                edge_update = layer(edge_features)

        edge_values = \
            lbann.Reshape(edge_update,
                          dims=str_list([self.num_edges,
                                         self.input_channels,
                                         self.output_channels]),
                          name=self.name+"_edge_mat_reshape")
        edge_values = \
            lbann.MatMul(neighbor_features, edge_values)
        return updated_node_features, edge_values
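The batched lbann.MatMul at the end mirrors NumPy's stacked-matmul semantics; a quick shape check under the dimensions named in the docstring:

import numpy as np

num_edges, c_in, c_out = 10, 4, 8
neighbor_features = np.random.rand(num_edges, 1, c_in)
edge_values = np.random.rand(num_edges, c_in, c_out)
messages = np.matmul(neighbor_features, edge_values)
assert messages.shape == (num_edges, 1, c_out)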
Code example #26
def positive_samples_loss(
        sequence_length,
        encoder_embeddings,
        decoder_embeddings,
        scale_decay=0.8,
):

    # Compute similarity scores between encoder and decoder embeddings
    scores = lbann.MatMul(
        encoder_embeddings,
        decoder_embeddings,
        transpose_b=True,
    )
    scores = lbann.LogSigmoid(scores)

    # Scale similarity scores and add together
    # Note: The scaling factor decays exponentially as embeddings get
    # further apart in the sequence.
    # Note: The sum of all the scaling factors is approximately -1.
    scale_dims = (sequence_length,sequence_length)
    scales = np.zeros(scale_dims)
    for i in range(sequence_length):
        for j in range(sequence_length):
            if i != j:
                scales[i,j] = (
                    -(1-scale_decay)/(2*scale_decay*sequence_length)
                    * scale_decay**np.abs(j-i)
                )
    scales = lbann.Weights(
        initializer=lbann.ValueInitializer(values=utils.str_list(np.nditer(scales))),
        optimizer=lbann.NoOptimizer(),
    )
    scales = lbann.WeightsLayer(dims=utils.str_list(scale_dims), weights=scales)
    loss = lbann.MatMul(
        lbann.Reshape(scores, dims='1 -1'),
        lbann.Reshape(scales, dims='1 -1'),
        transpose_b=True,
    )
    loss = lbann.Reshape(loss, dims='1')
    return loss
Code example #27
    def forward(self, node_features_mat, edge_features_tensor,
                node_features_tensor, adjacency_tensor):

        num_edges = self.num_nodes**2

        edge_ft_shape = str_list(
            [num_edges, self.input_channels, self.output_channels])
        node_ft_tensor_shape = str_list(
            [self.num_nodes, self.num_nodes, self.output_channels])
        node_ft_mat_shape = str_list([self.num_nodes, self.output_channels])

        transformed_edge_ft_tensor = None

        for layer in self.edge_nn:
            if transformed_edge_ft_tensor is not None:
                transformed_edge_ft_tensor = layer(transformed_edge_ft_tensor)
            else:
                transformed_edge_ft_tensor = layer(edge_features_tensor)

        transformed_edge_ft_tensor = lbann.Reshape(transformed_edge_ft_tensor,
                                                   dims=edge_ft_shape,
                                                   name=self.name +
                                                   "_edge_ft_reshape")

        new_node_features = lbann.MatMul(node_features_tensor,
                                         transformed_edge_ft_tensor)
        new_node_features = lbann.Reshape(new_node_features,
                                          dims=node_ft_tensor_shape)

        gathered_node_features = lbann.MatMul(adjacency_tensor,
                                              new_node_features)

        new_node_features = lbann.Reshape(gathered_node_features,
                                          dims=node_ft_mat_shape)
        updated_nodes = self.node_nn(node_features_mat)

        out = lbann.Sum(new_node_features, updated_nodes)

        return out
Code example #28
    def forward_discriminator2(self, img):
        '''
        Discriminator 2. Weights are frozen as part of Adversarial network = Stacked G + D
        '''
        x = lbann.LeakyRelu(self.d2_conv[0](img), negative_slope=0.2)
        x = lbann.LeakyRelu(self.d2_conv[1](x), negative_slope=0.2)
        x = lbann.LeakyRelu(self.d2_conv[2](x), negative_slope=0.2)
        x = lbann.LeakyRelu(self.d2_conv[3](x), negative_slope=0.2)
        dims = 32768
        #dims=25088 ## for padding=1
        y = self.d2_fc(lbann.Reshape(x, dims=str(dims)))

        return y
Code example #29
File: 2_ExaGAN.py Project: vmos1/lbann_cosmogan
    def forward_discriminator2(self, img):
        '''
        Discriminator 2. Weights are frozen as part of Adversarial network = Stacked G + D
        '''

        for count, lyr in enumerate(self.d2_conv):
            if count == 0:
                x = lbann.LeakyRelu(lyr(img), negative_slope=0.2)
            else:
                x = lbann.LeakyRelu(lyr(x), negative_slope=0.2)
        dims = 32768
        #dims=25088 ## for padding=1
        y = self.d2_fc(lbann.Reshape(x, dims=str(dims)))

        return y
Code example #30
def PytorchMatmul(x, x_shape, y, y_shape, return_dims=False):
    if len(x_shape) != len(y_shape):
        raise RuntimeError(
            "Broadcasting not fully implemented, tensors must have same dimension"
        )
    need_reshape = (len(x_shape) > 3) and (len(y_shape) > 3)
    if need_reshape:
        if x_shape[:-2] != y_shape[:-2]:
            raise RuntimeError("The first n-2 dimensions must match")
        new_x_shape = (np.prod(x_shape[:-2]), ) + x_shape[-2:]
        x = lbann.Reshape(x, dims=str_list(new_x_shape))

        new_y_shape = (np.prod(y_shape[:-2]), ) + y_shape[-2:]
        y = lbann.Reshape(y, dims=str_list(new_y_shape))

    z = lbann.MatMul(x, y)

    z_shape = x_shape[:-1] + (y_shape[-1], )
    if need_reshape:
        z = lbann.Reshape(z, dims=str_list(z_shape))

    if return_dims:
        return z, z_shape
    return z
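The flatten-to-3D trick is the same one NumPy's batched matmul performs implicitly; a pure-NumPy check with arbitrary shapes:

import numpy as np

x_shape, y_shape = (4, 8, 16, 32), (4, 8, 32, 64)
x = np.random.rand(*x_shape)
y = np.random.rand(*y_shape)
xf = x.reshape((-1,) + x_shape[-2:])   # collapse the leading batch dims
yf = y.reshape((-1,) + y_shape[-2:])
z = np.matmul(xf, yf).reshape(x_shape[:-1] + (y_shape[-1],))
assert np.allclose(z, np.matmul(x, y))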