Example #1
    def inv_transform(self, y):
        '''
        The inverse of the transformation function that scales the data before training
        '''
        # Undo the data scaling: datascale * (1 + y) / (1 - y)
        inv_transform = lbann.WeightedSum(lbann.SafeDivide(
            lbann.Add(lbann.Constant(value=1.0, hint_layer=y),
                      lbann.Identity(y)),
            lbann.Subtract(lbann.Constant(value=1.0, hint_layer=y),
                           lbann.Identity(y))),
                                          scaling_factors=str(self.datascale))
        # Rescale by 1/linear_scaler and squash with tanh
        linear_scale = 1 / self.linear_scaler
        CH2 = lbann.Tanh(
            lbann.WeightedSum(inv_transform,
                              scaling_factors=str(linear_scale)))
        return CH2
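For reference, this layer graph encodes the scalar map x = datascale * (1 + y) / (1 - y) followed by tanh(x / linear_scaler). A minimal NumPy sketch of the same arithmetic, assuming datascale and linear_scaler are plain floats (the reference function name is hypothetical):

import numpy as np

def inv_transform_reference(y, datascale, linear_scaler):
    # Mirrors the graph: WeightedSum(SafeDivide(1 + y, 1 - y), datascale),
    # then Tanh of the result scaled by 1 / linear_scaler
    x = datascale * (1.0 + y) / (1.0 - y)
    return np.tanh(x / linear_scaler)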
Example #2
def construct_model():
    """Construct LBANN model.

    Pilot1 Combo model

    """
    import lbann

    # Layer graph
    data = lbann.Input(data_field='samples')
    responses = lbann.Input(data_field='responses')

    pred = combo.Combo()(data)
    mse = lbann.MeanSquaredError([responses, pred])

    SS_res = lbann.Reduction(lbann.Square(lbann.Subtract(responses, pred)),
                             mode='sum')

    # SS_tot = var(x) = mean((x - mean(x))^2)
    mini_batch_size = lbann.MiniBatchSize()
    mean = lbann.Divide(lbann.BatchwiseReduceSum(responses), mini_batch_size)
    SS_tot = lbann.Divide(
        lbann.BatchwiseReduceSum(lbann.Square(lbann.Subtract(responses,
                                                             mean))),
        mini_batch_size)
    eps = lbann.Constant(value=1e-07, hint_layer=SS_tot)
    r2 = lbann.Subtract(lbann.Constant(value=1, num_neurons='1'),
                        lbann.Divide(SS_res, lbann.Add(SS_tot, eps)))

    metrics = [lbann.Metric(mse, name='mse')]
    metrics.append(lbann.Metric(r2, name='r2'))

    callbacks = [lbann.CallbackPrint(), lbann.CallbackTimer()]

    # Construct model
    num_epochs = 100
    layers = list(lbann.traverse_layer_graph([data, responses]))
    return lbann.Model(num_epochs,
                       layers=layers,
                       metrics=metrics,
                       objective_function=mse,
                       callbacks=callbacks)
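As a sanity check, the metric layers above amount to r2 = 1 - SS_res / (SS_tot + eps), where SS_res is the summed squared residual and SS_tot is the batch variance of the responses (per the comment in the code). A rough NumPy transliteration, assuming scalar responses (names illustrative):

import numpy as np

def r2_reference(responses, pred, eps=1e-07):
    # SS_res: summed squared residuals; SS_tot: batch variance of responses
    n = len(responses)
    ss_res = np.sum((responses - pred) ** 2)
    ss_tot = np.sum((responses - np.mean(responses)) ** 2) / n
    return 1.0 - ss_res / (ss_tot + eps)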
Example #3
def f_invtransform(y, scale=4.0):  ### Transform to original space
    '''
    The inverse of the transformation function that scales the data before training
    '''
    inv_transform = lbann.WeightedSum(lbann.SafeDivide(
        lbann.Add(lbann.Constant(value=1.0, hint_layer=y), lbann.Identity(y)),
        lbann.Subtract(lbann.Constant(value=1.0, hint_layer=y),
                       lbann.Identity(y))),
                                      scaling_factors=str(scale))

    return inv_transform
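Solving x = scale * (1 + y) / (1 - y) for y gives the forward transform this function inverts: y = (x - scale) / (x + scale). A quick round-trip check in NumPy (f_transform is a hypothetical name for the forward map):

import numpy as np

def f_transform(x, scale=4.0):
    # Hypothetical forward transform implied by the inverse above
    return (x - scale) / (x + scale)

x = np.array([0.5, 1.0, 10.0])
y = f_transform(x)
assert np.allclose(4.0 * (1.0 + y) / (1.0 - y), x)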
Example #4
    def forward(self, inputs):
        if len(inputs) != 2:
            raise ValueError('expected two inputs: predictions and labels')
        pred = inputs[0]
        label = inputs[1]
        ones = lbann.Constant(hint_layer=pred, value=1.0)
        # term1 = label * log(1 - pred); term2 = log(pred)
        term1 = lbann.Multiply(
            [label, lbann.Log(lbann.Subtract([ones, pred]))])
        term2 = lbann.Log(pred)
        # Negate and sum the two terms, then reduce to a scalar loss
        full = lbann.WeightedSum([term1, term2], scaling_factors='-1.0 -1.0')
        return lbann.Reduction(full)
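Written out, this graph evaluates -(label * log(1 - pred) + log(pred)), reduced to a scalar. A scalar NumPy sketch under the assumption that pred lies strictly in (0, 1):

import numpy as np

def loss_reference(pred, label):
    # -(label * log(1 - pred) + log(pred)), summed over all entries
    return -np.sum(label * np.log(1.0 - pred) + np.log(pred))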
Example #5
    def inv_transform(self, y):  ### Original transformation
        '''
        The inverse of the transformation function that scales the data before training
        '''
        inv_transform = lbann.WeightedSum(lbann.SafeDivide(
            lbann.Add(lbann.Constant(value=1.0, hint_layer=y),
                      lbann.Identity(y)),
            lbann.Subtract(lbann.Constant(value=1.0, hint_layer=y),
                           lbann.Identity(y))),
                                          scaling_factors=str(self.datascale))

        return inv_transform
Example #6
    def inv_transform(self, y):
        '''
        The inverse of the transformation function that scales the data before training
        '''
        inv_transform = lbann.WeightedSum(lbann.SafeDivide(
            lbann.Add(lbann.Constant(value=1.0, hint_layer=y),
                      lbann.Identity(y)),
            lbann.Subtract(lbann.Constant(value=1.0, hint_layer=y),
                           lbann.Identity(y))),
                                          scaling_factors=str(self.datascale))
        # linear_scale = 1/self.linear_scaler
        # CH2 = lbann.Tanh(lbann.WeightedSum(inv_transform, scaling_factors=str(linear_scale)))
        # return CH2
        return inv_transform
Example #7
    def inv_transform(self, y):  ### Original transformation
        '''
        The inverse of the transformation function that scales the data before training
        '''
        inv_transform = lbann.WeightedSum(lbann.SafeDivide(
            lbann.Add(lbann.Constant(value=1.0, hint_layer=y),
                      lbann.Identity(y)),
            lbann.Subtract(lbann.Constant(value=1.0, hint_layer=y),
                           lbann.Identity(y))),
                                          scaling_factors=str(self.datascale))

        return inv_transform

#      def inv_transform(self, y):  ### New transformation: log-linear


#         threshold = lbann.Constant(value=0.5, hint_layer=y)
#         is_above_threshold = lbann.Greater(y, threshold)
#         is_below_threshold = lbann.LogicalNot(is_above_threshold)

#         below = lbann.SafeDivide(
#             lbann.Subtract(y, lbann.Constant(value=1, hint_layer=y)),
#             lbann.Constant(value=0.03, hint_layer=y),
#         )
#         above = lbann.Exp(lbann.SafeDivide(
#             lbann.Subtract(
#                 y,
#                 lbann.Constant(value=0.5-0.5/math.log(300)*math.log(50), hint_layer=y)),
#             lbann.Constant(value=0.5/math.log(300), hint_layer=y),
#         ))
#         return lbann.Add(
#             lbann.Multiply(is_above_threshold, above),
#             lbann.Multiply(is_below_threshold, below),
#         )

# def f_invtransform_new(y):
#     if y<=0.5:
#         a=0.03;b=-1.0
#         return (y-b)/a
#     elif y>0.5:
#         a=0.5/np.log(300)
#         b=0.5-a*np.log(50)
#         return np.exp((y-b)/a)
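For completeness, the commented-out NumPy reference f_invtransform_new can be vectorized with np.where; this sketch is not part of the original snippet:

import numpy as np

def f_invtransform_new_vec(y):
    # Piecewise inverse: linear below 0.5, exponential above
    a_lo, b_lo = 0.03, -1.0
    a_hi = 0.5 / np.log(300)
    b_hi = 0.5 - a_hi * np.log(50)
    y = np.asarray(y, dtype=float)
    return np.where(y <= 0.5, (y - b_lo) / a_lo, np.exp((y - b_hi) / a_hi))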
Example #8
    def compute_loss(self, x, y):

        # y[:, :-1]
        y = lbann.Slice(
            y,
            axis=0,
            slice_points=str_list([0, self.input_feature_dims - 1]),
        )
        y = lbann.Identity(y)

        # x[:, 1:]
        x = lbann.Slice(
            x,
            slice_points=str_list([1, self.input_feature_dims]),
        )
        x = lbann.Identity(x)

        # Convert indices in x to one-hot representation
        # Note: Ignored indices result in zero vectors
        ignore_mask = lbann.Equal(
            x,
            self.constant(self.label_to_ignore, hint_layer=x),
        )
        keep_mask = lbann.LogicalNot(ignore_mask)
        length = lbann.Reduction(keep_mask, mode='sum')
        length = lbann.Max(length, self.constant(1, [1]))
        x = lbann.Add(
            lbann.Multiply(keep_mask, x),
            lbann.Multiply(ignore_mask, self.constant(-1, hint_layer=x)),
        )
        x = lbann.Slice(x,
                        slice_points=str_list(range(self.input_feature_dims)))
        x = [lbann.Identity(x) for _ in range(self.input_feature_dims - 1)]
        x = [lbann.OneHot(xi, size=self.dictionary_size) for xi in x]
        x = [
            lbann.Reshape(xi, dims=str_list([1, self.dictionary_size]))
            for xi in x
        ]
        x = lbann.Concatenation(x, axis=0)

        # recon_loss = F.cross_entropy(
        #     y[:, :-1].contiguous().view(-1, y.size(-1)),
        #     x[:, 1:].contiguous().view(-1),
        #     ignore_index=self.pad
        # )
        # Note: Ideally we'd shift y by y.max(-1) for numerical stability
        shifts = lbann.MatMul(
            lbann.Max(y, self.constant(0, hint_layer=y)),
            self.constant(
                1 / math.sqrt(self.dictionary_size),
                [self.dictionary_size, self.dictionary_size],
            ),
        )
        y = lbann.Subtract(y, shifts)
        z = lbann.MatMul(
            lbann.Exp(y),
            self.constant(1, [self.dictionary_size, 1]),
        )
        z = lbann.Log(z)
        z = lbann.MatMul(
            lbann.Reshape(keep_mask, dims=str_list([1, -1])),
            z,
        )
        recon_loss = lbann.MatMul(
            lbann.Reshape(y, dims=str_list([1, -1])),
            lbann.Reshape(x, dims=str_list([1, -1])),
            transpose_b=True,
        )
        recon_loss = lbann.Subtract(z, recon_loss)
        recon_loss = lbann.Reshape(recon_loss, dims=str_list([1]))
        recon_loss = lbann.Divide(recon_loss, length)

        return recon_loss
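The matrix algebra above amounts to a masked cross entropy: each kept position i contributes log(sum_j exp(y_ij)) - y[i, x[i]], and the total is divided by the kept length. A loose NumPy restatement, assuming y has shape (seq_len, dictionary_size) and x holds integer labels (this uses the ideal y.max(-1) shift that the in-code note mentions, not the graph's approximation):

import numpy as np

def recon_loss_reference(y, x, label_to_ignore):
    # y: (seq_len, dict_size) logits; x: (seq_len,) integer labels
    keep = x != label_to_ignore
    length = max(keep.sum(), 1)
    y = y - y.max(axis=-1, keepdims=True)   # shift for numerical stability
    log_z = np.log(np.exp(y).sum(axis=-1))  # log of softmax denominator
    picked = y[np.arange(len(x)), np.where(keep, x, 0)]
    return ((log_z - picked) * keep).sum() / length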
Example #9
    def compute_loss(self, x, y):

        # y[:, :-1]
        y = lbann.Slice(
            y,
            axis=0,
            slice_points=str_list([0, self.input_feature_dims-1]),
        )
        y = lbann.Identity(y)

        # x[:, 1:]
        x = lbann.Slice(
            x,
            slice_points=str_list([1, self.input_feature_dims]),
        )
        x = lbann.Identity(x)

        # Figure out entries in x to ignore
        ignore_mask = lbann.Equal(
            x,
            self.constant(self.label_to_ignore, hint_layer=x),
        )
        keep_mask = lbann.LogicalNot(ignore_mask)
        length = lbann.Reduction(keep_mask, mode='sum')
        length = lbann.Max(length, self.constant(1, [1]))

        # Convert entries in x to indices in y
        # Note: Ignored entries correspond to an index of -1.
        offsets = [
            row*self.dictionary_size
            for row in range(self.input_feature_dims-1)
        ]
        offsets = lbann.Weights(
            initializer=lbann.ValueInitializer(values=str_list(offsets)),
            optimizer=lbann.NoOptimizer(),
        )
        offsets = lbann.WeightsLayer(
            dims=str_list([self.input_feature_dims-1]),
            weights=offsets,
        )
        y_inds = lbann.Add(x, offsets)
        y_inds = lbann.Add(
            lbann.Multiply(keep_mask, y_inds),
            lbann.Multiply(
                ignore_mask,
                self.constant(-1, hint_layer=y_inds),
            ),
        )

        # recon_loss = F.cross_entropy(
        #     y[:, :-1].contiguous().view(-1, y.size(-1)),
        #     x[:, 1:].contiguous().view(-1),
        #     ignore_index=self.pad
        # )

        # Shift y for numerical stability
        # Note: We'd prefer to shift by y.max(-1)
        shifts = lbann.MatMul(
            lbann.Max(y, self.constant(0, hint_layer=y)),
            self.constant(
                1 / math.sqrt(self.dictionary_size),
                [self.dictionary_size, self.dictionary_size],
            ),
        )
        y = lbann.Subtract(y, shifts)

        # Compute log of softmax denominator and sum
        z = lbann.MatMul(
            lbann.Exp(y),
            self.constant(1, [self.dictionary_size, 1]),
        )
        z = lbann.Log(z)
        z = lbann.MatMul(
            lbann.Reshape(keep_mask, dims=str_list([1, -1])),
            z,
        )
        z = lbann.Reshape(z, dims=str_list([1]))

        # Compute cross entropy
        recon_loss = lbann.Gather(
            lbann.Reshape(y, dims=str_list([-1])),
            y_inds,
        )
        recon_loss = lbann.Reduction(recon_loss, mode='sum')
        recon_loss = lbann.Subtract(z, recon_loss)
        recon_loss = lbann.Divide(recon_loss, length)

        return recon_loss
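The offsets trick here flattens (row, token) pairs into indices into the flattened y, so a single Gather retrieves y[row, x[row]]: index = row * dictionary_size + x[row], with ignored rows mapped to -1. An illustrative NumPy check of the flat-index equivalence:

import numpy as np

dict_size = 4
y = np.arange(12.0).reshape(3, dict_size)  # (rows, dict_size) logits
x = np.array([2, 0, 3])                    # token index per row
flat_inds = np.arange(3) * dict_size + x   # row * dict_size + token
assert np.allclose(y.reshape(-1)[flat_inds], y[np.arange(3), x])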