Example #1
    def __init__(self, model, input_record, name='batch_lr_loss',
                 average_loss=True, **kwargs):
        super(BatchLRLoss, self).__init__(model, name, input_record, **kwargs)

        self.average_loss = average_loss

        assert (schema.is_schema_subset(
            schema.Struct(
                ('label', schema.Scalar()),
                ('prediction', schema.Scalar())
            ),
            input_record
        ) or schema.is_schema_subset(
            schema.Struct(
                ('label', schema.Scalar()),
                ('logit', schema.Scalar())
            ),
            input_record
        ))

        self.tags.update([Tags.EXCLUDE_FROM_PREDICTION])

        self.output_schema = schema.Scalar(
            np.float32,
            self.get_next_blob_reference('output')
        )
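A note on what the assertion above actually verifies: in the Caffe2 versions shown here, schema.is_schema_subset compares field names, so the layer only requires that the input record contain a 'label' plus either a 'prediction' or a 'logit' field; extra fields are allowed. A minimal sketch, assuming the caffe2.python schema module is importable:

from caffe2.python import schema
import numpy as np

# A record with an extra 'weight' field still passes the subset check,
# because only field names are compared.
input_record = schema.Struct(
    ('label', schema.Scalar(np.float32)),
    ('logit', schema.Scalar(np.float32)),
    ('weight', schema.Scalar(np.float32)),
)
assert schema.is_schema_subset(
    schema.Struct(('label', schema.Scalar()),
                  ('logit', schema.Scalar())),
    input_record
)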
Example #2
    def __init__(self, model, input_record, name='batch_lr_loss', **kwargs):
        super(BatchLRLoss, self).__init__(model, name, input_record, **kwargs)

        assert schema.is_schema_subset(
            schema.Struct(('label', schema.Scalar()),
                          ('prediction', schema.Scalar())), input_record)
        self.tags.update({Tags.TRAIN_ONLY})

        self.output_schema = schema.Scalar(
            np.float32, model.net.NextScopedBlob(name + '_output'))
Example #3
    def __init__(self, model, input_record, name='batch_lr_loss', **kwargs):
        super(BatchLRLoss, self).__init__(model, name, input_record, **kwargs)

        assert schema.is_schema_subset(
            schema.Struct(
                ('label', schema.Scalar()),
                ('prediction', schema.Scalar())
            ),
            input_record
        )
        self.tags.update({Tags.TRAIN_ONLY})

        self.output_schema = schema.Scalar(
            np.float32,
            core.ScopedBlobReference(model.net.NextName(self.name + '_output')))
Example #4
    def __init__(
        self,
        model,
        input_record,
        name='batch_sigmoid_cross_entropy_loss',
        **kwargs
    ):
        super(BatchSigmoidCrossEntropyLoss, self).__init__(
            model, name, input_record, **kwargs)

        assert schema.is_schema_subset(
            schema.Struct(
                ('label', schema.Scalar(np.float32)),
                ('prediction', schema.Scalar(np.float32)),
            ),
            input_record
        )
        assert input_record.prediction.field_type().shape == \
            input_record.label.field_type().shape, \
            "prediction and label must have the same shape"

        self.tags.update({Tags.TRAIN_ONLY})

        self.output_schema = schema.Scalar(
            (np.float32, tuple()), model.net.NextScopedBlob(name + '_loss')
        )
Example #5
    def __init__(self,
                 model,
                 input_record,
                 name='batch_softmax_loss',
                 label_smoothing_matrix=None,
                 **kwargs):
        super(BatchSoftmaxLoss, self).__init__(model, name, input_record,
                                               **kwargs)

        assert schema.is_schema_subset(
            schema.Struct(
                ('label', schema.Scalar()),
                ('prediction', schema.Scalar()),
            ), input_record)
        # default case: label is NOT given as a target distribution
        self.label_prob = False

        # label smoothing matrix: a K * K matrix where K is the label
        # cardinality; the (i, j) element is the value for label i
        # treated/smoothed as label j
        self.label_smoothing_matrix = label_smoothing_matrix
        if self.label_smoothing_matrix is not None:
            self.initialize_label_smoothing_constants()

        self.output_schema = schema.Struct(
            ('softmax',
             schema.Scalar(input_record.prediction.field_type(),
                           self.get_next_blob_reference('softmax'))),
            ('loss',
             schema.Scalar(np.float32, self.get_next_blob_reference('loss'))),
        )
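The comment above describes the label smoothing matrix only in words. As an illustrative sketch, the uniform-smoothing construction below is an assumption for illustration, not taken from this layer: a K x K matrix whose row i is the smoothed target distribution for true label i.

import numpy as np

K, eps = 3, 0.1  # hypothetical label cardinality and smoothing strength
label_smoothing_matrix = (1 - eps) * np.eye(K) + (eps / (K - 1)) * (1 - np.eye(K))
# entry (i, j) is the weight with which label i is smoothed onto label j;
# every row sums to 1, so each row is still a valid distribution
assert np.allclose(label_smoothing_matrix.sum(axis=1), 1.0)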
Example #6
    def __init__(
            self, model, input_record,
            name='batch_distill_lr_loss', teacherWeight=0.0, **kwargs):

        super(BatchDistillLRLoss, self).__init__(model, name, input_record, **kwargs)

        assert teacherWeight >= 0 and teacherWeight <= 1, (
            'teacherWeight=%0.2f should be in [0, 1]' % teacherWeight
        )
        self._teacherWeight = teacherWeight

        assert schema.is_schema_subset(
            schema.Struct(
                ('teacher_label', schema.Scalar()),
                ('label', schema.Scalar()),
                ('prediction', schema.Scalar())
            ),
            input_record
        )
        self.tags.update([Tags.EXCLUDE_FROM_PREDICTION])

        self.output_schema = schema.Scalar(
            np.float32,
            self.get_next_blob_reference('output')
        )
Example #7
    def __init__(self,
                 model,
                 input_record,
                 name='batch_lr_loss',
                 average_loss=True,
                 jsd_weight=0.0,
                 **kwargs):
        super(BatchLRLoss, self).__init__(model, name, input_record, **kwargs)

        self.average_loss = average_loss

        assert (schema.is_schema_subset(
            schema.Struct(('label', schema.Scalar()),
                          ('logit', schema.Scalar())), input_record))

        assert jsd_weight >= 0 and jsd_weight <= 1
        self.jsd_weight = jsd_weight
        if self.jsd_weight > 0:
            assert 'prediction' in input_record
            self.jsd_weight_const = model.add_global_constant(
                'jsd_weight', self.jsd_weight)
            self.xent_weight_const = model.add_global_constant(
                'xent_weight', 1 - self.jsd_weight)

        self.tags.update([Tags.EXCLUDE_FROM_PREDICTION])

        self.output_schema = schema.Scalar(
            np.float32, self.get_next_blob_reference('output'))
Example #8
    def __init__(
        self,
        model,
        input_record,
        name='batch_sigmoid_cross_entropy_loss',
        **kwargs
    ):
        super(BatchSigmoidCrossEntropyLoss, self).__init__(
            model, name, input_record, **kwargs)

        assert schema.is_schema_subset(
            schema.Struct(
                ('label', schema.Scalar(np.float32)),
                ('prediction', schema.Scalar(np.float32)),
            ),
            input_record
        )
        assert input_record.prediction.field_type().shape == \
            input_record.label.field_type().shape, \
            "prediction and label must have the same shape"

        self.tags.update([Tags.EXCLUDE_FROM_PREDICTION])

        self.output_schema = schema.Scalar(
            (np.float32, tuple()), self.get_next_blob_reference('loss')
        )
Example #9
    def __init__(
            self, model, input_record,
            name='batch_distill_lr_loss', teacherWeight=0.0, **kwargs):

        super(BatchDistillLRLoss, self).__init__(model, name, input_record, **kwargs)

        assert teacherWeight >= 0 and teacherWeight <= 1, (
            'teacherWeight=%0.2f should be in [0, 1]' % teacherWeight
        )
        self._teacherWeight = teacherWeight

        assert schema.is_schema_subset(
            schema.Struct(
                ('teacher_label', schema.Scalar()),
                ('label', schema.Scalar()),
                ('logit', schema.Scalar()),
            ),
            input_record
        )
        self.tags.update([Tags.EXCLUDE_FROM_PREDICTION])

        self.output_schema = schema.Scalar(
            np.float32,
            self.get_next_blob_reference('output')
        )
Example #10
    def __init__(self,
                 model,
                 input_record,
                 prediction_layer,
                 output_dims,
                 name='sampling_train',
                 **kwargs):
        super(SamplingTrain, self).__init__(model, name, input_record,
                                            **kwargs)

        layer_class = get_layer_class(prediction_layer)
        assert issubclass(layer_class, SamplingTrainableMixin)

        assert schema.is_schema_subset(
            schema.Struct(
                ('indices', schema.Scalar()),
                ('input', schema.Scalar()),
            ), input_record)

        self._prediction_layer = layer_class(model,
                                             input_record.input,
                                             output_dims=output_dims,
                                             **kwargs)

        self._prediction_layer.train_param_blobs = [
            model.net.NextBlob(str(blob) + '_sampled')
            for blob in self._prediction_layer.param_blobs
        ]

        self.params = self._prediction_layer.params

        self.output_schema = self._prediction_layer.output_schema
Example #11
    def __init__(
        self,
        model,
        input_record,
        name='batch_softmax_loss',
        **kwargs
    ):
        super(BatchSoftmaxLoss, self).__init__(
            model, name, input_record, **kwargs)

        assert schema.is_schema_subset(
            schema.Struct(
                ('label', schema.Scalar()),
                ('prediction', schema.Scalar()),
            ),
            input_record
        )

        self.output_schema = schema.Struct(
            (
                'softmax', schema.Scalar(
                    input_record.prediction.field_type(),
                    self.get_next_blob_reference('softmax')
                )
            ),
            (
                'loss', schema.Scalar(
                    np.float32, self.get_next_blob_reference('loss')
                )
            ),
        )
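For intuition about the two outputs ('softmax' and 'loss') this layer declares, here is a plain-NumPy sketch of softmax cross-entropy with integer class labels; this is the textbook formulation, shown for reference, not the exact ops the layer emits:

import numpy as np

def softmax_loss(prediction, label):
    # prediction: (N, K) raw scores; label: (N,) integer class ids
    shifted = prediction - prediction.max(axis=1, keepdims=True)  # numerical stability
    softmax = np.exp(shifted) / np.exp(shifted).sum(axis=1, keepdims=True)
    loss = -np.log(softmax[np.arange(len(label)), label]).mean()
    return softmax, loss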
Example #12
    def add_ops(self, net):
        # numerically stable log-softmax with crossentropy
        if schema.is_schema_subset(
            schema.Struct(
                ('label', schema.Scalar()),
                ('logit', schema.Scalar())
            ), self.input_record
        ):
            label = self.input_record.label()
            # mandatory cast to float32
            # self.input_record.label.field_type().base is np.float32 but
            # label type is actually int
            label = net.Cast(
                label,
                net.NextScopedBlob('label_float32'),
                to=core.DataType.FLOAT)
            label = net.ExpandDims(
                label, net.NextScopedBlob('expanded_label'), dims=[1])
            xent = net.SigmoidCrossEntropyWithLogits(
                [self.input_record.logit(), label],
                net.NextScopedBlob('cross_entropy'),
            )
        # TODO(T23937449): Change all the use cases of BatchLRLoss to the
        # numerically stable version
        else:
            class_probabilities = net.MakeTwoClass(
                self.input_record.prediction.field_blobs(),
                net.NextScopedBlob('two_class_predictions')
            )
            label = self.input_record.label.field_blobs()
            label = [net.Cast(
                label,
                net.NextScopedBlob('int32_label'),
                to=core.DataType.INT32)]
            xent = net.LabelCrossEntropy(
                [class_probabilities] + label,
                net.NextScopedBlob('cross_entropy'),
            )
        if 'weight' in self.input_record.fields:
            weight_blob = self.input_record.weight()
            if self.input_record.weight.field_type().base != np.float32:
                weight_blob = net.Cast(
                    weight_blob,
                    weight_blob + '_float32',
                    to=core.DataType.FLOAT
                )
            weight_blob = net.StopGradient(
                [weight_blob],
                [net.NextScopedBlob('weight_stop_gradient')],
            )
            xent = net.Mul(
                [xent, weight_blob],
                net.NextScopedBlob('weighted_cross_entropy'),
            )

        if self.average_loss:
            net.AveragedLoss(xent, self.output_schema.field_blobs())
        else:
            net.ReduceFrontSum(xent, self.output_schema.field_blobs())
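The "numerically stable" branch above delegates to SigmoidCrossEntropyWithLogits. For reference, the standard stable formulation that such ops typically implement, shown as a NumPy sketch (the op's actual kernel is assumed, not quoted):

import numpy as np

def sigmoid_xent_with_logits(logit, label):
    # stable rewrite of -(label * log(sigmoid(x)) + (1 - label) * log(1 - sigmoid(x)))
    # that never exponentiates a large positive number
    return np.maximum(logit, 0) - logit * label + np.log1p(np.exp(-np.abs(logit)))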
Example #13
 def __init__(self, model, input_record, name='bpr_loss', **kwargs):
     super(BPRLoss, self).__init__(model, name, input_record, **kwargs)
     assert schema.is_schema_subset(
         schema.Struct(
             ('pos_prediction', schema.Scalar()),
             ('neg_prediction', schema.List(np.float32)),
         ), input_record)
     self.tags.update([Tags.EXCLUDE_FROM_PREDICTION])
     self.output_schema = schema.Scalar(
         np.float32, self.get_next_blob_reference('output'))
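For reference, the BPR (Bayesian Personalized Ranking) objective pairs one positive prediction with a list of negatives; a minimal NumPy sketch of the usual formulation, as an illustration rather than this layer's exact ops:

import numpy as np

def bpr_loss(pos_prediction, neg_predictions):
    # sum of -log(sigmoid(pos - neg)) over negatives, written via the
    # numerically stable softplus identity: log(1 + exp(neg - pos))
    return np.sum(np.log1p(np.exp(np.asarray(neg_predictions) - pos_prediction)))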
Example #14
    def __init__(self,
                 model,
                 input_record,
                 name='batch_lr_loss',
                 average_loss=True,
                 jsd_weight=0.0,
                 pos_label_target=1.0,
                 neg_label_target=0.0,
                 homotopy_weighting=False,
                 log_D_trick=False,
                 unjoined_lr_loss=False,
                 uncertainty_penalty=1.0,
                 focal_gamma=0.0,
                 stop_grad_in_focal_factor=False,
                 **kwargs):
        super(BatchLRLoss, self).__init__(model, name, input_record, **kwargs)

        self.average_loss = average_loss

        assert (schema.is_schema_subset(
            schema.Struct(('label', schema.Scalar()),
                          ('logit', schema.Scalar())), input_record))

        self.jsd_fuse = False
        assert jsd_weight >= 0 and jsd_weight <= 1
        if jsd_weight > 0 or homotopy_weighting:
            assert 'prediction' in input_record
            self.init_weight(jsd_weight, homotopy_weighting)
            self.jsd_fuse = True
        self.homotopy_weighting = homotopy_weighting

        assert pos_label_target <= 1 and pos_label_target >= 0
        assert neg_label_target <= 1 and neg_label_target >= 0
        assert pos_label_target >= neg_label_target
        self.pos_label_target = pos_label_target
        self.neg_label_target = neg_label_target

        assert not (log_D_trick and unjoined_lr_loss)
        self.log_D_trick = log_D_trick
        self.unjoined_lr_loss = unjoined_lr_loss
        assert uncertainty_penalty >= 0
        self.uncertainty_penalty = uncertainty_penalty

        self.tags.update([Tags.EXCLUDE_FROM_PREDICTION])

        self.output_schema = schema.Scalar(
            np.float32, self.get_next_blob_reference('output'))

        self.focal_gamma = focal_gamma
        self.stop_grad_in_focal_factor = stop_grad_in_focal_factor
Example #15
    def __init__(
        self,
        model,
        input_record,
        name='batch_lr_loss',
        average_loss=True,
        jsd_weight=0.0,
        pos_label_target=1.0,
        neg_label_target=0.0,
        homotopy_weighting=False,
        log_D_trick=False,
        unjoined_lr_loss=False,
        **kwargs
    ):
        super(BatchLRLoss, self).__init__(model, name, input_record, **kwargs)

        self.average_loss = average_loss

        assert (schema.is_schema_subset(
            schema.Struct(
                ('label', schema.Scalar()),
                ('logit', schema.Scalar())
            ),
            input_record
        ))

        self.jsd_fuse = False
        assert jsd_weight >= 0 and jsd_weight <= 1
        if jsd_weight > 0 or homotopy_weighting:
            assert 'prediction' in input_record
            self.init_weight(jsd_weight, homotopy_weighting)
            self.jsd_fuse = True
        self.homotopy_weighting = homotopy_weighting

        assert pos_label_target <= 1 and pos_label_target >= 0
        assert neg_label_target <= 1 and neg_label_target >= 0
        assert pos_label_target >= neg_label_target
        self.pos_label_target = pos_label_target
        self.neg_label_target = neg_label_target

        assert not (log_D_trick and unjoined_lr_loss)
        self.log_D_trick = log_D_trick
        self.unjoined_lr_loss = unjoined_lr_loss

        self.tags.update([Tags.EXCLUDE_FROM_PREDICTION])

        self.output_schema = schema.Scalar(
            np.float32,
            self.get_next_blob_reference('output')
        )
Example #16
    def __init__(self, model, input_record, name='batch_mse_loss', **kwargs):
        super(BatchMSELoss, self).__init__(model, name, input_record, **kwargs)

        assert schema.is_schema_subset(
            schema.Struct(
                ('label', schema.Scalar()),
                ('prediction', schema.Scalar())
            ),
            input_record
        )
        self.tags.update([Tags.EXCLUDE_FROM_PREDICTION])

        self.output_schema = schema.Scalar(
            np.float32,
            model.net.NextScopedBlob(name + '_output'))
Example #17
    def __init__(self, model, input_record, name='batch_mse_loss', **kwargs):
        super(BatchMSELoss, self).__init__(model, name, input_record, **kwargs)

        assert schema.is_schema_subset(
            schema.Struct(
                ('label', schema.Scalar()),
                ('prediction', schema.Scalar())
            ),
            input_record
        )
        self.tags.update([Tags.EXCLUDE_FROM_PREDICTION])

        self.output_schema = schema.Scalar(
            np.float32,
            self.get_next_blob_reference('output'))
Example #18
    def __init__(self,
                 model,
                 input_record,
                 name='batch_direct_l1_loss',
                 **kwargs):
        super(BatchDirectL1Loss, self).__init__(model, name, input_record,
                                                **kwargs)

        assert schema.is_schema_subset(
            schema.Struct(('label', schema.Scalar()),
                          ('prediction', schema.Scalar())), input_record)
        self.tags.update([Tags.EXCLUDE_FROM_PREDICTION])

        self.output_schema = schema.Scalar(
            np.float32, self.get_next_blob_reference('output'))
Example #19
 def __init__(self, model, input_record, name='margin_rank_loss',
              margin=0.1, **kwargs):
     super(MarginRankLoss, self).__init__(model, name, input_record, **kwargs)
     assert margin >= 0, ('For hinge loss, margin should be no less than 0')
     self._margin = margin
     assert schema.is_schema_subset(
         schema.Struct(
             ('pos_prediction', schema.Scalar()),
             ('neg_prediction', schema.List(np.float32)),
         ),
         input_record
     )
     self.tags.update([Tags.EXCLUDE_FROM_PREDICTION])
     self.output_schema = schema.Scalar(
         np.float32,
         self.get_next_blob_reference('output'))
Example #20
 def __init__(self, model, input_record, name='margin_rank_loss',
              margin=0.1, average_loss=False, **kwargs):
     super(MarginRankLoss, self).__init__(model, name, input_record, **kwargs)
     assert margin >= 0, ('For hinge loss, margin should be no less than 0')
     self._margin = margin
     self._average_loss = average_loss
     assert schema.is_schema_subset(
         schema.Struct(
             ('pos_prediction', schema.Scalar()),
             ('neg_prediction', schema.List(np.float32)),
         ),
         input_record
     )
     self.tags.update([Tags.EXCLUDE_FROM_PREDICTION])
     self.output_schema = schema.Scalar(
         np.float32,
         self.get_next_blob_reference('output'))
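The margin ranking (hinge) loss behind both variants above penalizes any negative that scores within `margin` of the positive; a NumPy sketch under that reading, using a hypothetical helper rather than the layer's actual graph:

import numpy as np

def margin_rank_loss(pos_prediction, neg_predictions, margin=0.1, average_loss=False):
    # hinge term max(0, margin - (pos - neg)) per negative prediction
    losses = np.maximum(0.0, margin - (pos_prediction - np.asarray(neg_predictions)))
    return losses.mean() if average_loss else losses.sum()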
Example #21
    def __init__(
        self,
        model,
        input_record,
        name='batch_lr_loss',
        average_loss=True,
        jsd_weight=0.0,
        pos_label_target=1.0,
        neg_label_target=0.0,
        **kwargs
    ):
        super(BatchLRLoss, self).__init__(model, name, input_record, **kwargs)

        self.average_loss = average_loss

        assert (schema.is_schema_subset(
            schema.Struct(
                ('label', schema.Scalar()),
                ('logit', schema.Scalar())
            ),
            input_record
        ))

        assert jsd_weight >= 0 and jsd_weight <= 1
        self.jsd_weight = jsd_weight
        if self.jsd_weight > 0:
            assert 'prediction' in input_record
            self.jsd_weight_const = model.add_global_constant(
                'jsd_weight', self.jsd_weight
            )
            self.xent_weight_const = model.add_global_constant(
                'xent_weight', 1 - self.jsd_weight
            )

        assert pos_label_target <= 1 and pos_label_target >= 0
        assert neg_label_target <= 1 and neg_label_target >= 0
        assert pos_label_target >= neg_label_target
        self.pos_label_target = pos_label_target
        self.neg_label_target = neg_label_target

        self.tags.update([Tags.EXCLUDE_FROM_PREDICTION])

        self.output_schema = schema.Scalar(
            np.float32,
            self.get_next_blob_reference('output')
        )
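pos_label_target and neg_label_target soften the binary targets before the cross-entropy. Assuming the straightforward affine remapping (a sketch, not the layer's exact ops), label 1 maps to the positive target and label 0 to the negative one:

import numpy as np

def remap_label(label, pos_label_target=0.9, neg_label_target=0.1):
    # 1 -> pos_label_target, 0 -> neg_label_target, linear in between
    return neg_label_target + np.asarray(label) * (pos_label_target - neg_label_target)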
Example #22
    def __init__(self, model, input_record, name='batch_lr_loss',
                 average_loss=True, **kwargs):
        super(BatchLRLoss, self).__init__(model, name, input_record, **kwargs)

        self.average_loss = average_loss

        assert (schema.is_schema_subset(
            schema.Struct(
                ('label', schema.Scalar()),
                ('logit', schema.Scalar())
            ),
            input_record
        ))

        self.tags.update([Tags.EXCLUDE_FROM_PREDICTION])

        self.output_schema = schema.Scalar(
            np.float32,
            self.get_next_blob_reference('output')
        )
Example #23
    def __init__(
            self, model, input_record,
            name='batch_distill_lr_loss', teacher_weight=0.0,
            filter_invalid_teacher_label=False, **kwargs):

        super(BatchDistillLRLoss, self).__init__(model, name, input_record, **kwargs)

        assert teacher_weight >= 0 and teacher_weight <= 1, (
            'teacher_weight=%0.2f should be in [0, 1]' % teacher_weight
        )

        self._teacher_weight = teacher_weight
        self._filter_invalid_teacher_label = filter_invalid_teacher_label
        # This hyper-parameter determines whether to filter out bad teacher
        # labels, i.e., teacher labels that are zero.
        if self._filter_invalid_teacher_label:
            self.threshold = model.add_global_constant(
                str(model.net.NextScopedBlob('threshold')),
                [0.0],   # threshold for filtering teacher weight.
                dtype=np.float64  # np.float was removed in NumPy 1.24; float64 is its alias
            )
            self.neg_ONE = model.add_global_constant(
                str(model.net.NextScopedBlob('neg_ONE')),
                [-1.0],
                dtype=np.float64
            )
            self.ONE = model._GetOne()
        assert schema.is_schema_subset(
            schema.Struct(
                ('teacher_label', schema.Scalar()),
                ('label', schema.Scalar()),
                ('logit', schema.Scalar()),
            ),
            input_record
        )
        self.tags.update([Tags.EXCLUDE_FROM_PREDICTION])

        self.output_schema = schema.Scalar(
            np.float32,
            self.get_next_blob_reference('output')
        )
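filter_invalid_teacher_label marks teacher labels at or below the zero threshold as invalid so they can drop out of the distillation term. A hypothetical NumPy sketch of the masking idea (the layer itself builds this from the threshold, neg_ONE, and ONE constants above):

import numpy as np

def valid_teacher_mask(teacher_label, threshold=0.0):
    # 1.0 where the teacher label is usable, 0.0 where it is invalid (<= threshold)
    return (np.asarray(teacher_label) > threshold).astype(np.float32)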
Example #24
    def __init__(
        self,
        model,
        input_record,
        name='batch_softmax_loss',
        label_smoothing_matrix=None,
        **kwargs
    ):
        super(BatchSoftmaxLoss, self).__init__(
            model, name, input_record, **kwargs)

        assert schema.is_schema_subset(
            schema.Struct(
                ('label', schema.Scalar()),
                ('prediction', schema.Scalar()),
            ),
            input_record
        )
        # default case: label is NOT given as a target distribution
        self.label_prob = False

        # label smoothing matrix: a K * K matrix where K is the label
        # cardinality; the (i, j) element is the value for label i
        # treated/smoothed as label j
        self.label_smoothing_matrix = label_smoothing_matrix
        if self.label_smoothing_matrix is not None:
            self.initialize_label_smoothing_constants()

        self.output_schema = schema.Struct(
            (
                'softmax', schema.Scalar(
                    input_record.prediction.field_type(),
                    self.get_next_blob_reference('softmax')
                )
            ),
            (
                'loss', schema.Scalar(
                    np.float32, self.get_next_blob_reference('loss')
                )
            ),
        )
Example #25
    def __init__(self,
                 model,
                 input_record,
                 name='batch_softmax_loss',
                 **kwargs):
        super(BatchSoftmaxLoss, self).__init__(model, name, input_record,
                                               **kwargs)

        assert schema.is_schema_subset(
            schema.Struct(
                ('label', schema.Scalar()),
                ('prediction', schema.Scalar()),
            ), input_record)

        self.output_schema = schema.Struct(
            ('softmax',
             schema.Scalar(input_record.prediction.field_type(),
                           self.get_next_blob_reference('softmax'))),
            ('loss',
             schema.Scalar(np.float32, self.get_next_blob_reference('loss'))),
        )
Example #26
    def __init__(self,
                 model,
                 input_record,
                 name='batch_distill_lr_loss',
                 teacherWeight=0.0,
                 **kwargs):

        super(BatchDistillLRLoss, self).__init__(model, name, input_record,
                                                 **kwargs)

        assert teacherWeight >= 0 and teacherWeight <= 1, (
            'teacherWeight=%0.2f should be in [0, 1]' % teacherWeight)
        self._teacherWeight = teacherWeight

        assert schema.is_schema_subset(
            schema.Struct(('teacher_label', schema.Scalar()),
                          ('label', schema.Scalar()),
                          ('prediction', schema.Scalar())), input_record)
        self.tags.update({Tags.TRAIN_ONLY})

        self.output_schema = schema.Scalar(
            np.float32, model.net.NextScopedBlob(name + '_output'))
Example #27
    def __init__(self,
                 model,
                 input_record,
                 name='batch_softmax_loss',
                 **kwargs):
        super(BatchSoftmaxLoss, self).__init__(model, name, input_record,
                                               **kwargs)

        assert schema.is_schema_subset(
            schema.Struct(
                ('label', schema.Scalar()),
                ('prediction', schema.Scalar()),
            ), input_record)
        self.tags.update({Tags.TRAIN_ONLY})

        self.output_schema = schema.Struct(
            ('softmax',
             schema.Scalar(input_record.prediction.field_type(),
                           model.net.NextScopedBlob(name + '_softmax'))),
            ('loss',
             schema.Scalar(np.float32,
                           model.net.NextScopedBlob(name + '_loss'))),
        )
Example #28
    def __init__(self,
                 model,
                 input_record,
                 max_scale=1.0,
                 name='batch_direct_weighted_l2_loss',
                 **kwargs):
        super(BatchDirectWeightedL2Loss,
              self).__init__(model, name, input_record, **kwargs)

        assert schema.is_schema_subset(
            schema.Struct(('label', schema.Scalar()),
                          ('prediction', schema.Scalar())), input_record)
        self.tags.update([Tags.EXCLUDE_FROM_PREDICTION])
        self.max_scale = max_scale
        self.output_schema = schema.Struct(
            ('loss',
             schema.Scalar(np.float32, self.get_next_blob_reference('loss'))),
            ('l2_metric',
             schema.Scalar(np.float32,
                           self.get_next_blob_reference('l2_metric'))),
            ('scaled_l2_metric',
             schema.Scalar(np.float32,
                           self.get_next_blob_reference('scaled_l2_metric'))),
        )
Example #29
    def __init__(self,
                 model,
                 input_record,
                 name='batch_lr_loss',
                 average_loss=True,
                 jsd_weight=0.0,
                 pos_label_target=1.0,
                 neg_label_target=0.0,
                 homotopy_weighting=False,
                 **kwargs):
        super(BatchLRLoss, self).__init__(model, name, input_record, **kwargs)

        self.average_loss = average_loss

        assert (schema.is_schema_subset(
            schema.Struct(('label', schema.Scalar()),
                          ('logit', schema.Scalar())), input_record))

        self.jsd_fuse = False
        assert jsd_weight >= 0 and jsd_weight <= 1
        if jsd_weight > 0 or homotopy_weighting:
            assert 'prediction' in input_record
            self.init_weight(jsd_weight, homotopy_weighting)
            self.jsd_fuse = True
        self.homotopy_weighting = homotopy_weighting

        assert pos_label_target <= 1 and pos_label_target >= 0
        assert neg_label_target <= 1 and neg_label_target >= 0
        assert pos_label_target >= neg_label_target
        self.pos_label_target = pos_label_target
        self.neg_label_target = neg_label_target

        self.tags.update([Tags.EXCLUDE_FROM_PREDICTION])

        self.output_schema = schema.Scalar(
            np.float32, self.get_next_blob_reference('output'))
Example #30
 def _check_output_schema(self):
     assert self._output_schema is not None, "Schema is not initialized"
     assert (self._predict_output_schema is None or
             schema.is_schema_subset(self._predict_output_schema,
                                     self._output_schema)), (
         "predict_output_schema is not a subset of the output_schema")
Example #31
    def __init__(self,
                 model,
                 input_record,
                 output_dims,
                 s=1,
                 scale_random=1.0,
                 scale_learned=1.0,
                 weight_init_random=None,
                 bias_init_random=None,
                 weight_init_learned=None,
                 bias_init_learned=None,
                 weight_optim=None,
                 bias_optim=None,
                 set_weight_as_global_constant=False,
                 name='semi_random_features',
                 **kwargs):

        if isinstance(input_record, schema.Struct):
            assert schema.is_schema_subset(
                schema.Struct(
                    ('full', schema.Scalar()),
                    ('random', schema.Scalar()),
                ), input_record)
            self.input_record_full = input_record.full
            self.input_record_random = input_record.random

        elif isinstance(input_record, schema.Scalar):
            self.input_record_full = input_record
            self.input_record_random = input_record

        super(SemiRandomFeatures, self).__init__(
            model,
            self.input_record_full,
            output_dims,
            s=s,
            scale=scale_random,  # To initialize the random parameters
            weight_init=weight_init_random,
            bias_init=bias_init_random,
            weight_optim=None,
            bias_optim=None,
            set_weight_as_global_constant=set_weight_as_global_constant,
            initialize_output_schema=False,
            name=name,
            **kwargs)

        self.output_schema = schema.Struct(
            (
                'full',
                schema.Scalar((np.float32, output_dims),
                              model.net.NextScopedBlob(name + '_full_output')),
            ),
            (
                'random',
                schema.Scalar(
                    (np.float32, output_dims),
                    model.net.NextScopedBlob(name + '_random_output')),
            ),
        )

        # To initialize the learnable parameters
        assert (scale_learned > 0.0), \
            "Expected scale (learned) > 0, got %s" % scale_learned
        self.stddev = scale_learned * np.sqrt(1.0 / self.input_dims)

        # Learned Parameters
        (self.learned_w,
         self.learned_b) = self._initialize_params('learned_w',
                                                   'learned_b',
                                                   w_init=weight_init_learned,
                                                   b_init=bias_init_learned,
                                                   w_optim=weight_optim,
                                                   b_optim=bias_optim)
Example #32
    def __init__(self,
                 model,
                 input_record,
                 output_dims,
                 s=1,
                 scale=None,
                 weight_init=None,
                 bias_init=None,
                 weight_optim=None,
                 bias_optim=None,
                 set_weight_as_global_constant=False,
                 name='semi_random_features',
                 **kwargs):

        if isinstance(input_record, schema.Struct):
            assert schema.is_schema_subset(
                schema.Struct(
                    ('full', schema.Scalar()),
                    ('random', schema.Scalar()),
                ), input_record)
            self.input_record_full = input_record.full
            self.input_record_random = input_record.random

        elif isinstance(input_record, schema.Scalar):
            self.input_record_full = input_record
            self.input_record_random = input_record

        super(SemiRandomFeatures, self).__init__(
            model,
            self.input_record_full,
            output_dims,
            s=s,
            scale=scale,
            weight_init=weight_init,
            bias_init=bias_init,
            weight_optim=None,
            bias_optim=None,
            set_weight_as_global_constant=set_weight_as_global_constant,
            initialize_output_schema=False,
            name=name,
            **kwargs)

        self.output_schema = schema.Struct(
            (
                'full',
                schema.Scalar((np.float32, output_dims),
                              model.net.NextScopedBlob(name + '_full_output')),
            ),
            (
                'random',
                schema.Scalar(
                    (np.float32, output_dims),
                    model.net.NextScopedBlob(name + '_random_output')),
            ),
        )

        # Learned Parameters
        (self.learned_w,
         self.learned_b) = self._initialize_params('learned_w',
                                                   'learned_b',
                                                   w_init=weight_init,
                                                   b_init=bias_init,
                                                   w_optim=weight_optim,
                                                   b_optim=bias_optim)
Example #33
    def __init__(
            self,
            model,
            input_record,
            output_dims,
            s=1,
            scale_random=1.0,
            scale_learned=1.0,
            weight_init_random=None,
            bias_init_random=None,
            weight_init_learned=None,
            bias_init_learned=None,
            weight_optim=None,
            bias_optim=None,
            set_weight_as_global_constant=False,
            name='semi_random_features',
            **kwargs):

        if isinstance(input_record, schema.Struct):
            assert schema.is_schema_subset(
                schema.Struct(
                    ('full', schema.Scalar()),
                    ('random', schema.Scalar()),
                ),
                input_record
            )
            self.input_record_full = input_record.full
            self.input_record_random = input_record.random

        elif isinstance(input_record, schema.Scalar):
            self.input_record_full = input_record
            self.input_record_random = input_record

        super(SemiRandomFeatures, self).__init__(
            model,
            self.input_record_full,
            output_dims,
            s=s,
            scale=scale_random,       # To initialize the random parameters
            weight_init=weight_init_random,
            bias_init=bias_init_random,
            weight_optim=None,
            bias_optim=None,
            set_weight_as_global_constant=set_weight_as_global_constant,
            initialize_output_schema=False,
            name=name,
            **kwargs)

        self.output_schema = schema.Struct(
            ('full', schema.Scalar(
                (np.float32, output_dims),
                model.net.NextScopedBlob(name + '_full_output')
            ),),
            ('random', schema.Scalar(
                (np.float32, output_dims),
                model.net.NextScopedBlob(name + '_random_output')
            ),),
        )

        # To initialize the learnable parameters
        assert (scale_learned > 0.0), \
            "Expected scale (learned) > 0, got %s" % scale_learned
        self.stddev = scale_learned * np.sqrt(1.0 / self.input_dims)

        # Learned Parameters
        (self.learned_w, self.learned_b) = self._initialize_params(
            'learned_w',
            'learned_b',
            w_init=weight_init_learned,
            b_init=bias_init_learned,
            w_optim=weight_optim,
            b_optim=bias_optim
        )
Example #34
File: layers.py  Project: Ralfhund/caffe2
 def _check_output_schema(self):
     assert self._output_schema is not None, "Schema is not initialized"
     assert (self._predict_output_schema is None or
             schema.is_schema_subset(self._predict_output_schema,
                                     self._output_schema)), (
         "predict_output_schema is not a subset of the output_schema")
Example #35
    def __init__(self,
                 model,
                 input_record,
                 name='batch_lr_loss',
                 average_loss=True,
                 jsd_weight=0.0,
                 pos_label_target=1.0,
                 neg_label_target=0.0,
                 homotopy_weighting=False,
                 log_D_trick=False,
                 unjoined_lr_loss=False,
                 uncertainty_penalty=1.0,
                 focal_gamma=0.0,
                 stop_grad_in_focal_factor=False,
                 task_gamma=1.0,
                 task_gamma_lb=0.1,
                 **kwargs):
        super(BatchLRLoss, self).__init__(model, name, input_record, **kwargs)

        self.average_loss = average_loss

        assert (schema.is_schema_subset(
            schema.Struct(('label', schema.Scalar()),
                          ('logit', schema.Scalar())), input_record))

        self.jsd_fuse = False
        assert jsd_weight >= 0 and jsd_weight <= 1
        if jsd_weight > 0 or homotopy_weighting:
            assert 'prediction' in input_record
            self.init_weight(jsd_weight, homotopy_weighting)
            self.jsd_fuse = True
        self.homotopy_weighting = homotopy_weighting

        assert pos_label_target <= 1 and pos_label_target >= 0
        assert neg_label_target <= 1 and neg_label_target >= 0
        assert pos_label_target >= neg_label_target
        self.pos_label_target = pos_label_target
        self.neg_label_target = neg_label_target

        assert not (log_D_trick and unjoined_lr_loss)
        self.log_D_trick = log_D_trick
        self.unjoined_lr_loss = unjoined_lr_loss
        assert uncertainty_penalty >= 0
        self.uncertainty_penalty = uncertainty_penalty

        self.tags.update([Tags.EXCLUDE_FROM_PREDICTION])

        self.output_schema = schema.Scalar(
            np.float32, self.get_next_blob_reference('output'))

        self.focal_gamma = focal_gamma
        self.stop_grad_in_focal_factor = stop_grad_in_focal_factor

        self.apply_exp_decay = False
        if task_gamma < 1.0:
            self.apply_exp_decay = True
            self.task_gamma_cur = self.create_param(
                param_name=('%s_task_gamma_cur' % self.name),
                shape=[1],
                initializer=('ConstantFill', {
                    'value': 1.0,
                    'dtype': core.DataType.FLOAT
                }),
                optimizer=self.model.NoOptim,
            )

            self.task_gamma = self.create_param(
                param_name=('%s_task_gamma' % self.name),
                shape=[1],
                initializer=('ConstantFill', {
                    'value': task_gamma,
                    'dtype': core.DataType.FLOAT
                }),
                optimizer=self.model.NoOptim,
            )

            self.task_gamma_lb = self.create_param(
                param_name=('%s_task_gamma_lb' % self.name),
                shape=[1],
                initializer=('ConstantFill', {
                    'value': task_gamma_lb,
                    'dtype': core.DataType.FLOAT
                }),
                optimizer=self.model.NoOptim,
            )