def add_ops(self, net):
    prediction = net.Squeeze(
        self.input_record.prediction(),
        net.NextScopedBlob('squeezed_prediction'),
        dims=[1]
    )
    label = self.input_record.label.field_blobs()
    if self.input_record.label.field_type().base != (
            self.input_record.prediction.field_type().base):
        label = net.Cast(
            label,
            net.NextScopedBlob('cast_label'),
            to=schema.data_type_for_dtype(
                self.input_record.prediction.field_type()
            )
        )

    const_delta = net.ConstantFill(
        label,
        net.NextScopedBlob("delta"),
        value=self._delta,
        dtype=core.DataType.FLOAT,
    )

    label = net.StopGradient(label, net.NextScopedBlob('stopped_label'))
    const_delta = net.StopGradient(
        const_delta, net.NextScopedBlob('stopped_delta'))

    # abs_error = np.abs(true - pred)
    abs_error = net.L1Distance(
        [label, prediction], net.NextScopedBlob("abs_error"))

    # quadratic = 0.5 * min(abs_error, delta)^2
    # linear = delta * max(abs_error - delta, 0)
    min_error = net.Min(
        [abs_error, const_delta], net.NextScopedBlob("min_error_delta"))
    quadratic_term = net.Scale(net.Sqr(min_error), scale=float(0.5))
    linear_term = net.Mul(
        [
            net.Sub([abs_error, min_error]),
            const_delta,
        ],
        net.NextScopedBlob("huber_linear_term"),
    )

    # huber = 0.5 * min(abs_error, delta)^2 + delta * max(abs_error - delta, 0)
    huber_dist = net.Add(
        [quadratic_term, linear_term], net.NextScopedBlob("huber_dist"))

    if 'weight' in self.input_record.fields:
        weight_blob = self.input_record.weight()
        if self.input_record.weight.field_type().base != np.float32:
            weight_blob = net.Cast(
                weight_blob,
                weight_blob + '_float32',
                to=core.DataType.FLOAT
            )
        weight_blob = net.StopGradient(
            [weight_blob],
            [net.NextScopedBlob('weight_stop_gradient')],
        )
        huber_dist = net.Mul(
            [huber_dist, weight_blob],
            net.NextScopedBlob("weighted_huber_distance"),
        )

    net.AveragedLoss(huber_dist, self.output_schema.field_blobs())
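# Hedged reference sketch (not from the source): a NumPy check of the
# per-row Huber distance assembled by the ops above, assuming 1-D `label`
# and `prediction` arrays and a scalar `delta`. `_numpy_huber` is a
# hypothetical helper name.
def _numpy_huber(label, prediction, delta):
    import numpy as np
    abs_error = np.abs(label - prediction)
    # quadratic branch: 0.5 * min(|error|, delta)^2
    quadratic = 0.5 * np.minimum(abs_error, delta) ** 2
    # linear branch: delta * max(|error| - delta, 0)
    linear = delta * np.maximum(abs_error - delta, 0.0)
    # mirrors net.Add([quadratic_term, linear_term], ...) before AveragedLoss
    return quadratic + linear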
def add_ops(self, net):
    prediction = net.Squeeze(
        self.input_record.prediction(),
        net.NextScopedBlob('squeezed_prediction'),
        dims=[1]
    )
    label = self.input_record.label.field_blobs()
    if self.input_record.label.field_type().base != (
            self.input_record.prediction.field_type().base):
        label = net.Cast(
            label,
            net.NextScopedBlob('cast_label'),
            to=schema.data_type_for_dtype(
                self.input_record.prediction.field_type()
            )
        )
    label = net.StopGradient(
        label,
        net.NextScopedBlob('stopped_label')
    )

    l2dist = net.SquaredL2Distance(
        [label, prediction],
        net.NextScopedBlob('l2')
    )

    net.AveragedLoss(l2dist, self.output_schema.field_blobs())
def __init__(self, init_net, filename, schema, num_passes=1, batch_size=1):
    """
    Create op for building a TextFileReader instance in the workspace.

    Args:
        init_net   : Net that will be run only once at startup.
        filename   : Path to file to read from.
        schema     : schema.Struct representing the schema of the data.
                     Currently, only Structs of Scalars are supported.
        num_passes : Number of passes over the data.
        batch_size : Number of rows to read at a time.
    """
    assert isinstance(schema, Struct), 'Schema must be a schema.Struct'
    for name, child in schema.get_children():
        assert isinstance(child, Scalar), (
            'Only scalar fields are supported in TextFileReader.')
    field_types = [
        data_type_for_dtype(dtype) for dtype in schema.field_types()]
    Reader.__init__(self, schema)
    self._reader = init_net.CreateTextFileReader(
        [],
        filename=filename,
        num_passes=num_passes,
        field_types=field_types)
    self._batch_size = batch_size
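# Hedged usage sketch (not from the source): wiring up the constructor above,
# assuming it belongs to caffe2.python.text_file_reader.TextFileReader. The
# file path and field names are hypothetical.
import numpy as np
from caffe2.python import core
from caffe2.python.schema import Struct, Scalar
from caffe2.python.text_file_reader import TextFileReader

init_net = core.Net('reader_init')
reader = TextFileReader(
    init_net,
    filename='/path/to/data.tsv',   # hypothetical path
    schema=Struct(
        ('label', Scalar(np.float32)),
        ('id', Scalar(np.int64)),
    ),
    batch_size=32,
)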
def add_ops(self, net):
    prediction = net.Squeeze(
        self.input_record.prediction(),
        net.NextScopedBlob('squeezed_prediction'),
        dims=[1]
    )
    label = self.input_record.label.field_blobs()
    if self.input_record.label.field_type().base != (
            self.input_record.prediction.field_type().base):
        label = net.Cast(
            label,
            net.NextScopedBlob('cast_label'),
            to=schema.data_type_for_dtype(
                self.input_record.prediction.field_type()
            )
        )
    label = net.StopGradient(
        label,
        net.NextScopedBlob('stopped_label')
    )

    l2dist = net.SquaredL2Distance(
        [label, prediction],
        net.NextScopedBlob('l2')
    )

    if 'weight' in self.input_record.fields:
        weight_blob = self.input_record.weight()
        if self.input_record.weight.field_type().base != np.float32:
            weight_blob = net.Cast(
                weight_blob,
                weight_blob + '_float32',
                to=core.DataType.FLOAT
            )
        weight_blob = net.StopGradient(
            [weight_blob],
            [net.NextScopedBlob('weight_stop_gradient')],
        )
        l2dist = net.Mul(
            [l2dist, weight_blob],
            net.NextScopedBlob('weighted_l2_distance'),
        )

    net.AveragedLoss(l2dist, self.output_schema.field_blobs())
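# Hedged reference sketch (not from the source): a NumPy equivalent of the
# weighted batch MSE above, assuming 1-D inputs after the Squeeze and that
# SquaredL2Distance computes 0.5 * (x - y)^2 per row, as in Caffe2's op docs.
# `_numpy_weighted_mse` is a hypothetical helper name.
def _numpy_weighted_mse(label, prediction, weight=None):
    import numpy as np
    l2dist = 0.5 * (label - prediction) ** 2   # assumed per-row distance
    if weight is not None:
        l2dist = l2dist * weight.astype(np.float32)
    return l2dist.mean()                       # AveragedLoss reduces by mean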
def add_ops(self, net):
    prediction = self.input_record.prediction()
    label = self.input_record.label.field_blobs()
    if self.input_record.label.field_type().base != (
            self.input_record.prediction.field_type().base):
        label = net.Cast(
            label,
            net.NextScopedBlob('cast_label'),
            to=schema.data_type_for_dtype(
                self.input_record.prediction.field_type()
            )
        )
    label = net.StopGradient(label, net.NextScopedBlob('stopped_label'))

    l1dist = net.L1Distance(
        [label, prediction], net.NextScopedBlob('l1_dist'))
    net.AveragedLoss(l1dist, self.output_schema.l1_metric())

    scaler, scaler_no_clip = net.ScaleWithClip(
        [label],
        [
            net.NextScopedBlob('scaler'),
            net.NextScopedBlob('scaler_no_clip')
        ],
        max_scale=self.max_scale,
    )
    scaler = net.StopGradient(scaler, net.NextScopedBlob('stopped_scaler'))
    scaler_no_clip = net.StopGradient(
        scaler_no_clip, net.NextScopedBlob('stopped_scaler_no_clip'))
    scaler = net.Squeeze(
        scaler, net.NextScopedBlob('squeezed_scaler'), dims=[1])
    scaler_no_clip = net.Squeeze(
        scaler_no_clip,
        net.NextScopedBlob('squeezed_scaler_no_clip'),
        dims=[1])

    scaled_loss = net.Mul(
        [l1dist, scaler], net.NextScopedBlob('scaled_loss'))
    scaled_loss_no_clip = net.Mul(
        [l1dist, scaler_no_clip], net.NextScopedBlob('scaled_loss_no_clip'))

    net.AveragedLoss(scaled_loss, self.output_schema.loss())
    net.AveragedLoss(
        scaled_loss_no_clip, self.output_schema.scaled_l1_metric())
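# Hedged reference sketch (not from the source): the scaled-L1 loss above in
# NumPy. ScaleWithClip's exact semantics are not visible in this file;
# `scale_fn` is a hypothetical stand-in for whatever per-row scale it derives
# from the label, with `max_scale` assumed to act as an upper clip.
def _numpy_scaled_l1(label, prediction, scale_fn, max_scale):
    import numpy as np
    l1dist = np.abs(label - prediction)              # per-row L1 distance
    scaler_no_clip = scale_fn(label)                 # assumed per-row scale
    scaler = np.minimum(scaler_no_clip, max_scale)   # clipped variant
    # three reported values: plain L1 metric, clipped loss, unclipped metric
    return (l1dist.mean(),
            (l1dist * scaler).mean(),
            (l1dist * scaler_no_clip).mean())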
def _store_parameter(self, parameters, name, value):
    c2_name = C2.NextBlob(name)
    if C2.init_net():
        C2.init_net().GivenTensorFill(
            [],
            c2_name,
            shape=value.shape,
            values=value.flatten(),
            dtype=schema.data_type_for_dtype(value.dtype),
        )
        C2.init_net().AddExternalOutput(c2_name)
    else:
        workspace.FeedBlob(c2_name, value)
    parameters.append(c2_name)
    return c2_name
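# Hedged usage sketch (not from the source): registering a NumPy array as a
# parameter via the helper above. The layer instance, parameter list, and
# array are hypothetical.
#
#   import numpy as np
#   parameters = []
#   blob_name = layer._store_parameter(
#       parameters, 'fc_w', np.random.randn(8, 4).astype(np.float32))
#   # blob_name now points at a GivenTensorFill output on the init net when
#   # C2.init_net() is set, or at a blob fed directly via workspace.FeedBlob.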