All examples below use OneFlow's legacy lazy-mode API and assume "import oneflow as flow" (plus "import oneflow.typing as oft" where oft appears).

Example #1
def _batch_norm(
    inputs,
    epsilon,
    center=True,
    scale=True,
    trainable=True,
    is_training=True,
    name=None,
):
    return flow.layers.batch_normalization(
        inputs=inputs,
        axis=1,
        momentum=0.9,
        epsilon=epsilon,
        center=center,
        scale=scale,
        beta_initializer=flow.zeros_initializer(),
        gamma_initializer=flow.ones_initializer(),
        beta_regularizer=_get_regularizer(),
        gamma_regularizer=_get_regularizer(),
        moving_mean_initializer=flow.zeros_initializer(),
        moving_variance_initializer=flow.ones_initializer(),
        trainable=trainable,
        training=is_training,
        name=name,
    )
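Example #1 references a _get_regularizer() helper that the excerpt omits. A minimal sketch, under the assumption that it returns L2 weight decay as Example #12 does with flow.regularizers.l2 (the 1e-4 factor is a placeholder, not from the source):

def _get_regularizer():
    # Assumed helper body: L2 weight decay, mirroring Example #12's
    # flow.regularizers.l2(0.5). The 1e-4 factor is a placeholder.
    return flow.regularizers.l2(1e-4)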
Example #2
def _batch_norm_add_relu(self, inputs, addend, name=None, last=False):
    if self.fuse_bn_add_relu:
        initializer = flow.zeros_initializer() if last else flow.ones_initializer()
        axis = 1
        if self.data_format == "NHWC":
            axis = 3
        return flow.layers.batch_normalization_add_relu(
            inputs=inputs,
            addend=addend,
            axis=axis,
            momentum=0.9,
            epsilon=1e-5,
            center=True,
            scale=True,
            trainable=self.trainable,
            training=self.training,
            gamma_initializer=initializer,
            moving_variance_initializer=initializer,
            gamma_regularizer=self.weight_regularizer,
            beta_regularizer=self.weight_regularizer,
            name=name + "_bn_add_relu",
        )
    else:
        return flow.nn.relu(
            self._batch_norm(inputs, name + "_bn", last=last) + addend
        )
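As the fallback branch makes explicit, batch_normalization_add_relu is a fused equivalent of relu(batch_norm(inputs) + addend); the single kernel avoids materializing the intermediate results of the three separate ops.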
Example #3
def test_fn(
    a: flow.typing.Numpy.Placeholder(a_shape),
    b: flow.typing.Numpy.Placeholder(b_shape),
    c: flow.typing.Numpy.Placeholder(c_shape),
) -> flow.typing.Numpy:
    var_a = flow.get_variable(
        name="var_a",
        shape=a_shape,
        dtype=flow.float32,
        initializer=flow.ones_initializer(),
        distribute=flow.distribute.split(1),
    )
    a = flow.parallel_cast(a, distribute=flow.distribute.split(1))
    a = var_a * a
    out = flow.matmul(a, b)
    out = flow.parallel_cast(
        out,
        distribute=flow.distribute.broadcast(),
        gradient_distribute=flow.distribute.broadcast(),
    )
    c = flow.parallel_cast(c, distribute=flow.distribute.broadcast())
    out = flow.nn.bias_add(out, c)
    lr_scheduler = flow.optimizer.PiecewiseConstantScheduler([], [0.001])
    flow.optimizer.SGD(lr_scheduler, momentum=0).minimize(out)
    return out
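In OneFlow's distribute API, split(1) shards a blob along axis 1 across devices while broadcast() replicates it on every device; parallel_cast moves a blob between those placements. The job above keeps var_a and a split along axis 1 through the matmul, then broadcasts the output before the bias add.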
Example #4
def foo_job(input_def: oft.Numpy.Placeholder(shape=(2, 5))):
    var = flow.get_variable(
        name="var",
        shape=(2, 5),
        dtype=flow.float,
        initializer=flow.ones_initializer(),
    )
    input_def = flow.cast_to_current_logical_view(input_def)
    var = flow.cast_to_current_logical_view(var)
    output = var + input_def
    return output
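A hedged usage sketch for this style of job function, assuming OneFlow's legacy @flow.global_function decorator and a -> oft.Numpy return annotation (neither is part of the excerpt above):

import numpy as np
import oneflow as flow
import oneflow.typing as oft

@flow.global_function()  # assumed: legacy job-function registration
def foo_job(input_def: oft.Numpy.Placeholder(shape=(2, 5))) -> oft.Numpy:
    var = flow.get_variable(
        name="var",
        shape=(2, 5),
        dtype=flow.float,
        initializer=flow.ones_initializer(),
    )
    # Cast both blobs to the current logical view before combining them.
    input_def = flow.cast_to_current_logical_view(input_def)
    var = flow.cast_to_current_logical_view(var)
    return var + input_def

out = foo_job(np.ones((2, 5), dtype=np.float32))  # NumPy in, NumPy out
print(out)  # a (2, 5) array of 2.0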
Example #5
def trt_batch_norm_job(x=flow.FixedTensorDef(input_shape, dtype=dtype)):
    out = flow.layers.batch_normalization(x, axis=axis)
    c = flow.get_variable(
        "c",
        shape=out.shape,
        dtype=flow.float,
        initializer=flow.ones_initializer(),
        trainable=True,
    )
    out = flow.math.add_n([out, c])
    return out
Example #6
def _get_initializer(model_name):
    if model_name == "weight":
        return flow.variance_scaling_initializer(2.0,
                                                 mode="fan_out",
                                                 distribution="random_normal",
                                                 data_format="NCHW")
    elif model_name == "bias":
        return flow.zeros_initializer()
    elif model_name == "gamma":
        return flow.ones_initializer()
    elif model_name == "beta":
        return flow.zeros_initializer()
    elif model_name == "dense_weight":
        return flow.random_normal_initializer(0, 0.01)
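Note that _get_initializer falls through and implicitly returns None for unrecognized names. A hedged usage sketch, picking an initializer per parameter role (the variable names and shapes are illustrative, not from the source):

weight = flow.get_variable(
    "conv1_weight",
    shape=(64, 3, 7, 7),                     # illustrative NCHW conv filter
    dtype=flow.float,
    initializer=_get_initializer("weight"),  # variance-scaling, fan_out
)
beta = flow.get_variable(
    "bn1_beta",
    shape=(64,),
    dtype=flow.float,
    initializer=_get_initializer("beta"),    # zeros
)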
Example #7
def xla_matmul_job(
    a=flow.FixedTensorDef(a_shape, dtype=dtype),
    b=flow.FixedTensorDef(b_shape, dtype=dtype),
):
    out = flow.matmul(a, b, transpose_a=trans_a, transpose_b=trans_b)
    c = flow.get_variable(
        "c",
        shape=out.shape,
        dtype=flow.float,
        initializer=flow.ones_initializer(),
        trainable=True,
    )
    out = flow.math.add_n([out, c])
    return out
Example #8
def ExpandDimsJob():
    with flow.scope.placement(device_type, "0:0"):
        x = flow.get_variable(
            "var",
            shape=x_shape,
            dtype=flow.float,
            initializer=flow.ones_initializer(),
            trainable=True,
        )
        flow.watch_diff(x, check_grad)
        loss = flow.expand_dims(x, axis)
        lr_scheduler = flow.optimizer.PiecewiseConstantScheduler([], [0.0001])
        flow.optimizer.SGD(lr_scheduler, momentum=0).minimize(loss)
        return loss
Example #9
def DropoutJob() -> flow.typing.Numpy:
    with flow.scope.placement(device_type, "0:0"):
        x = flow.get_variable(
            "x",
            shape=x_shape,
            dtype=dtype,
            initializer=flow.ones_initializer(),
            trainable=True,
        )
        of_out = flow.nn.dropout(x, rate=rate, name="dropout")
        loss = flow.math.square(of_out)
        lr_scheduler = flow.optimizer.PiecewiseConstantScheduler([], [0.0001])
        flow.optimizer.SGD(lr_scheduler, momentum=0).minimize(loss)
        return of_out
Example #10
def instance_norm(input, name_prefix, trainable=True):
    (mean, variance) = flow.nn.moments(input, [2, 3], keepdims=True)
    gamma = flow.get_variable(
        name_prefix + "_gamma",
        shape=(1, input.shape[1], 1, 1),
        dtype=input.dtype,
        initializer=flow.ones_initializer(),
        trainable=trainable,
    )
    beta = flow.get_variable(
        name_prefix + "_beta",
        shape=(1, input.shape[1], 1, 1),
        dtype=input.dtype,
        initializer=flow.zeros_initializer(),
        trainable=trainable,
    )
    epsilon = 0.001
    normalized = (input - mean) / flow.math.sqrt(variance + epsilon)
    return gamma * normalized + beta
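This is the textbook instance-normalization computation, with mean and variance taken per sample and per channel over the spatial axes (the [2, 3] passed to flow.nn.moments, i.e. H and W for NCHW input):

y = \gamma \cdot \frac{x - \mu_{HW}}{\sqrt{\sigma_{HW}^{2} + \epsilon}} + \beta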
Example #11
def _batch_norm(self, inputs, name=None, last=False):
    initializer = flow.zeros_initializer() if last else flow.ones_initializer()
    axis = 1
    if self.data_format == "NHWC":
        axis = 3
    return flow.layers.batch_normalization(
        inputs=inputs,
        axis=axis,
        momentum=0.9,
        epsilon=1e-05,
        center=True,
        scale=True,
        trainable=self.trainable,
        training=self.training,
        gamma_initializer=initializer,
        moving_variance_initializer=initializer,
        gamma_regularizer=self.weight_regularizer,
        beta_regularizer=self.weight_regularizer,
        name=name,
    )
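Zero-initializing gamma when last=True is the familiar ResNet recipe of starting each residual block's final batch norm at zero, so the block initially acts as a near-identity mapping; note that the same zero initializer is also applied to the moving variance here.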
Example #12
def _batch_norm(inputs, last=False):
    initializer = flow.zeros_initializer() if last else flow.ones_initializer()
    axis = 1
    weight_regularizer = flow.regularizers.l2(0.5)
    trainable = True
    training = True
    data_format = "NHWC"
    if data_format == "NHWC":
        axis = 3
    return flow.layers.batch_normalization(
        inputs=inputs,
        axis=axis,
        momentum=0.9,
        epsilon=1e-5,
        center=True,
        scale=True,
        trainable=trainable,
        training=training,
        gamma_initializer=initializer,
        moving_variance_initializer=initializer,
        gamma_regularizer=weight_regularizer,
        beta_regularizer=weight_regularizer,
    )