Example #1
def test_discriminator(
        z=flow.FixedTensorDef((self.batch_size, 100)),
        images=flow.FixedTensorDef((self.batch_size, 1, 28, 28)),
        label1=flow.FixedTensorDef((self.batch_size, 1)),
        label0=flow.FixedTensorDef((self.batch_size, 1)),
):
    # Discriminator loss on generated (fake) samples, labeled 0.
    g_out = self.generator(z, trainable=False, const_init=True)
    g_logits = self.discriminator(g_out,
                                  trainable=True,
                                  const_init=True)
    d_loss_fake = flow.nn.sigmoid_cross_entropy_with_logits(
        flow.zeros_like(g_logits),
        g_logits,
        name="Dloss_fake_sigmoid_cross_entropy_with_logits",
    )

    # Discriminator loss on real images, labeled 1; reuse the same weights.
    d_logits = self.discriminator(images,
                                  trainable=True,
                                  reuse=True,
                                  const_init=True)
    d_loss_real = flow.nn.sigmoid_cross_entropy_with_logits(
        flow.ones_like(d_logits),
        d_logits,
        name="Dloss_real_sigmoid_cross_entropy_with_logits",
    )
    d_loss = d_loss_fake + d_loss_real
    flow.losses.add_loss(d_loss)

    return d_loss
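
These listings show only the job-function bodies. In the legacy OneFlow API they come from, such a function is registered with a decorator and invoked with NumPy arrays; the sketch below is an assumption about that surrounding boilerplate (the decorator name and the exact return type of .get() vary across early OneFlow versions), not part of any example above.

import numpy as np
import oneflow as flow

func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)

@flow.global_function(func_config)  # assumed legacy decorator; older releases used flow.function
def identity_job(x=flow.FixedTensorDef((2, 5))):
    return x

result = identity_job(np.ones((2, 5), dtype=np.float32)).get()  # blocks until the job finishes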
Example #2
def val_faceseg_job(
        image=flow.FixedTensorDef((val_batch_size, 3, img_height, img_width), dtype=flow.float),
        mask=flow.FixedTensorDef((val_batch_size, 1, img_height, img_width), dtype=flow.float),
):
    # Use the LinkNet34 model to segment the face.
    feature = LinkNet34(image, trainable=False, batch_size=val_batch_size)
    loss = BinaryLoss(feature, mask, jaccard_weight=jaccard_weight)
    return loss, feature
Example #3
def xla_layer_norm_param_grad_job(
        dy=flow.FixedTensorDef(shape, dtype=dtype),
        norm=flow.FixedTensorDef(shape, dtype=dtype),
        gamma=flow.FixedTensorDef(gamma_shape, dtype=dtype),
):
    return flow.layers.layer_norm_param_grad(dy,
                                             norm,
                                             gamma,
                                             begin_params_axis=params_axis)
Example #4
def xla_layer_norm_grad_job(
    dy=flow.FixedTensorDef(shape, dtype=dtype),
    x=flow.FixedTensorDef(shape, dtype=dtype),
    mean=flow.FixedTensorDef(mean_shape, dtype=dtype),
    inv_variance=flow.FixedTensorDef(mean_shape, dtype=dtype),
):
    return flow.layers.layer_norm_grad(
        dy, x, mean, inv_variance, begin_norm_axis=norm_axis
    )
Example #5
def trt_matmul_job(
        a=flow.FixedTensorDef(a_shape, dtype=dtype),
        b=flow.FixedTensorDef(b_shape, dtype=dtype),
):
    out = flow.matmul(a, b, transpose_a=trans_a, transpose_b=trans_b)
    # Add a trainable, ones-initialized variable of the same shape to the result.
    c = flow.get_variable(
        "c",
        shape=out.shape,
        dtype=flow.float,
        initializer=flow.ones_initializer(),
        trainable=True,
    )
    out = flow.math.add_n([out, c])
    return out
Example #6
def test_generator(
        z=flow.FixedTensorDef((self.batch_size, self.z_dim)),
        label1=flow.FixedTensorDef((self.batch_size, 1)),
):
    # Generator loss: push the (frozen) discriminator toward predicting 1 for fakes.
    g_out = self.generator(z, trainable=True, const_init=True)
    g_logits = self.discriminator(g_out,
                                  trainable=False,
                                  const_init=True)
    g_loss = flow.nn.sigmoid_cross_entropy_with_logits(
        flow.ones_like(g_logits),
        g_logits,
        name="Gloss_sigmoid_cross_entropy_with_logits",
    )

    flow.losses.add_loss(g_loss)
    return g_loss
Example #7
def bn(x=flow.FixedTensorDef((3, 4, 2, 5))):
    # Inference-style batch normalization with externally supplied statistics
    # and affine parameters, normalized over the last axis (size 5).
    params_shape = (5, )
    mean = flow.get_variable(
        name="mean",
        shape=params_shape,
        dtype=flow.float,
        initializer=flow.random_uniform_initializer(),
    )
    variance = flow.get_variable(
        name="var",
        shape=params_shape,
        dtype=flow.float,
        initializer=flow.random_uniform_initializer(),
    )
    gamma = flow.get_variable(
        name="gamma",
        shape=params_shape,
        dtype=flow.float,
        initializer=flow.random_uniform_initializer(),
    )
    beta = flow.get_variable(
        name="beta",
        shape=params_shape,
        dtype=flow.float,
        initializer=flow.random_uniform_initializer(),
    )
    return flow.nn.batch_normalization(x,
                                       mean,
                                       variance,
                                       beta,
                                       gamma,
                                       1e-5,
                                       axis=-1)
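
For reference, the op above computes y = gamma * (x - mean) / sqrt(variance + eps) + beta along the given axis. A minimal NumPy sketch of the same computation (channels-last, matching axis=-1; the random inputs are illustrative only):

import numpy as np

def batch_norm_ref(x, mean, variance, beta, gamma, eps=1e-5):
    # Parameters of shape (C,) broadcast against the last axis of x.
    return gamma * (x - mean) / np.sqrt(variance + eps) + beta

x = np.random.rand(3, 4, 2, 5).astype(np.float32)
mean, variance, beta, gamma = (np.random.rand(5).astype(np.float32) for _ in range(4))
y = batch_norm_ref(x, mean, variance, beta, gamma)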
Example #8
def bias_add_nhwc(x=flow.FixedTensorDef((3, 4, 2, 5))):
    y = flow.get_variable(
        name="y",
        shape=(5, ),
        dtype=flow.float,
        initializer=flow.random_uniform_initializer(),
    )
    return flow.nn.bias_add(x, y, "NHWC")
Example #9
def trt_avg_pooling_job(x=flow.FixedTensorDef(x_shape, dtype=dtype)):
    return flow.nn.avg_pool2d(
        x,
        ksize=ksize,
        strides=strides,
        padding=padding,
        data_format=data_format,
    )
Example #10
def train_faceseg_job(
        image=flow.FixedTensorDef((batch_size, 3, img_height, img_width), dtype=flow.float),
        mask=flow.FixedTensorDef((batch_size, 1, img_height, img_width), dtype=flow.float),
):
    # Feed the input through the model to get the output features.
    feature = LinkNet34(image, trainable=True, batch_size=batch_size)

    # Flatten prediction and target for the segmentation loss.
    feature = flow.reshape(feature, [-1])
    mask = flow.reshape(mask, [-1])
    loss = BinaryLoss(feature, mask, jaccard_weight=jaccard_weight)

    # Optimizer: SGD with momentum and a piecewise-scaled learning rate.
    lr_scheduler = flow.optimizer.PiecewiseScalingScheduler(0.01, [100, 200, 300], 0.1)
    flow.optimizer.SGD(lr_scheduler, loss_scale_factor=None, momentum=0.9,
                       grad_clipping=None, train_step_lbn=None).minimize(loss)
    return loss
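
A sketch of how such a training job is typically driven; the random batches stand in for a real data pipeline, and the surrounding decorator/config (see the note under Example #1) is assumed:

import numpy as np

total_steps = 400
for step in range(total_steps):
    image_np = np.random.rand(batch_size, 3, img_height, img_width).astype(np.float32)
    mask_np = np.random.rand(batch_size, 1, img_height, img_width).astype(np.float32)
    # .get() blocks and returns the loss (exact return type varies by OneFlow version).
    loss = train_faceseg_job(image_np, mask_np).get()
    if step % 100 == 0:
        print("step", step, "loss", np.mean(loss))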
Example #11
def max_pooling_2d_k3s1_valid_nchw(x=flow.FixedTensorDef((2, 3, 5, 4))):
    # Broadcast-add a zeros-initialized variable of shape (1, 1) to x.
    x += flow.get_variable(
        name="v1",
        shape=(1, 1),
        dtype=flow.float,
        initializer=flow.zeros_initializer(),
    )
    return flow.nn.max_pool2d(
        x, ksize=3, strides=1, padding="VALID", data_format="NCHW"
    )
Example #12
def max_pooling_2d_k2s2_same_nhwc(x=flow.FixedTensorDef((2, 3, 5, 4))):
    x += flow.get_variable(
        name="v1",
        shape=(1, 1),
        dtype=flow.float,
        initializer=flow.zeros_initializer(),
    )
    return flow.nn.max_pool2d(
        x, ksize=2, strides=2, padding="SAME", data_format="NHWC"
    )
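
Examples #11 and #12 differ in padding mode, which determines the output extents; a quick reference for the standard VALID/SAME arithmetic:

import math

def pool_out_size(size, ksize, stride, padding):
    # VALID drops partial windows; SAME pads so every input position is covered.
    if padding == "VALID":
        return (size - ksize) // stride + 1
    return math.ceil(size / stride)

# Example #11: NCHW input (2, 3, 5, 4) -> spatial (5, 4); k=3, s=1, VALID -> (3, 2)
print(pool_out_size(5, 3, 1, "VALID"), pool_out_size(4, 3, 1, "VALID"))
# Example #12: NHWC input (2, 3, 5, 4) -> spatial (3, 5); k=2, s=2, SAME -> (2, 3)
print(pool_out_size(3, 2, 2, "SAME"), pool_out_size(5, 2, 2, "SAME"))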
Example #13
def conv2d_k3s1_nhwc_valid(x=flow.FixedTensorDef((2, 3, 5, 4))):
    return flow.layers.conv2d(
        x,
        6,
        kernel_size=3,
        strides=1,
        padding="VALID",
        data_format="NHWC",
        **initer_args
    )
Example #14
def trt_batch_norm_job(x=flow.FixedTensorDef(input_shape, dtype=dtype)):
    out = flow.layers.batch_normalization(x, axis=axis)
    # Add a trainable, ones-initialized variable of the same shape to the result.
    c = flow.get_variable(
        "c",
        shape=out.shape,
        dtype=flow.float,
        initializer=flow.ones_initializer(),
        trainable=True,
    )
    out = flow.math.add_n([out, c])
    return out
Example #15
def conv2d(x=flow.FixedTensorDef((2, 7, 5, 4))):
    return flow.layers.conv2d(
        x,
        6,
        kernel_size=3,
        strides=1,
        dilation_rate=2,
        padding="SAME",
        data_format="NHWC",
        **initer_args
    )
Example #16
def conv2d(x=flow.FixedTensorDef((2, 4, 5, 3))):
    return flow.layers.conv2d(
        x,
        6,
        kernel_size=3,
        strides=1,
        groups=2,
        padding="SAME",
        data_format="NCHW",
        **initer_args
    )
Example #17
def variable_scope_test_job_2(a=of.FixedTensorDef((2, 5))):
    with of.scope.namespace("job2_scope1"):
        indices = of.get_variable(
            "gather_inds",
            shape=(2, ),
            dtype=of.int32,
            initializer=of.constant_initializer(1),
            trainable=False,
        )
        output = of.gather(a, indices, axis=1)

    print("indices op name: ", indices.op_name)
    print("gather op name: ", output.op_name)
    return output
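
For clarity, gathering along axis=1 with constant indices of 1 selects column 1 of a for each index entry; a NumPy equivalent of the op above:

import numpy as np

a = np.arange(10, dtype=np.float32).reshape(2, 5)
indices = np.ones(2, dtype=np.int32)
output = np.take(a, indices, axis=1)  # same semantics as of.gather(a, indices, axis=1)
print(output)  # [[1. 1.]
               #  [6. 6.]]  -- shape (2, 2)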
Example #18
def variable_scope_test_job_1(a=of.FixedTensorDef((1, 3, 6, 6))):
    with of.scope.namespace("job1_scope1"):
        convw = of.get_variable(
            "conv_weight",
            shape=(5, 3, 3, 3),
            dtype=a.dtype,
            initializer=of.random_uniform_initializer(),
            trainable=True,
        )
        conv = of.nn.conv2d(a, convw, 1, "SAME", "NCHW", name="conv")

        with of.scope.namespace("job1_scope2"):
            fcw = of.get_variable(
                "fc_weight",
                shape=(180, 10),
                dtype=a.dtype,
                initializer=of.random_uniform_initializer(),
                trainable=True,
            )
            fc = of.matmul(of.reshape(conv, (conv.shape[0], -1)),
                           fcw,
                           name="fc")
            fcb = of.get_variable(
                "fc_bias",
                shape=(10, ),
                dtype=a.dtype,
                initializer=of.constant_initializer(1.0),
                trainable=True,
            )
            fc_bias = of.nn.bias_add(fc, fcb)

        fcw2 = of.get_variable(
            "fc2_weight",
            shape=(10, 20),
            dtype=a.dtype,
            initializer=of.random_uniform_initializer(),
            trainable=True,
        )
        fc2 = of.matmul(fc_bias, fcw2, name="fc2")

    print("conv_weight op name: ", convw.op_name)
    print("conv op name: ", conv.op_name)
    print("fc_weight op name: ", fcw.op_name)
    print("fc_bias op name: ", fcb.op_name)
    print("fc op name: ", fc.op_name)
    print("fc2_weight op name: ", fcw2.op_name)
    print("fc2 op name: ", fc2.op_name)

    return fc2
Example #19
def reshape(x=flow.FixedTensorDef((3, 4, 2, 5))):
    return flow.reshape(x, (4, 30))
Example #20
def mobilenetv2(x=flow.FixedTensorDef((1, 224, 224, 3))):
    return Mobilenet(x)
Example #21
def trt_multiply_job(
    x=flow.FixedTensorDef(x_shape, dtype=dtype),
    y=flow.FixedTensorDef(y_shape, dtype=dtype),
):
    return flow.math.multiply(x, y)
Example #22
def softmax(x=flow.FixedTensorDef((3, 5))):
    return flow.nn.softmax(x)
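
A minimal NumPy reference for what softmax computes (stabilized by subtracting the per-row max, which leaves the result unchanged):

import numpy as np

def softmax_ref(x, axis=-1):
    e = np.exp(x - np.max(x, axis=axis, keepdims=True))
    return e / np.sum(e, axis=axis, keepdims=True)

x = np.random.rand(3, 5).astype(np.float32)
assert np.allclose(softmax_ref(x).sum(axis=-1), 1.0, atol=1e-6)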
Example #23
def trt_tanh_job(x=flow.FixedTensorDef(input_shape, dtype=dtype)):
    return flow.math.tanh(x)
Example #24
def alexnet_eval_job(x=flow.FixedTensorDef((1, 227, 227, 3))):
    return alexnet(x, None, False)
Example #25
def xla_broadcast_add_job(
        x=flow.FixedTensorDef(x_shape, dtype=dtype),
        y=flow.FixedTensorDef(y_shape, dtype=dtype),
):
    return flow.math.add(x, y)
Example #26
def xla_broadcast_div_job(
        x=flow.FixedTensorDef(x_shape, dtype=dtype),
        y=flow.FixedTensorDef(y_shape, dtype=dtype),
):
    return flow.math.divide(x, y)
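
The broadcast ops in Examples #25 and #26 follow standard NumPy-style broadcasting (the concrete x_shape/y_shape come from the surrounding test and are assumed here):

import numpy as np

x = np.ones((2, 3, 4), dtype=np.float32)
y = np.full((1, 4), 2.0, dtype=np.float32)
print((x + y).shape)  # (2, 3, 4) -- trailing dims align; size-1 dims expand
print((x / y)[0, 0])  # [0.5 0.5 0.5 0.5]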
Example #27
def softmax(x=flow.FixedTensorDef((3, 5, 4))):
    return flow.nn.softmax(x, axis=1)
Example #28
def trt_transpose_job(x=flow.FixedTensorDef(input_shape, dtype=dtype)):
    return flow.transpose(x, perm=permute)
Example #29
def Foo(x=flow.FixedTensorDef((2, 5))):
    return x
Example #30
def trt_softmax_job(x=flow.FixedTensorDef(input_shape, dtype=dtype)):
    return flow.nn.softmax(x, axis=axis)