Example #1
    def MatmulJob():
        with flow.scope.placement(device_type, "0:0"):
            a = flow.get_variable(
                "a",
                shape=a_shape,
                dtype=flow.float,
                initializer=flow.random_uniform_initializer(minval=-10, maxval=10),
                trainable=True,
            )
            b = flow.get_variable(
                "b",
                shape=b_shape,
                dtype=flow.float,
                initializer=flow.random_uniform_initializer(minval=-10, maxval=10),
                trainable=True,
            )
            loss = flow.matmul(a, b, transpose_a, transpose_b)
            flow.losses.add_loss(loss)

            flow.watch(a, test_global_storage.Setter("a"))
            flow.watch_diff(a, test_global_storage.Setter("a_diff"))
            flow.watch(b, test_global_storage.Setter("b"))
            flow.watch_diff(b, test_global_storage.Setter("b_diff"))
            flow.watch(loss, test_global_storage.Setter("loss"))
            flow.watch_diff(loss, test_global_storage.Setter("loss_diff"))

            return loss
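These job bodies come from parametrized tests, so names such as device_type, a_shape, transpose_a, and transpose_b are supplied by the enclosing test. Below is a minimal, self-contained sketch of the scaffolding such a job assumes; the decorator and config spelling vary across legacy lazy-mode OneFlow releases, so treat every name here as illustrative rather than the original harness:

import oneflow as flow
import oneflow.typing as tp

func_config = flow.function_config()
func_config.default_data_type(flow.float)
func_config.train.primary_lr(1e-4)  # legacy pairing with flow.losses.add_loss
func_config.train.model_update_conf(dict(naive_conf={}))

@flow.global_function(func_config)
def ToyMatmulJob() -> tp.Numpy:
    with flow.scope.placement("cpu", "0:0"):
        a = flow.get_variable(
            "a", shape=(4, 8), dtype=flow.float,
            initializer=flow.random_uniform_initializer(minval=-10, maxval=10),
            trainable=True,
        )
        b = flow.get_variable(
            "b", shape=(8, 5), dtype=flow.float,
            initializer=flow.random_uniform_initializer(minval=-10, maxval=10),
            trainable=True,
        )
        loss = flow.matmul(a, b)
        flow.losses.add_loss(loss)
        return loss

loss = ToyMatmulJob()  # the -> tp.Numpy annotation makes the call return an ndarray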
Example #2
    def TestMultiInputJob():
        with flow.scope.placement("gpu", "0:0"):
            x1 = flow.get_variable(
                "x1",
                shape=shape,
                dtype=flow.float,
                initializer=flow.random_uniform_initializer(minval=-10,
                                                            maxval=10),
                trainable=True,
            )
            x2 = flow.get_variable(
                "x2",
                shape=shape,
                dtype=flow.float,
                initializer=flow.random_uniform_initializer(minval=-10,
                                                            maxval=10),
                trainable=True,
            )
            loss = TestMultiInput(x1, x2)
            flow.optimizer.SGD(
                flow.optimizer.PiecewiseConstantScheduler([], [1e-4]), momentum=0
            ).minimize(loss)

            flow.watch(x1, test_global_storage.Setter("x1"))
            flow.watch_diff(x1, test_global_storage.Setter("x1_diff"))
            flow.watch(x2, test_global_storage.Setter("x2"))
            flow.watch_diff(x2, test_global_storage.Setter("x2_diff"))
            return loss
Example #3
def test_job() -> tp.Numpy:
    batch_size = 10
    with flow.scope.placement("cpu", "0:0"):
        miniRecord = MiniReader(
            "./",
            batch_size=batch_size,
            data_part_num=2,
            part_name_suffix_length=3,
            random_shuffle=True,
            shuffle_after_epoch=True,
        )

        x, y = MiniDecoder(miniRecord, name="d1")

        initializer1 = flow.random_uniform_initializer(-1 / 28.0, 1 / 28.0)
        hidden = flow.layers.dense(
            x,
            500,
            activation=flow.nn.relu,
            kernel_initializer=initializer1,
            bias_initializer=initializer1,
            name="dense1",
        )
        initializer2 = flow.random_uniform_initializer(-np.sqrt(1 / 500.0),
                                                       np.sqrt(1 / 500.0))
        logits = flow.layers.dense(hidden,
                                   1,
                                   kernel_initializer=initializer2,
                                   bias_initializer=initializer2,
                                   name="dense2")
        loss = (y - logits) * (y - logits)

        lr_scheduler = flow.optimizer.PiecewiseConstantScheduler([], [0.001])
        flow.optimizer.Adam(lr_scheduler).minimize(loss)
        return loss
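Because the job is annotated -> tp.Numpy, each call blocks until the iteration finishes and returns a NumPy array. A hypothetical driver loop (the step count and logging cadence are assumptions):

for step in range(100):
    loss = test_job()  # per-sample squared error; shape depends on MiniDecoder's label shape
    if step % 10 == 0:
        print(step, loss.mean())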
Example #4
 def build_network(self, inputs):
     # weight_2d = self.conv2d.weight.data
     # weight_3d = np.zeros(weight_2d.shape)
     # weight_3d = flow.expand_dims(weight_3d, axis=2)
     # weight_3d[:, :, 0, :, :] = weight_2d
     # init = flow.constant_initializer(weight_3d)
     init = flow.random_uniform_initializer(minval=0, maxval=0.5)
     out = conv3d_layer(
         "APP3DA_spatial_", inputs, self.conv2d.out_channels,
         kernel_size=self.kernel_dim,
         strides=self.stride, padding=self.padding,
         use_bias=True, weight_initializer=init,
         trainable=self.trainable,
     )
     self.kernel_dim = [self.time_dim, 1, 1]
     self.stride = [self.time_stride * self.time_dim, 1, 1]
     # weight_2d = np.eye(self.conv2d.out_channels)
     # weight_2d = flow.expand_dims(weight_2d, axis=2)
     # weight_2d = flow.expand_dims(weight_2d, axis=2)
     # weight_3d = np.zeros(weight_2d.shape)
     # weight_3d = flow.expand_dims(weight_3d, axis=2)
     # weight_3d = np.tile(weight_3d, (1, 1, self.time_dim, 1, 1))
     # middle_idx = self.time_dim // 2
     # weight_3d[:, :, middle_idx, :, :] = weight_2d
     init = flow.random_uniform_initializer(minval=0, maxval=0.5)
     # init = flow.constant_initializer(weight_3d)
     out = conv3d_layer(
         "APP3DA_temporal_", self.APM.build_network(inputs), self.conv2d.out_channels,
         kernel_size=self.kernel_dim,
         strides=self.stride, padding="SAME",
         use_bias=False, weight_initializer=init,
         trainable=self.trainable,
     )
     return out
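conv3d_layer is a user-defined helper that this snippet does not include. (Note also that the spatial branch's out is immediately overwritten by the temporal branch, as in the original source.) A plausible sketch of such a helper, modeled on the _conv2d_layer helpers later in this listing and on the flow.nn.conv3d call in Example #14; the signature, NCDHW layout, and bias handling are all assumptions, not the original code:

import oneflow as flow  # legacy lazy-mode API assumed

def conv3d_layer(name, input, filters, kernel_size, strides, padding,
                 use_bias=True, weight_initializer=None, trainable=True):
    # NCDHW weight layout: (out_channels, in_channels, D, H, W)
    weight_shape = (filters, input.shape[1]) + tuple(kernel_size)
    weight = flow.get_variable(
        name + "-weight",
        shape=weight_shape,
        dtype=input.dtype,
        initializer=weight_initializer,
        trainable=trainable,
    )
    output = flow.nn.conv3d(input, weight, strides=strides, padding=padding,
                            data_format="NCDHW", dilations=[1, 1, 1])
    if use_bias:
        bias = flow.get_variable(
            name + "-bias", shape=(filters,), dtype=input.dtype,
            initializer=flow.constant_initializer(0.0), trainable=trainable,
        )
        # Broadcast add over the channel axis rather than relying on
        # flow.nn.bias_add's 5-D data_format handling.
        output = output + flow.reshape(bias, (1, filters, 1, 1, 1))
    return output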
Example #5
 def bn(x=flow.FixedTensorDef((3, 4, 2, 5))):
     params_shape = (5, )
     mean = flow.get_variable(
         name="mean",
         shape=params_shape,
         dtype=flow.float,
         initializer=flow.random_uniform_initializer(),
     )
     variance = flow.get_variable(
         name="var",
         shape=params_shape,
         dtype=flow.float,
         initializer=flow.random_uniform_initializer(),
     )
     gamma = flow.get_variable(
         name="gamma",
         shape=params_shape,
         dtype=flow.float,
         initializer=flow.random_uniform_initializer(),
     )
     beta = flow.get_variable(
         name="beta",
         shape=params_shape,
         dtype=flow.float,
         initializer=flow.random_uniform_initializer(),
     )
     return flow.nn.batch_normalization(x,
                                        mean,
                                        variance,
                                        beta,
                                        gamma,
                                        1e-5,
                                        axis=-1)
Example #6
 def FusedCastScaleJob():
     with flow.scope.placement(device_type, "0:0"):
         x = flow.get_variable(
             "in",
             shape=input_shape,
             dtype=flow.float,
             initializer=flow.random_uniform_initializer(),
             trainable=True,
         )
         scale = flow.get_variable(
             "scale",
             shape=(1, ),
             dtype=flow.float,
             initializer=flow.random_uniform_initializer(),
             trainable=False,
         )
         loss = flow.cast(x, dtype=type_name_to_flow_type[in_dtype])
         if test_fuse_cast_scale_pass:
             loss = flow.cast(
                 loss, dtype=type_name_to_flow_type[out_dtype]) * flow.cast(
                     scale, dtype=type_name_to_flow_type[out_dtype])
         else:
             loss = fused_cast_scale(
                 loss,
                 flow.cast(scale, dtype=type_name_to_flow_type[out_dtype]),
                 name="fused_cast_scale",
             )
         loss = flow.cast(loss, dtype=flow.float)
         flow.watch(x, test_global_storage.Setter("x"))
         flow.watch(scale, test_global_storage.Setter("scale"))
         flow.watch(loss, test_global_storage.Setter("loss"))
         return loss
Example #7
    def MatmulJob():
        with flow.scope.placement(device_type, "0:0"):
            a = flow.get_variable(
                "a",
                shape=a_shape,
                dtype=flow.float,
                initializer=flow.random_uniform_initializer(minval=-10,
                                                            maxval=10),
                trainable=True,
            )
            b = flow.get_variable(
                "b",
                shape=b_shape,
                dtype=flow.float,
                initializer=flow.random_uniform_initializer(minval=-10,
                                                            maxval=10),
                trainable=True,
            )
            loss = flow.matmul(a, b, transpose_a, transpose_b)
            flow.optimizer.SGD(
                flow.optimizer.PiecewiseConstantScheduler([], [1e-4]), momentum=0
            ).minimize(loss)

            flow.watch(a, test_global_storage.Setter("a"))
            flow.watch_diff(a, test_global_storage.Setter("a_diff"))
            flow.watch(b, test_global_storage.Setter("b"))
            flow.watch_diff(b, test_global_storage.Setter("b_diff"))
            flow.watch(loss, test_global_storage.Setter("loss"))
            flow.watch_diff(loss, test_global_storage.Setter("loss_diff"))

            return loss
Example #8
    def ScalarAddByTensorJob():
        with flow.scope.placement(device_type, "0:0"):
            x = flow.get_variable(
                "x",
                shape=x_shape,
                dtype=flow.float,
                initializer=flow.random_uniform_initializer(minval=0, maxval=100),
                trainable=True,
            )
            y = flow.get_variable(
                "y",
                shape=(1,),
                dtype=flow.float,
                initializer=flow.random_uniform_initializer(minval=0, maxval=100),
                trainable=True,
            )
            if case == "add":
                loss = flow.math.add(x, y)
            elif case == "sub":
                loss = flow.math.subtract(x, y)
            elif case == "mul":
                loss = flow.math.multiply(x, y)
            elif case == "div":
                loss = flow.math.divide(x, y)
            flow.losses.add_loss(loss)

            flow.watch(x, test_global_storage.Setter("x"))
            flow.watch(y, test_global_storage.Setter("y"))
            flow.watch_diff(x, test_global_storage.Setter("x_diff"))
            flow.watch_diff(y, test_global_storage.Setter("y_diff"))
            flow.watch(loss, test_global_storage.Setter("loss"))
            flow.watch_diff(loss, test_global_storage.Setter("loss_diff"))

            return loss
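The (1,) variable y combines with x elementwise through broadcasting, which is what makes the four scalar-by-tensor cases above well-formed. A NumPy sketch of the same shape rule:

import numpy as np

x = np.ones((2, 3), dtype=np.float32)      # stand-in for x_shape
y = np.full((1,), 5.0, dtype=np.float32)
assert (x + y).shape == (2, 3)             # the (1,) tensor broadcasts across x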
Example #9
def train_job(
    images: tp.Numpy.Placeholder((BATCH_SIZE, 1, 28, 28), dtype=flow.float),
    labels: tp.Numpy.Placeholder((BATCH_SIZE, ), dtype=flow.int32),
) -> tp.Numpy:
    with flow.scope.placement("cpu", "0:0"):
        reshape = flow.reshape(images, [images.shape[0], -1])
        initializer1 = flow.random_uniform_initializer(-1 / 28.0, 1 / 28.0)
        hidden = flow.layers.dense(
            reshape,
            500,
            activation=flow.nn.relu,
            kernel_initializer=initializer1,
            bias_initializer=initializer1,
            name="dense1",
        )
        initializer2 = flow.random_uniform_initializer(-np.sqrt(1 / 500.0),
                                                       np.sqrt(1 / 500.0))
        logits = flow.layers.dense(hidden,
                                   10,
                                   kernel_initializer=initializer2,
                                   bias_initializer=initializer2,
                                   name="dense2")
        loss = flow.nn.sparse_softmax_cross_entropy_with_logits(labels, logits)

    lr_scheduler = flow.optimizer.PiecewiseConstantScheduler([], [0.001])
    flow.optimizer.Adam(lr_scheduler).minimize(loss)
    return loss
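A hypothetical driver for this job, with random tensors standing in for a real MNIST feed (BATCH_SIZE mirrors the constant used in the placeholder types above):

import numpy as np

BATCH_SIZE = 100  # assumption: whatever value the original module defines
for step in range(50):
    images = np.random.rand(BATCH_SIZE, 1, 28, 28).astype(np.float32)
    labels = np.random.randint(0, 10, size=(BATCH_SIZE,)).astype(np.int32)
    loss = train_job(images, labels)  # ndarray, thanks to the -> tp.Numpy annotation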
Example #10
    def ConcatJob():
        with flow.scope.placement(device_type, "0:0"):
            x = flow.get_variable(
                "x",
                shape=x_shape,
                dtype=type_name_to_flow_type[dtype],
                initializer=flow.random_uniform_initializer(minval=-10,
                                                            maxval=10),
                trainable=True,
            )
            y = flow.get_variable(
                "y",
                shape=y_shape,
                dtype=type_name_to_flow_type[dtype],
                initializer=flow.random_uniform_initializer(minval=-10,
                                                            maxval=10),
                trainable=True,
            )
            x = flow.cast_to_current_logical_view(x)
            y = flow.cast_to_current_logical_view(y)
            loss = flow.concat([x, y], axis)
            flow.optimizer.SGD(
                flow.optimizer.PiecewiseConstantScheduler([], [1e-4]), momentum=0
            ).minimize(loss)

            flow.watch(x, test_global_storage.Setter("x"))
            flow.watch_diff(x, test_global_storage.Setter("x_diff"))
            flow.watch(y, test_global_storage.Setter("y"))
            flow.watch_diff(y, test_global_storage.Setter("y_diff"))
            flow.watch(loss, test_global_storage.Setter("loss"))
            flow.watch_diff(loss, test_global_storage.Setter("loss_diff"))

            return loss
Example #11
    def MatmulJob():
        with flow.scope.placement(device_type, "0:0"):
            a = flow.get_variable(
                "a",
                shape=a_shape,
                dtype=dtype,
                initializer=flow.random_uniform_initializer(minval=0, maxval=1),
                trainable=True,
            )
            b = flow.get_variable(
                "b",
                shape=b_shape,
                dtype=dtype,
                initializer=flow.random_uniform_initializer(minval=0, maxval=1),
                trainable=True,
            )
            if data_type == "float16":
                out = flow.matmul(
                    flow.cast(a, dtype=flow.float16),
                    flow.cast(b, dtype=flow.float16),
                    transpose_a,
                    transpose_b,
                    alpha,
                )
                c = flow.get_variable(
                    "c",
                    shape=out.shape,
                    dtype=dtype,
                    initializer=flow.random_uniform_initializer(minval=-1, maxval=1),
                    trainable=True,
                )
                loss = flow.cast(
                    out + flow.cast(c, dtype=flow.float16), dtype=flow.float
                )
            else:
                out = flow.matmul(a, b, transpose_a, transpose_b, alpha)
                c = flow.get_variable(
                    "c",
                    shape=out.shape,
                    dtype=dtype,
                    initializer=flow.random_uniform_initializer(minval=-1, maxval=1),
                    trainable=True,
                )
                loss = out + c

            flow.optimizer.SGD(
                flow.optimizer.PiecewiseConstantScheduler([], [1e-4]), momentum=0
            ).minimize(loss)

            flow.watch(a, test_global_storage.Setter("a"))
            flow.watch_diff(a, test_global_storage.Setter("a_diff"))
            flow.watch(b, test_global_storage.Setter("b"))
            flow.watch_diff(b, test_global_storage.Setter("b_diff"))
            flow.watch(c, test_global_storage.Setter("c"))
            flow.watch_diff(c, test_global_storage.Setter("c_diff"))
            flow.watch(loss, test_global_storage.Setter("loss"))
            flow.watch_diff(loss, test_global_storage.Setter("loss_diff"))

            return loss
Example #12
def _model(dense_fields, wide_sparse_fields, deep_sparse_fields):
    wide_sparse_fields = flow.parallel_cast(
        wide_sparse_fields, distribute=flow.distribute.broadcast())
    wide_embedding_table = flow.get_variable(
        name='wide_embedding',
        shape=(FLAGS.wide_vocab_size, 1),
        initializer=flow.random_uniform_initializer(minval=-0.05, maxval=0.05),
        distribute=flow.distribute.split(0),
    )
    wide_embedding = flow.gather(params=wide_embedding_table,
                                 indices=wide_sparse_fields)
    wide_embedding = flow.reshape(wide_embedding,
                                  shape=(-1, wide_embedding.shape[-1] *
                                         wide_embedding.shape[-2]))
    wide_scores = flow.math.reduce_sum(wide_embedding, axis=[1], keepdims=True)
    wide_scores = flow.parallel_cast(
        wide_scores,
        distribute=flow.distribute.split(0),
        gradient_distribute=flow.distribute.broadcast())

    deep_sparse_fields = flow.parallel_cast(
        deep_sparse_fields, distribute=flow.distribute.broadcast())
    deep_embedding_table = flow.get_variable(
        name='deep_embedding',
        shape=(FLAGS.deep_vocab_size, FLAGS.deep_embedding_vec_size),
        initializer=flow.random_uniform_initializer(minval=-0.05, maxval=0.05),
        distribute=flow.distribute.split(1),
    )
    deep_embedding = flow.gather(params=deep_embedding_table,
                                 indices=deep_sparse_fields)
    deep_embedding = flow.parallel_cast(
        deep_embedding,
        distribute=flow.distribute.split(0),
        gradient_distribute=flow.distribute.split(2))
    deep_embedding = flow.reshape(deep_embedding,
                                  shape=(-1, deep_embedding.shape[-1] *
                                         deep_embedding.shape[-2]))
    deep_features = flow.concat([deep_embedding, dense_fields], axis=1)
    for idx, units in enumerate(DEEP_HIDDEN_UNITS):
        deep_features = flow.layers.dense(
            deep_features,
            units=units,
            kernel_initializer=flow.glorot_uniform_initializer(),
            bias_initializer=flow.constant_initializer(0.0),
            activation=flow.math.relu,
            name='fc' + str(idx + 1))
        deep_features = flow.nn.dropout(deep_features,
                                        rate=FLAGS.deep_dropout_rate)
    deep_scores = flow.layers.dense(
        deep_features,
        units=1,
        kernel_initializer=flow.glorot_uniform_initializer(),
        bias_initializer=flow.constant_initializer(0.0),
        name='fc' + str(len(DEEP_HIDDEN_UNITS) + 1))

    scores = wide_scores + deep_scores
    return scores
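The parallel_cast calls move blobs between broadcast and split distributions so the embedding tables can be model-parallel while the dense layers stay data-parallel. _model returns raw logits (wide_scores + deep_scores), so a loss head normally follows; a hedged sketch of the usual wide-and-deep continuation, where the label blob and the choice of sigmoid cross entropy are assumptions, not part of the original:

def _loss(labels, scores):
    # Hypothetical loss head for the wide & deep logits above.
    losses = flow.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=scores)
    return flow.math.reduce_mean(losses)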
Example #13
def _conv2d_layer(
        name,
        input,
        filters,
        kernel_size=3,
        strides=1,
        padding="SAME",
        group_num=1,
        data_format="NCHW",
        dilation_rate=1,
        activation=op_conf_util.kRelu,
        use_bias=False,
        weight_initializer=flow.random_uniform_initializer(),
        bias_initializer=flow.random_uniform_initializer(),
        trainable=True,
):
    if isinstance(kernel_size, int):
        # The default kernel_size is a bare int, but it is indexed below;
        # normalize so both int and tuple call styles work.
        kernel_size = (kernel_size, kernel_size)
    if data_format == "NCHW":
        weight_shape = (int(filters), int(input.shape[1]), int(kernel_size[0]),
                        int(kernel_size[0]))
    elif data_format == "NHWC":
        weight_shape = (int(filters), int(kernel_size[0]), int(kernel_size[0]),
                        int(input.static_shape[3]))
    else:
        raise ValueError('data_format must be "NCHW" or "NHWC".')
    weight = flow.get_variable(
        name + "-weight",
        shape=weight_shape,
        dtype=input.dtype,
        initializer=weight_initializer,
        trainable=trainable,
    )
    output = flow.nn.conv2d(input,
                            weight,
                            strides,
                            padding,
                            data_format,
                            dilation_rate,
                            name=name)
    if use_bias:
        bias = flow.get_variable(
            name + "-bias",
            shape=(filters, ),
            dtype=input.dtype,
            initializer=bias_initializer,
            model_name="bias",
            trainable=trainable,
        )
        output = flow.nn.bias_add(output, bias, data_format)

    if activation is not None:
        if activation == op_conf_util.kRelu:
            output = flow.keras.activations.relu(output)
        else:
            raise NotImplementedError

    return output
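A hypothetical call (the input blob and hyperparameters are assumptions); note that kernel_size is indexed inside the helper, which the int-to-tuple normalization above now makes safe for both call styles:

# images: an NCHW blob produced elsewhere in the graph (assumption)
y = _conv2d_layer(
    "conv1", images, filters=64, kernel_size=(3, 3),
    strides=1, padding="SAME", data_format="NCHW", use_bias=True,
)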
Example #14
    def ConvJob():
        with flow.scope.placement(device_type, "0:0"):
            x = flow.get_variable(
                "x",
                shape=x_shape,
                dtype=flow.float,
                initializer=flow.random_uniform_initializer(minval=0,
                                                            maxval=100),
                trainable=True,
            )
            if data_format == "NCDHW":
                weight_shape = (
                    filters,
                    x.shape[1] // groups,
                    kernel_size,
                    kernel_size,
                    kernel_size,
                )
            else:
                weight_shape = (
                    filters,
                    kernel_size,
                    kernel_size,
                    kernel_size,
                    x.shape[4] // groups,
                )
            weight = flow.get_variable(
                "conv-weight",
                shape=weight_shape,
                dtype=flow.float,
                initializer=flow.random_uniform_initializer(minval=0,
                                                            maxval=100),
            )
            loss = flow.nn.conv3d(
                x,
                weight,
                strides=[stride_d, stride_h, stride_w],
                padding=of_padding,
                data_format=data_format,
                dilations=[dilation_d, dilation_h, dilation_w],
                groups=groups,
            )
            flow.optimizer.SGD(
                flow.optimizer.PiecewiseConstantScheduler([], [1e-4]), momentum=0
            ).minimize(loss)

            flow.watch(x, test_global_storage.Setter("x"))
            flow.watch_diff(x, test_global_storage.Setter("x_diff"))
            flow.watch(weight, test_global_storage.Setter("weight"))
            flow.watch_diff(weight, test_global_storage.Setter("weight_diff"))
            flow.watch(loss, test_global_storage.Setter("loss"))
            flow.watch_diff(loss, test_global_storage.Setter("loss_diff"))

            return loss
Example #15
 def TestMultiOptimizerJob():
     with flow.scope.placement(device_type, "0:0-0"):
         var1 = flow.get_variable(
             name="var1",
             shape=var1_shape,
             dtype=flow.float32,
             initializer=flow.random_uniform_initializer(minval=0,
                                                         maxval=100),
             trainable=True,
         )
         var2 = flow.get_variable(
             name="var2",
             shape=var2_shape,
             dtype=flow.float32,
             initializer=flow.random_uniform_initializer(minval=0,
                                                         maxval=100),
             trainable=True,
         )
         var3 = flow.get_variable(
             name="var3",
             shape=var3_shape,
             dtype=flow.float32,
             initializer=flow.random_uniform_initializer(minval=0,
                                                         maxval=100),
             trainable=True,
         )
         loss = flow.math.reduce_sum(var1 + var2 + var3)
         sgd_opt = flow.optimizer.SGD(
             flow.optimizer.PiecewiseConstantScheduler(
                 [], [sgd_opt_args["lr"]]),
             momentum=sgd_opt_args["momentum"],
             variables=["var1"],
         )
         rmsprop_opt = flow.optimizer.RMSProp(
             flow.optimizer.PiecewiseConstantScheduler(
                 [], [rmsprop_opt_args["lr"]]),
             decay_rate=rmsprop_opt_args["decay_rate"],
             epsilon=0,
             centered=rmsprop_opt_args["centered"],
             variables=["var2"],
         )
         adam_opt = flow.optimizer.Adam(
             flow.optimizer.PiecewiseConstantScheduler(
                 [], [adam_opt_args["lr"]]),
             beta1=adam_opt_args["beta1"],
             beta2=adam_opt_args["beta2"],
             epsilon=adam_opt_args["epsilon"],
             do_bias_correction=True,
             variables=["var3"],
         )
         flow.optimizer.CombinedOptimizer(
             [sgd_opt, rmsprop_opt, adam_opt]
         ).minimize(loss)
         return (var1, var2, var3)
Example #16
 def add() -> tp.Numpy:
     with flow.scope.placement("gpu", "0:0-1"):
         x = flow.get_variable(
             name="x",
             shape=(2, 3),
             initializer=flow.random_uniform_initializer(),
         )
         y = flow.get_variable(
             name="y",
             shape=(2, 3),
             initializer=flow.random_uniform_initializer(),
         )
         return flow.math.add_n([x, y])
Example #17
def _data_loader_synthetic(batch_size):
    devices = ['{}:0-{}'.format(i, FLAGS.gpu_num_per_node - 1) for i in range(FLAGS.num_nodes)]
    with flow.scope.placement("cpu", devices):
        def _blob_random(shape, dtype=flow.int32, initializer=flow.zeros_initializer(flow.int32)):
            return flow.data.decode_random(shape=shape, dtype=dtype, batch_size=batch_size, 
                                           initializer=initializer)
        labels = _blob_random((1,), initializer=flow.random_uniform_initializer(dtype=flow.int32))
        dense_fields = _blob_random((FLAGS.num_dense_fields,), dtype=flow.float, 
                                    initializer=flow.random_uniform_initializer())
        wide_sparse_fields = _blob_random((FLAGS.num_wide_sparse_fields,))
        deep_sparse_fields = _blob_random((FLAGS.num_deep_sparse_fields,))
        print('use synthetic data')
    return flow.identity_n([labels, dense_fields, wide_sparse_fields, deep_sparse_fields])
Example #18
def _data_loader_ofrecord(data_dir,
                          data_part_num,
                          batch_size,
                          part_name_suffix_length=-1,
                          shuffle=True):
    if data_dir:
        ofrecord = flow.data.ofrecord_reader(
            data_dir,
            batch_size=batch_size,
            data_part_num=data_part_num,
            part_name_suffix_length=part_name_suffix_length,
            random_shuffle=shuffle,
            shuffle_after_epoch=shuffle)

        def _blob_decoder(bn, shape, dtype=flow.int32):
            return flow.data.OFRecordRawDecoder(ofrecord,
                                                bn,
                                                shape=shape,
                                                dtype=dtype)

        labels = _blob_decoder("labels", (1, ))
        dense_fields = _blob_decoder("dense_fields",
                                     (FLAGS.num_dense_fields, ), flow.float)
        wide_sparse_fields = _blob_decoder("wide_sparse_fields",
                                           (FLAGS.num_wide_sparse_fields, ))
        deep_sparse_fields = _blob_decoder("deep_sparse_fields",
                                           (FLAGS.num_deep_sparse_fields, ))
        print('load data from', data_dir)
    else:

        def _blob_random(shape,
                         dtype=flow.int32,
                         initializer=flow.zeros_initializer(flow.int32)):
            return flow.data.decode_random(shape=shape,
                                           dtype=dtype,
                                           batch_size=batch_size,
                                           initializer=initializer)

        labels = _blob_random(
            (1, ),
            initializer=flow.random_uniform_initializer(dtype=flow.int32))
        dense_fields = _blob_random(
            (FLAGS.num_dense_fields, ),
            dtype=flow.float,
            initializer=flow.random_uniform_initializer())
        wide_sparse_fields = _blob_random((FLAGS.num_wide_sparse_fields, ))
        deep_sparse_fields = _blob_random((FLAGS.num_deep_sparse_fields, ))
        print('use synthetic data')

    return flow.identity_n(
        [labels, dense_fields, wide_sparse_fields, deep_sparse_fields])
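A hypothetical call site (the path, part count, and batch size are assumptions):

labels, dense_fields, wide_sparse_fields, deep_sparse_fields = _data_loader_ofrecord(
    data_dir="/path/to/ofrecord", data_part_num=256, batch_size=16384,
    part_name_suffix_length=5, shuffle=True,
)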
Example #19
    def DeconvJob():
        with flow.scope.placement(device_type, "0:0"):
            x = flow.get_variable(
                "x",
                shape=input_shape,
                dtype=flow.float,
                initializer=flow.random_uniform_initializer(minval=-10,
                                                            maxval=10),
                trainable=True,
            )
            if data_format == "NCHW":
                weight = flow.get_variable(
                    "weight",
                    shape=(in_channels, out_channels, kernel_size,
                           kernel_size),
                    dtype=flow.float,
                    initializer=flow.random_uniform_initializer(minval=-10,
                                                                maxval=10),
                    trainable=True,
                )
            else:
                weight = flow.get_variable(
                    "weight",
                    shape=(in_channels, kernel_size, kernel_size,
                           out_channels),
                    dtype=flow.float,
                    initializer=flow.random_uniform_initializer(minval=-10,
                                                                maxval=10),
                    trainable=True,
                )
            loss = flow.nn.conv2d_transpose(
                x,
                weight,
                strides=strides,
                output_shape=output_shape,
                dilations=dilations,
                padding=padding,
                data_format=data_format,
            )
            flow.optimizer.SGD(
                flow.optimizer.PiecewiseConstantScheduler([], [1e-4]), momentum=0
            ).minimize(loss)

            flow.watch(x, test_global_storage.Setter("x"))
            flow.watch_diff(x, test_global_storage.Setter("x_diff"))
            flow.watch(weight, test_global_storage.Setter("weight"))
            flow.watch_diff(weight, test_global_storage.Setter("weight_diff"))
            flow.watch(loss, test_global_storage.Setter("loss"))
            flow.watch_diff(loss, test_global_storage.Setter("loss_diff"))

            return loss
Example #20
 def gather_nd():
     x = flow.get_variable(
         name="x",
         shape=(2, 3, 4),
         dtype=flow.float,
         initializer=flow.random_uniform_initializer(),
     )
     y = flow.get_variable(
         name="y",
         shape=(2, 3),
         dtype=flow.int64,
         initializer=flow.random_uniform_initializer(0, 1, flow.int64),
     )
     return flow.gather_nd(x, y)
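With x of shape (2, 3, 4) and integer indices y of shape (2, 3), the last index dimension (size 3) consumes all three axes of x, so gather_nd returns one scalar per index row, shape (2,). A NumPy sketch of the same semantics (index values chosen arbitrarily):

import numpy as np

x = np.arange(24, dtype=np.float32).reshape(2, 3, 4)
y = np.array([[0, 1, 2], [1, 0, 3]], dtype=np.int64)
out = x[y[:, 0], y[:, 1], y[:, 2]]   # shape (2,), one element per index row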
Example #21
    def ConvJob():
        with flow.scope.placement(device_type, "0:0"):
            x = flow.get_variable(
                "x",
                shape=x_shape,
                dtype=flow.float,
                initializer=flow.random_uniform_initializer(minval=0,
                                                            maxval=100),
                trainable=True,
            )
            loss = flow.layers.conv3d(
                x,
                filters,
                kernel_size=kernel_size,
                strides=1,
                padding="valid",
                data_format="NCDHW",
                dilation_rate=1,
                groups=groups,
                use_bias=False,
                kernel_initializer=flow.random_uniform_initializer(minval=0,
                                                                   maxval=100),
                weight_name="conv3d_weight",
            )
            weight_shape = (
                filters,
                x.shape[1] // groups,
                kernel_size,
                kernel_size,
                kernel_size,
            )
            weight = flow.get_variable(
                name="conv3d_weight",
                shape=weight_shape,
                dtype=flow.float,
                initializer=flow.random_uniform_initializer(minval=0,
                                                            maxval=100),
            )
            flow.optimizer.SGD(
                flow.optimizer.PiecewiseConstantScheduler([], [1e-4]), momentum=0
            ).minimize(loss)

            flow.watch(x, test_global_storage.Setter("x"))
            flow.watch_diff(x, test_global_storage.Setter("x_diff"))
            flow.watch(weight, test_global_storage.Setter("weight"))
            flow.watch_diff(weight, test_global_storage.Setter("weight_diff"))
            flow.watch(loss, test_global_storage.Setter("loss"))
            flow.watch_diff(loss, test_global_storage.Setter("loss_diff"))

            return loss
Example #22
 def matmul():
     a = flow.get_variable(
         name="a",
         shape=(3, 2),
         dtype=flow.float,
         initializer=flow.random_uniform_initializer(),
     )
     b = flow.get_variable(
         name="b",
         shape=(4, 3),
         dtype=flow.float,
         initializer=flow.random_uniform_initializer(),
     )
     return flow.matmul(a, b, transpose_a=True, transpose_b=True)
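Here a is (3, 2) and b is (4, 3); with both transpose flags set, the op computes a.T @ b.T, giving a (2, 4) result. A NumPy shape check:

import numpy as np

a = np.zeros((3, 2), dtype=np.float32)
b = np.zeros((4, 3), dtype=np.float32)
assert np.matmul(a.T, b.T).shape == (2, 4)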
Example #23
 def matmul():
     a = flow.get_variable(
         name="a",
         shape=(2, 3),
         dtype=flow.float,
         initializer=flow.random_uniform_initializer(),
     )
     b = flow.get_variable(
         name="b",
         shape=(3, 4),
         dtype=flow.float,
         initializer=flow.random_uniform_initializer(),
     )
     return flow.matmul(a, b)
Example #24
 def add_2():
     x = flow.get_variable(
         name="x",
         shape=(2, 3),
         dtype=flow.float,
         initializer=flow.random_uniform_initializer(),
     )
     y = flow.get_variable(
         name="y",
         shape=(2, 3),
         dtype=flow.float,
         initializer=flow.random_uniform_initializer(),
     )
     return flow.math.add_n([x, y])
Example #25
 def job1():
     x = flow.get_variable(
         name="x1",
         shape=(2, 3, 4),
         dtype=flow.float,
         initializer=flow.random_uniform_initializer(-10, 10),
     )
     y = flow.get_variable(
         name="y1",
         shape=(1, 3, 1),
         dtype=flow.float,
         initializer=flow.random_uniform_initializer(-10, 10),
     )
     return flow_op(x, y, *args, **kwargs)
Example #26
def variable_scope_test_job_1(a=of.FixedTensorDef((1, 3, 6, 6))):
    with of.scope.namespace("job1_scope1"):
        convw = of.get_variable(
            "conv_weight",
            shape=(5, 3, 3, 3),
            dtype=a.dtype,
            initializer=of.random_uniform_initializer(),
            trainable=True,
        )
        conv = of.nn.conv2d(a, convw, 1, "SAME", "NCHW", name="conv")

        with of.scope.namespace("job1_scope2"):
            fcw = of.get_variable(
                "fc_weight",
                shape=(180, 10),
                dtype=a.dtype,
                initializer=of.random_uniform_initializer(),
                trainable=True,
            )
            fc = of.matmul(of.reshape(conv, (conv.shape[0], -1)),
                           fcw,
                           name="fc")
            fcb = of.get_variable(
                "fc_bias",
                shape=(10, ),
                dtype=a.dtype,
                initializer=of.constant_initializer(1.0),
                trainable=True,
            )
            fc_bias = of.nn.bias_add(fc, fcb)

        fcw2 = of.get_variable(
            "fc2_weight",
            shape=(10, 20),
            dtype=a.dtype,
            initializer=of.random_uniform_initializer(),
            trainable=True,
        )
        fc2 = of.matmul(fc_bias, fcw2, name="fc2")

    print("conv_weight op name: ", convw.op_name)
    print("conv op name: ", conv.op_name)
    print("fc_weight op name: ", fcw.op_name)
    print("fc_bias op name: ", fcb.op_name)
    print("fc op name: ", fc.op_name)
    print("fc2_weight op name: ", fcw2.op_name)
    print("fc2 op name: ", fc2.op_name)

    return fc2
Example #27
def _conv2d_layer(
        args,
        name,
        input,
        filters,
        kernel_size=3,
        strides=1,
        padding="SAME",
        data_format="NCHW",
        dilation_rate=1,
        activation=op_conf_util.kRelu,
        use_bias=False,
        weight_initializer=flow.random_uniform_initializer(),
        bias_initializer=flow.random_uniform_initializer(),
):
    weight_shape = (filters, input.shape[1], kernel_size, kernel_size)
    weight = flow.get_variable(
        name + "-weight",
        shape=weight_shape,
        dtype=input.dtype,
        initializer=weight_initializer,
    )
    weight = flow.identity(weight)
    weight = flow.repeat(weight, args.num_piece_in_batch)
    output = flow.nn.conv2d(input,
                            weight,
                            strides,
                            padding,
                            data_format,
                            dilation_rate,
                            name=name)
    if use_bias:
        bias = flow.get_variable(
            name + "-bias",
            shape=(filters, ),
            dtype=input.dtype,
            initializer=bias_initializer,
        )
        bias = flow.identity(bias)
        bias = flow.repeat(bias, args.num_piece_in_batch)
        output = flow.nn.bias_add(output, bias, data_format)

    if activation is not None:
        if activation == op_conf_util.kRelu:
            output = flow.math.relu(output)
        else:
            raise NotImplementedError

    return output
Example #28
 def add_with_large_array():
     large_shape = (256 * 1024 * 1024 + 1, )
     x = flow.get_variable(
         name="x",
         shape=large_shape,
         dtype=flow.float,
         initializer=flow.random_uniform_initializer(),
     )
     y = flow.get_variable(
         name="y",
         shape=large_shape,
         dtype=flow.float,
         initializer=flow.random_uniform_initializer(),
     )
     return flow.math.add_n([x, y])
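Each variable holds 256 * 1024 * 1024 + 1 = 268,435,457 float32 elements, just over 1 GiB apiece at 4 bytes per element; the trailing + 1 pushes the buffer past the power-of-two boundary, which is presumably what the test targets. A quick check:

(256 * 1024 * 1024 + 1) * 4 / 2**30   # ≈ 1.0000000037 GiB per variable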
Example #29
 def add_var():
     return flow.get_variable(
         name="trick",
         shape=(1, ),
         dtype=flow.float,
         initializer=flow.random_uniform_initializer(),
     )
Example #30
 def get_var(var_name):
     return flow.get_variable(
         name=var_name,
         shape=(2, 256, 14, 14),
         dtype=flow.float32,
         initializer=flow.random_uniform_initializer(),
     )