Example #1
 def __init__(self, num_class=10):
     super(LeNet5, self).__init__()
     self.cast = P.Cast()
     self.flatten = nn.Flatten()
     self.embedding = nn.EmbeddingLookup(16, 4)
     self.relu = nn.ReLU()
     self.fc = fc_with_initialize(12, num_class)
Example #2
 def __init__(self, vocab_size, embedding_size, target='CPU'):
     super().__init__()
     self.embedding_lookup = nn.EmbeddingLookup(vocab_size, embedding_size, param_init='ones', target=target)
     self.bn = nn.BatchNorm2d(num_features=3)
     self.mul = P.Mul()
     self.reshape = P.Reshape()
     self.relu = nn.PReLU()
Example #3
 def __init__(self, shape=None):
     super(Net, self).__init__()
     if shape is None:
         shape = [8, 8]
     self.index = Tensor(np.ones(shape), dtype=ms.int32)
     self.embeddinglookup = nn.EmbeddingLookup(64, 64, param_init='ones')
     self.embeddinglookup.embeddinglookup.set_strategy(((1, 1), (8, 1)))
Example #4

 def __init__(self, param_np, target='CPU'):
     super().__init__()
     self.param = Parameter(Tensor(param_np), name="w1")
     self.embedding_lookup = nn.EmbeddingLookup(target=target)
     self.bn = nn.BatchNorm2d(num_features=3)
     self.mul = P.Mul()
     self.reshape = P.Reshape()
     self.relu = nn.PReLU()
Example #5

 def __init__(self, shape=None):
     super(Net, self).__init__()
     if shape is None:
         shape = [8, 8]
     weight = Tensor(np.ones([64, 64]), dtype=ms.float32)
     self.weight = Parameter(weight, "w")
     self.index = Tensor(np.ones(shape), dtype=ms.int32)
     self.embeddinglookup = nn.EmbeddingLookup()
     self.embeddinglookup.embeddinglookup.set_strategy(((1, 1), (8, 1)))
Example #6
 def __init__(self, num_class=10):
     super(LeNet5, self).__init__()
     self.cast = P.Cast()
     self.flatten = nn.Flatten()
     self.embedding_table = Parameter(initializer("normal", (16, 4),
                                                  mstype.float32),
                                      name="embedding_table")
     self.embedding = nn.EmbeddingLookup()
     self.relu = nn.ReLU()
     self.fc = fc_with_initialize(12, num_class)
Example #7

    def __init__(self, config):
        super(WideDeepModel, self).__init__()
        self.batch_size = config.batch_size
        host_device_mix = bool(config.host_device_mix)
        parameter_server = bool(config.parameter_server)
        parallel_mode = context.get_auto_parallel_context("parallel_mode")
        is_auto_parallel = parallel_mode in (ParallelMode.SEMI_AUTO_PARALLEL, ParallelMode.AUTO_PARALLEL)
        if is_auto_parallel:
            self.batch_size = self.batch_size * get_group_size()
        is_field_slice = config.field_slice
        self.field_size = config.field_size
        self.vocab_size = config.vocab_size
        self.emb_dim = config.emb_dim
        self.deep_layer_dims_list = config.deep_layer_dim
        self.deep_layer_act = config.deep_layer_act
        self.init_args = config.init_args
        self.weight_init, self.bias_init = config.weight_bias_init
        self.weight_bias_init = config.weight_bias_init
        self.emb_init = config.emb_init
        self.drop_out = config.dropout_flag
        self.keep_prob = config.keep_prob
        self.deep_input_dims = self.field_size * self.emb_dim
        self.layer_dims = self.deep_layer_dims_list + [1]
        self.all_dim_list = [self.deep_input_dims] + self.layer_dims

        init_acts = [('Wide_b', [1], self.emb_init)]
        var_map = init_var_dict(self.init_args, init_acts)
        self.wide_b = var_map["Wide_b"]
        self.dense_layer_1 = DenseLayer(self.all_dim_list[0],
                                        self.all_dim_list[1],
                                        self.weight_bias_init,
                                        self.deep_layer_act,
                                        convert_dtype=True, drop_out=config.dropout_flag)
        self.dense_layer_2 = DenseLayer(self.all_dim_list[1],
                                        self.all_dim_list[2],
                                        self.weight_bias_init,
                                        self.deep_layer_act,
                                        convert_dtype=True, drop_out=config.dropout_flag)
        self.dense_layer_3 = DenseLayer(self.all_dim_list[2],
                                        self.all_dim_list[3],
                                        self.weight_bias_init,
                                        self.deep_layer_act,
                                        convert_dtype=True, drop_out=config.dropout_flag)
        self.dense_layer_4 = DenseLayer(self.all_dim_list[3],
                                        self.all_dim_list[4],
                                        self.weight_bias_init,
                                        self.deep_layer_act,
                                        convert_dtype=True, drop_out=config.dropout_flag)
        self.dense_layer_5 = DenseLayer(self.all_dim_list[4],
                                        self.all_dim_list[5],
                                        self.weight_bias_init,
                                        self.deep_layer_act,
                                        use_activation=False, convert_dtype=True, drop_out=config.dropout_flag)
        self.wide_mul = P.Mul()
        self.deep_mul = P.Mul()
        self.reduce_sum = P.ReduceSum(keep_dims=False)
        self.reshape = P.Reshape()
        self.deep_reshape = P.Reshape()
        self.square = P.Square()
        self.shape = P.Shape()
        self.tile = P.Tile()
        self.concat = P.Concat(axis=1)
        self.cast = P.Cast()
        if is_auto_parallel and host_device_mix and not is_field_slice:
            self.dense_layer_1.dropout.dropout_do_mask.set_strategy(((1, get_group_size()),))
            self.dense_layer_1.dropout.dropout.set_strategy(((1, get_group_size()),))
            self.dense_layer_1.matmul.set_strategy(((1, get_group_size()), (get_group_size(), 1)))
            self.deep_embeddinglookup = nn.EmbeddingLookup(self.vocab_size, self.emb_dim,
                                                           slice_mode=nn.EmbeddingLookUpSplitMode.TABLE_COLUMN_SLICE)
            self.wide_embeddinglookup = nn.EmbeddingLookup(self.vocab_size, 1,
                                                           slice_mode=nn.EmbeddingLookUpSplitMode.TABLE_ROW_SLICE)
            self.deep_mul.set_strategy(((1, 1, get_group_size()), (1, 1, 1)))
            self.deep_reshape.add_prim_attr("skip_redistribution", True)
            self.reduce_sum.add_prim_attr("cross_batch", True)
            self.embedding_table = self.deep_embeddinglookup.embedding_table
        elif is_auto_parallel and host_device_mix and is_field_slice and config.full_batch and config.manual_shape:
            manual_shapes = tuple((s[0] for s in config.manual_shape))
            self.deep_embeddinglookup = nn.EmbeddingLookup(self.vocab_size, self.emb_dim,
                                                           slice_mode=nn.EmbeddingLookUpSplitMode.FIELD_SLICE,
                                                           manual_shapes=manual_shapes)
            self.wide_embeddinglookup = nn.EmbeddingLookup(self.vocab_size, 1,
                                                           slice_mode=nn.EmbeddingLookUpSplitMode.FIELD_SLICE,
                                                           manual_shapes=manual_shapes)
            self.deep_mul.set_strategy(((1, get_group_size(), 1), (1, get_group_size(), 1)))
            self.wide_mul.set_strategy(((1, get_group_size(), 1), (1, get_group_size(), 1)))
            self.reduce_sum.set_strategy(((1, get_group_size(), 1),))
            self.dense_layer_1.dropout.dropout_do_mask.set_strategy(((1, get_group_size()),))
            self.dense_layer_1.dropout.dropout.set_strategy(((1, get_group_size()),))
            self.dense_layer_1.matmul.set_strategy(((1, get_group_size()), (get_group_size(), 1)))
            self.embedding_table = self.deep_embeddinglookup.embedding_table
        elif parameter_server:
            self.deep_embeddinglookup = nn.EmbeddingLookup(self.vocab_size, self.emb_dim)
            self.wide_embeddinglookup = nn.EmbeddingLookup(self.vocab_size, 1)
            self.embedding_table = self.deep_embeddinglookup.embedding_table
            self.deep_embeddinglookup.embedding_table.set_param_ps()
            self.wide_embeddinglookup.embedding_table.set_param_ps()
        else:
            self.deep_embeddinglookup = nn.EmbeddingLookup(self.vocab_size, self.emb_dim, target='DEVICE')
            self.wide_embeddinglookup = nn.EmbeddingLookup(self.vocab_size, 1, target='DEVICE')
            self.embedding_table = self.deep_embeddinglookup.embedding_table
Example #8

 def __init__(self, dim, max_seq_len):
     super(AbsolutePositionalEmbedding, self).__init__()
     self.emb = nn.EmbeddingLookup(max_seq_len, dim)
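The construct of this positional-embedding cell is not shown above. As a minimal sketch of how such a cell is typically exercised, assuming the position ids are simply 0..seq_len-1 (the sizes below are illustrative, not from the original source):

import numpy as np
import mindspore as ms
import mindspore.nn as nn
from mindspore import Tensor

# Illustrative sketch: one embedding vector per position index.
max_seq_len, dim, seq_len = 512, 64, 10
emb = nn.EmbeddingLookup(max_seq_len, dim)        # (512, 64) embedding table
positions = Tensor(np.arange(seq_len), ms.int32)  # position ids 0..9
out = emb(positions)                              # expected shape: (10, 64)
print(out.shape)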
Example #9
    def __init__(self, config):
        super(WideDeepModel, self).__init__()
        self.batch_size = config.batch_size
        host_device_mix = bool(config.host_device_mix)
        parallel_mode = _get_parallel_mode()
        is_auto_parallel = parallel_mode in (ParallelMode.SEMI_AUTO_PARALLEL,
                                             ParallelMode.AUTO_PARALLEL)
        if is_auto_parallel:
            self.batch_size = self.batch_size * get_group_size()
        self.field_size = config.field_size
        self.vocab_size = config.vocab_size
        self.emb_dim = config.emb_dim
        self.deep_layer_dims_list = config.deep_layer_dim
        self.deep_layer_act = config.deep_layer_act
        self.init_args = config.init_args
        self.weight_init, self.bias_init = config.weight_bias_init
        self.weight_bias_init = config.weight_bias_init
        self.emb_init = config.emb_init
        self.drop_out = config.dropout_flag
        self.keep_prob = config.keep_prob
        self.deep_input_dims = self.field_size * self.emb_dim
        self.layer_dims = self.deep_layer_dims_list + [1]
        self.all_dim_list = [self.deep_input_dims] + self.layer_dims

        init_acts = [('Wide_w', [self.vocab_size, 1], self.emb_init),
                     ('V_l2', [self.vocab_size, self.emb_dim], self.emb_init),
                     ('Wide_b', [1], self.emb_init)]
        var_map = init_var_dict(self.init_args, init_acts)
        self.wide_w = var_map["Wide_w"]
        self.wide_b = var_map["Wide_b"]
        self.embedding_table = var_map["V_l2"]
        self.dense_layer_1 = DenseLayer(self.all_dim_list[0],
                                        self.all_dim_list[1],
                                        self.weight_bias_init,
                                        self.deep_layer_act,
                                        convert_dtype=True,
                                        drop_out=config.dropout_flag)
        self.dense_layer_2 = DenseLayer(self.all_dim_list[1],
                                        self.all_dim_list[2],
                                        self.weight_bias_init,
                                        self.deep_layer_act,
                                        convert_dtype=True,
                                        drop_out=config.dropout_flag)
        self.dense_layer_3 = DenseLayer(self.all_dim_list[2],
                                        self.all_dim_list[3],
                                        self.weight_bias_init,
                                        self.deep_layer_act,
                                        convert_dtype=True,
                                        drop_out=config.dropout_flag)
        self.dense_layer_4 = DenseLayer(self.all_dim_list[3],
                                        self.all_dim_list[4],
                                        self.weight_bias_init,
                                        self.deep_layer_act,
                                        convert_dtype=True,
                                        drop_out=config.dropout_flag)
        self.dense_layer_5 = DenseLayer(self.all_dim_list[4],
                                        self.all_dim_list[5],
                                        self.weight_bias_init,
                                        self.deep_layer_act,
                                        use_activation=False,
                                        convert_dtype=True,
                                        drop_out=config.dropout_flag)
        self.wide_mul = P.Mul()
        self.deep_mul = P.Mul()
        self.reduce_sum = P.ReduceSum(keep_dims=False)
        self.reshape = P.Reshape()
        self.deep_reshape = P.Reshape()
        self.square = P.Square()
        self.shape = P.Shape()
        self.tile = P.Tile()
        self.concat = P.Concat(axis=1)
        self.cast = P.Cast()
        if is_auto_parallel and host_device_mix:
            self.dense_layer_1.dropout.dropout_do_mask.set_strategy(
                ((1, get_group_size()), ))
            self.dense_layer_1.matmul.set_strategy(
                ((1, get_group_size()), (get_group_size(), 1)))
            self.deep_embeddinglookup = nn.EmbeddingLookup()
            self.deep_embeddinglookup.embeddinglookup.set_strategy(
                ((1, get_group_size()), (1, 1)))
            self.wide_embeddinglookup = nn.EmbeddingLookup()
            self.wide_embeddinglookup.embeddinglookup.set_strategy(
                ((get_group_size(), 1), (1, 1)))
            self.deep_mul.set_strategy(((1, 1, get_group_size()), (1, 1, 1)))
            self.deep_reshape.add_prim_attr("skip_redistribution", True)
            self.reduce_sum.add_prim_attr("cross_batch", True)
        else:
            self.deep_embeddinglookup = nn.EmbeddingLookup(target='DEVICE')
            self.wide_embeddinglookup = nn.EmbeddingLookup(target='DEVICE')
Example #10

 def __init__(self, vocab_size, embedding_size, target="CPU"):
     super(NetWithEmbeddingLookUp, self).__init__()
     self.embedding_lookup = nn.EmbeddingLookup(vocab_size=vocab_size,
                                                embedding_size=embedding_size,
                                                param_init="ones",
                                                target=target)
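For reference, the lookup cell used throughout these examples can also be exercised on its own. A minimal sketch, assuming MindSpore is installed and the default CPU target (all sizes and ids below are illustrative):

import numpy as np
import mindspore as ms
import mindspore.nn as nn
from mindspore import Tensor

# Illustrative sketch: a 16 x 4 table initialized to ones, queried with a 2 x 3 id batch.
lookup = nn.EmbeddingLookup(vocab_size=16, embedding_size=4,
                            param_init='ones', target='CPU')
ids = Tensor(np.array([[0, 3, 7], [1, 2, 15]]), ms.int32)
out = lookup(ids)
print(out.shape)                     # (2, 3, 4): one 4-dim vector per id
print(lookup.embedding_table.shape)  # the underlying (16, 4) Parameter, as referenced in the examples above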