Example #1
 def __init__(self, is_training=True):
     super(CrossEntropyCalculation, self).__init__()
     self.onehot = P.OneHot()
     self.on_value = Tensor(1.0, ts.float32)
     self.off_value = Tensor(0.0, ts.float32)
     self.reduce_sum = P.ReduceSum()
     self.reduce_mean = P.ReduceMean()
     self.reshape = P.Reshape()
     self.last_idx = (-1, )
     self.neg = P.Neg()
     self.cast = P.Cast()
     self.is_training = is_training
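The primitives declared here are the standard pieces of a softmax cross-entropy: one-hot encode the labels, multiply by the log-probabilities, negate, sum over the class axis, and take the mean. A minimal numpy sketch of the loss these operators implement (the actual construct method is not shown above, so the ordering is an assumption):

import numpy as np

def softmax_cross_entropy(logits, label_ids, num_labels):
    # log-softmax over the class axis
    shifted = logits - logits.max(axis=-1, keepdims=True)
    log_probs = shifted - np.log(np.exp(shifted).sum(axis=-1, keepdims=True))
    # one-hot with on_value=1.0, off_value=0.0, as declared above
    one_hot = np.eye(num_labels, dtype=np.float32)[label_ids.reshape(-1)]
    # Neg + ReduceSum over the last axis, then ReduceMean over the batch
    per_example = -np.sum(one_hot * log_probs.reshape(-1, num_labels), axis=-1)
    return per_example.mean()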
Example #2
 def __init__(self,
              field_size,
              vocab_size,
              embed_size,
              keep_prob=0.9,
              convert_dtype=False):
     super(DeepFM, self).__init__()
     self.field_size = field_size
     self.vocab_size = vocab_size
     self.embed_size = embed_size
     self.embedding = Parameter(Tensor(
         np.random.normal(loc=0.0,
                          scale=0.01,
                          size=[vocab_size,
                                embed_size]).astype(dtype=np.float32)),
                                name="embedding")
     self.fm_weight = Parameter(Tensor(
         np.random.normal(loc=0.0, scale=0.01,
                          size=[vocab_size, 1]).astype(dtype=np.float32)),
                                name="fm_weight")
     self.dense_layer_1 = DenseLayer(field_size * embed_size,
                                     1024,
                                     keep_prob=keep_prob,
                                     convert_dtype=convert_dtype)
     self.dense_layer_2 = DenseLayer(1024,
                                     512,
                                     keep_prob=keep_prob,
                                     convert_dtype=convert_dtype)
     self.dense_layer_3 = DenseLayer(512,
                                     256,
                                     keep_prob=keep_prob,
                                     convert_dtype=convert_dtype)
     self.dense_layer_4 = DenseLayer(256,
                                     128,
                                     keep_prob=keep_prob,
                                     convert_dtype=convert_dtype)
     self.dense_layer_5 = DenseLayer(128,
                                     1,
                                     keep_prob=keep_prob,
                                     convert_dtype=convert_dtype)
     self.gather = Gather()
     self.mul = Mul()
     self.reduce_sum = ReduceSum(keep_dims=False)
     self.reshape = Reshape()
     self.square = Square()
     self.tile = Tile()
     self.concat = Concat(axis=1)
     self.cast = Cast()
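The `square`, `tile`, `mul` and `reduce_sum` operators support the factorization-machine second-order term, which uses the identity sum_{i<j} <e_i, e_j> = 0.5 * ((sum_i e_i)^2 - sum_i e_i^2) to avoid the quadratic pairwise loop. A small numpy check of that identity (shapes are illustrative, not the actual construct):

import numpy as np

np.random.seed(0)
field_size, embed_size = 4, 8
e = np.random.randn(field_size, embed_size).astype(np.float32)

# naive pairwise interactions: sum over i < j of <e_i, e_j>
naive = sum(float((e[i] * e[j]).sum())
            for i in range(field_size)
            for j in range(i + 1, field_size))

# FM trick: square-of-sum minus sum-of-squares, halved
fm = 0.5 * float((e.sum(axis=0) ** 2).sum() - (e ** 2).sum())

assert abs(naive - fm) < 1e-4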
Example #3
 def _initialize_weights(self):
     self.init_parameters_data()
     for _, m in self.cells_and_names():
         if isinstance(m, layers.Dense):
             m.weight.set_data(
                 Tensor(
                     np.random.normal(
                         0, 0.01, m.weight.data.shape).astype("float32")))
             if m.bias is not None:
                 m.bias.set_data(ts.zeros(m.bias.data.shape))
Example #4
    def query(self, images):
        """
        Query an image from the pool.

        By 50/100, the buffer will return input images.
        By 50/100, the buffer will return images previously stored in the buffer,
        and insert the current images to the buffer.

        Args:
            images (Tensor): The latest generated images from the generator

        Returns:
            Images tensor from the buffer.
        """
        if isinstance(images, Tensor):
            images = images.asnumpy()
        if self.pool_size == 0:  # if the buffer size is 0, do nothing
            return Tensor(images)
        return_images = []
        for image in images:
            # if the buffer is not full, keep inserting current images into the buffer
            if self.num_imgs < self.pool_size:
                self.num_imgs = self.num_imgs + 1
                self.images.append(image)
                return_images.append(image)
            else:
                p = random.uniform(0, 1)
                # by 50% chance, the buffer will return a previously stored image
                # and insert the current image into the buffer
                if p > 0.5:
                    random_id = random.randint(0, self.pool_size - 1)  # randint is inclusive
                    tmp = self.images[random_id].copy()
                    self.images[random_id] = image
                    return_images.append(tmp)
                else:  # by another 50% chance, the buffer will return the current image
                    return_images.append(image)
        return_images = np.array(return_images)  # collect all the images and return
        if len(return_images.shape) != 4:
            raise ValueError("images should be 4-d, but got shape {}".format(
                return_images.shape))
        return Tensor(return_images)
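A short usage sketch for the pool (the enclosing class name ImagePool, its constructor, and the surrounding GAN objects are assumptions; only query is shown above):

pool = ImagePool(pool_size=50)              # hypothetical constructor
fake_batch = generator(real_batch)          # latest generator output, 4-d NCHW
history_batch = pool.query(fake_batch)      # mix of current and buffered images
# train the discriminator on the mixed batch to reduce oscillation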
Example #5
    def __init__(self,
                 input_size,
                 hidden_size,
                 num_layers=1,
                 has_bias=True,
                 batch_first=False,
                 dropout=0.0,
                 bidirectional=False):
        super(StackLSTM, self).__init__()
        self.num_layers = num_layers
        self.batch_first = batch_first
        self.transpose = P.Transpose()

        # direction number
        num_directions = 2 if bidirectional else 1

        # input_size list
        input_size_list = [input_size]
        for _ in range(num_layers - 1):
            input_size_list.append(hidden_size * num_directions)

        # layers (named so the list does not shadow the `layers` module)
        lstm_layers = []
        for i in range(num_layers):
            lstm_layers.append(
                layers.LSTMCell(input_size=input_size_list[i],
                                hidden_size=hidden_size,
                                has_bias=has_bias,
                                batch_first=batch_first,
                                bidirectional=bidirectional,
                                dropout=dropout))

        # weights
        weights = []
        for i in range(num_layers):
            # weight size
            weight_size = (input_size_list[i] +
                           hidden_size) * num_directions * hidden_size * 4
            if has_bias:
                bias_size = num_directions * hidden_size * 4
                weight_size = weight_size + bias_size

            # numpy weight
            stdv = 1 / math.sqrt(hidden_size)
            w_np = np.random.uniform(-stdv, stdv,
                                     (weight_size, 1, 1)).astype(np.float32)

            # lstm weight
            weights.append(
                Parameter(initializer(Tensor(w_np), w_np.shape),
                          name="weight" + str(i)))

        self.lstm = lstm_layers
        self.weight = ParameterTuple(tuple(weights))
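The weight_size formula packs the four LSTM gate matrices for the input-to-hidden and hidden-to-hidden blocks, plus the optional gate biases, into one flat buffer. A quick numeric check with illustrative sizes:

input_size, hidden_size, num_directions = 100, 128, 1
weight_size = (input_size + hidden_size) * num_directions * hidden_size * 4
bias_size = num_directions * hidden_size * 4
print(weight_size, bias_size)  # 116736 512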
Example #6
 def __init__(self,
              config,
              is_training,
              num_labels=2,
              dropout_prob=0.0,
              use_one_hot_embeddings=False):
     super(BertSquad, self).__init__()
     self.bert = BertSquadModel(config, is_training, num_labels,
                                dropout_prob, use_one_hot_embeddings)
     self.loss = CrossEntropyCalculation(is_training)
     self.num_labels = num_labels
     self.seq_length = config.seq_length
     self.is_training = is_training
     self.total_num = Parameter(Tensor([0], ts.float32), name='total_num')
     self.start_num = Parameter(Tensor([0], ts.float32), name='start_num')
     self.end_num = Parameter(Tensor([0], ts.float32), name='end_num')
     self.sum = P.ReduceSum()
     self.equal = P.Equal()
     self.argmax = P.ArgMaxWithValue(axis=1)
     self.squeeze = P.Squeeze(axis=-1)
Example #7
def get_bert_thor_damping(damping_max=5e-2,
                          damping_min=1e-6,
                          damping_power=1.0,
                          damping_total_steps=30000):
    damping = _get_poly_lr(global_step=0,
                           lr_init=0.0,
                           lr_end=damping_min,
                           lr_max=damping_max,
                           warmup_steps=0,
                           total_steps=damping_total_steps,
                           poly_power=damping_power)
    return Tensor(damping)
Example #8
def get_bert_thor_lr(lr_max=0.0034,
                     lr_min=3.244e-05,
                     lr_power=1.0,
                     lr_total_steps=30000):
    learning_rate = _get_poly_lr(global_step=0,
                                 lr_init=0.0,
                                 lr_end=lr_min,
                                 lr_max=lr_max,
                                 warmup_steps=0,
                                 total_steps=lr_total_steps,
                                 poly_power=lr_power)
    return Tensor(learning_rate)
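_get_poly_lr itself is not shown in these examples. A plausible sketch of the schedule its parameters describe, i.e. linear warmup followed by polynomial decay (this is an assumption based on the parameter names, not the actual helper):

import numpy as np

def _get_poly_lr(global_step, lr_init, lr_end, lr_max, warmup_steps,
                 total_steps, poly_power):
    lr = []
    for step in range(total_steps):
        if warmup_steps and step < warmup_steps:
            # linear warmup from lr_init up to lr_max
            lr.append(lr_init + (lr_max - lr_init) * step / warmup_steps)
        else:
            # polynomial decay from lr_max down to lr_end
            frac = 1.0 - (step - warmup_steps) / (total_steps - warmup_steps)
            lr.append(lr_end + (lr_max - lr_end) * frac ** poly_power)
    return np.array(lr[global_step:]).astype(np.float32)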
Example #9
 def __init__(self,
              vocab_size,
              embedding_size,
              embedding_shape,
              use_one_hot_embeddings=False,
              initializer_range=0.02):
     super(EmbeddingLookup, self).__init__()
     self.vocab_size = vocab_size
     self.use_one_hot_embeddings = use_one_hot_embeddings
     self.embedding_table = Parameter(initializer
                                      (TruncatedNormal(initializer_range),
                                       [vocab_size, embedding_size]))
     self.expand = P.ExpandDims()
     self.shape_flat = (-1,)
     self.gather = P.Gather()
     self.one_hot = P.OneHot()
     self.on_value = Tensor(1.0, ts.float32)
     self.off_value = Tensor(0.0, ts.float32)
     self.array_mul = P.MatMul()
     self.reshape = P.Reshape()
     self.shape = tuple(embedding_shape)
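The class carries both a gather and a one_hot/array_mul pair because multiplying a one-hot matrix by the table is the same lookup as gathering rows, and the matmul form is friendlier to some accelerators (hence the use_one_hot_embeddings switch). A numpy check of the equivalence:

import numpy as np

vocab_size, embedding_size = 10, 4
table = np.random.randn(vocab_size, embedding_size).astype(np.float32)
ids = np.array([3, 7, 0])

gathered = table[ids]                                # P.Gather path
one_hot = np.eye(vocab_size, dtype=np.float32)[ids]  # P.OneHot path
matmul = one_hot @ table                             # P.MatMul path

assert np.allclose(gathered, matmul)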
Example #10
 def __init__(self,
              in_channels,
              out_channels,
              keep_prob=0.9,
              convert_dtype=True):
     super(DenseLayer, self).__init__()
     self.weight = Parameter(Tensor(
         np.random.normal(loc=0.0,
                          scale=0.01,
                          size=[in_channels,
                                out_channels]).astype(dtype=np.float32)),
                             name="weight")
     self.bias = Parameter(Tensor(
         np.random.normal(loc=0.0, scale=0.01,
                          size=[out_channels]).astype(dtype=np.float32)),
                           name="bias")
     self.convert_dtype = convert_dtype
     self.dropout = Dropout(keep_prob=keep_prob)
     self.cast = Cast()
     self.matmul = MatMul(transpose_b=False)
     self.bias_add = BiasAdd()
     self.activation = ReLU()
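From these members, the forward pass is presumably dropout plus an affine transform and ReLU, with convert_dtype casting to float16 around the matmul for speed. A numpy sketch under those assumptions (the real construct is not shown, and the exact op ordering may differ):

import numpy as np

def dense_forward(x, weight, bias, keep_prob=0.9, convert_dtype=True):
    # inverted dropout, matching Dropout(keep_prob) at training time
    mask = (np.random.rand(*x.shape) < keep_prob) / keep_prob
    x = x * mask
    if convert_dtype:
        # cast to float16 for the matmul, back to float32 afterwards
        y = (x.astype(np.float16) @ weight.astype(np.float16)).astype(np.float32)
    else:
        y = x @ weight
    return np.maximum(y + bias, 0.0)  # BiasAdd then ReLU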
Example #11
    def __init__(self,
                 embedding_size,
                 embedding_shape,
                 use_relative_positions=False,
                 use_token_type=False,
                 token_type_vocab_size=16,
                 use_one_hot_embeddings=False,
                 initializer_range=0.02,
                 max_position_embeddings=512,
                 dropout_prob=0.1):
        super(EmbeddingPostprocessor, self).__init__()
        self.use_token_type = use_token_type
        self.token_type_vocab_size = token_type_vocab_size
        self.use_one_hot_embeddings = use_one_hot_embeddings
        self.max_position_embeddings = max_position_embeddings
        self.embedding_table = Parameter(initializer
                                         (TruncatedNormal(initializer_range),
                                          [token_type_vocab_size,
                                           embedding_size]),
                                         name='embedding_table')

        self.shape_flat = (-1,)
        self.one_hot = layers.OneHot()
        self.on_value = Tensor(1.0, ts.float32)
        self.off_value = Tensor(0.0, ts.float32)
        self.array_mul = P.MatMul()
        self.reshape = P.Reshape()
        self.shape = tuple(embedding_shape)
        self.layernorm = layers.LayerNorm((embedding_size,))
        self.dropout = layers.Dropout(1 - dropout_prob)
        self.gather = P.Gather()
        self.use_relative_positions = use_relative_positions
        self.slice = P.StridedSlice()
        self.full_position_embeddings = Parameter(initializer
                                                  (TruncatedNormal(initializer_range),
                                                   [max_position_embeddings,
                                                    embedding_size]),
                                                  name='full_position_embeddings')
Example #12
 def _initialize_weights(self):
     self.init_parameters_data()
     for _, m in self.cells_and_names():
         if isinstance(m, layers.Conv2d):
             n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
             m.weight.set_data(
                 Tensor(
                     np.random.normal(0, np.sqrt(
                         2. / n), m.weight.data.shape).astype("float32")))
             if m.bias is not None:
                 m.bias.set_data(ts.zeros(m.bias.data.shape))
         elif isinstance(m, layers.BatchNorm2d):
             m.gamma.set_data(ts.ones(m.gamma.data.shape))
             m.beta.set_data(ts.zeros(m.beta.data.shape))
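The Conv2d branch is Kaiming/He initialization: with n = kernel_height * kernel_width * out_channels, drawing weights from a normal distribution with standard deviation sqrt(2/n) keeps activation variance roughly constant through ReLU layers. For illustration:

import numpy as np

kh, kw, out_channels = 3, 3, 64
n = kh * kw * out_channels
std = np.sqrt(2.0 / n)  # He initialization std for ReLU networks
print(round(std, 4))    # 0.0589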
Example #13
def predict_process(args_opt,
                    data_loader,
                    G_generator,
                    predict_name='testA_to_fakeB',
                    fake_name='fake_B'):
    reporter = GanReporter(args_opt)
    reporter.start_predict(predict_name)
    for data in data_loader:
        img = Tensor(data["image"])
        path = str(data["image_name"][0], encoding="utf-8")
        fake = G_generator(img)
        save_image(fake, os.path.join(imgs_out, fake_name, path))
    reporter.info('saved %s images at %s', fake_name,
                  os.path.join(imgs_out, fake_name))
    reporter.end_predict()
Example #14
 def __init__(self, network, optimizer, scale_update_layer=None):
     super(BertSquadLayer, self).__init__(auto_prefix=False)
     self.network = network
     self.network.set_grad()
     self.weights = optimizer.parameters
     self.optimizer = optimizer
     self.grad = P.GradOperation(get_by_list=True, sens_param=True)
     self.allreduce = P.AllReduce()
     self.grad_reducer = None
     self.cast = P.Cast()
     self.alloc_status = P.NPUAllocFloatStatus()
     self.get_status = P.NPUGetFloatStatus()
     self.clear_before_grad = P.NPUClearFloatStatus()
     self.reduce_sum = P.ReduceSum(keep_dims=False)
     self.depend_parameter_use = P.Depend()
     self.base = Tensor(1, ts.float32)
     self.less_equal = P.LessEqual()
     self.hyper_map = P.HyperMap()
     self.loss_scale = None
     self.loss_scaling_manager = scale_update_layer
     if scale_update_layer:
         self.loss_scale = Parameter(Tensor(
             scale_update_layer.get_loss_scale(), dtype=ts.float32),
                                     name="loss_scale")
Example #15
    def __init__(self, network, optimizer, scale_update_layer=None):

        super(BertFinetuneLayer, self).__init__(auto_prefix=False)
        self.network = network
        self.network.set_grad()
        self.weights = optimizer.parameters
        self.optimizer = optimizer
        self.optimizer.global_step = Parameter(initializer(0.0, [1]),
                                               name='global_step')
        self.grad = P.GradOperation(get_by_list=True, sens_param=True)
        self.allreduce = P.AllReduce()
        self.grad_reducer = None
        self.cast = P.Cast()
        self.gpu_target = False
        if context.get_context("device_target") == "GPU":
            self.gpu_target = True
            self.float_status = P.FloatStatus()
            self.addn = P.AddN()
            self.reshape = P.Reshape()
        else:
            self.alloc_status = P.NPUAllocFloatStatus()
            self.get_status = P.NPUGetFloatStatus()
            self.clear_before_grad = P.NPUClearFloatStatus()
        self.reduce_sum = P.ReduceSum(keep_dims=False)
        self.depend_parameter_use = P.Depend()
        self.base = Tensor(1, ts.float32)
        self.less_equal = P.LessEqual()
        self.hyper_map = P.HyperMap()
        self.loss_scale = None
        self.loss_scaling_manager = scale_update_layer
        if scale_update_layer:
            self.loss_scale = Parameter(Tensor(
                scale_update_layer.get_loss_scale(), dtype=ts.float32),
                                        name="loss_scale")
Example #16
def _weight_variable(shape, factor=0.01):
    init_value = np.random.randn(*shape).astype(np.float32) * factor
    return Tensor(init_value)
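Usage is direct; the returned Tensor can seed a Parameter (the shapes below are illustrative):

w = _weight_variable([256, 128])          # dense weight, std 0.01
b = _weight_variable([128], factor=0.0)   # all-zero bias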
Example #17
            imdbdata = ImdbDataset(args_opt.aclimdb_path, args_opt.glove_path,
                                   args_opt.embed_size)
            imdbdata.convert_to_mindrecord(args_opt.preprocess_path)

    embedding_table = np.loadtxt(
        os.path.join(args_opt.preprocess_path,
                     "weight.txt")).astype(np.float32)

    # build the network
    net = SentimentNet(vocab_size=embedding_table.shape[0],
                       embed_size=args_opt.embed_size,
                       num_hiddens=args_opt.num_hiddens,
                       num_layers=args_opt.num_layers,
                       bidirectional=args_opt.bidirectional,
                       num_classes=args_opt.num_classes,
                       weight=Tensor(embedding_table),
                       batch_size=args_opt.batch_size)
    net.update_parameters_name(prefix='huawei')
    model = Model(net)

    # define the loss function
    net_loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
    # define the optimizer
    net_opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()),
                       0.01, 0.9)
    model.compile(loss_fn=net_loss,
                  optimizer=net_opt,
                  metrics={"Accuracy": Accuracy()})

    epoch_size = args_opt.epoch_size
    batch_size = args_opt.batch_size
Example #18
        eval_net = ssd300_infer(class_num=args_opt.num_classes)
        model = Model(eval_net)
        if args_opt.checkpoint_path:
            model.load_checkpoint(args_opt.checkpoint_path)
        # perform the model predict operation
        print("\n========================================\n")
        print("total images num: ", total)
        print("Processing, please wait a moment...")
        start = time.time()
        pred_data = []
        id_iter = 0
        for data in ds_eval.create_dict_iterator(output_numpy=True):
            image_np = data['image']
            image_shape = data['image_shape']

            output = model.predict(Tensor(image_np))
            for batch_idx in range(image_np.shape[0]):
                pred_data.append({
                    "boxes": output[0].asnumpy()[batch_idx],
                    "box_scores": output[1].asnumpy()[batch_idx],
                    "img_id": id_iter,
                    "image_shape": image_shape[batch_idx]
                })
                id_iter += 1
        cost_time = int((time.time() - start) * 1000)
        print(f'    100% [{total}/{total}] cost {cost_time} ms')
        # calculate mAP for the predict data
        voc_cls = [
            'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
            'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
            'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',