Example #1
 def forward(self, xNum, xCat):
     # embed the auxiliary categorical variables (station, year, month,
     # day of month, weekday, hour) and concatenate along the feature axis
     embedConcat = nd.concat(self.stationEmbedding(xCat[:, :, 0]),
                             self.nYearEmbedding(xCat[:, :, 1]),
                             self.nMonthEmbedding(xCat[:, :, 2]),
                             self.mDayEmbedding(xCat[:, :, 3]),
                             self.wdayEmbedding(xCat[:, :, 4]),
                             self.nHourEmbedding(xCat[:, :, 5]),
                             dim=2)
     # split the embeddings into the conditioning window (first 168 steps)
     # and the forecast window
     embedTrain = embedConcat[:, 0:168, :]
     embedTest = embedConcat[:, 168:, :]
     # build the encoder input: append the target series to the embeddings,
     # then move the features to the channel axis for the TCN blocks
     xNum = xNum.reshape((xNum.shape[0], xNum.shape[1], 1))
     inputSeries = nd.concat(xNum, embedTrain, dim=2)
     inputSeries = nd.transpose(inputSeries, axes=(0, 2, 1))
     for subTCN in self.encoder:
         inputSeries = subTCN(inputSeries)
     # flatten the encoder output and repeat it for every forecast step
     output = inputSeries
     output = nd.transpose(output, axes=(0, 2, 1))
     output = nd.reshape(output, (output.shape[0], 1, -1))
     output = nd.broadcast_axis(output, axis=1, size=self.outputSize)
     # decode against the forecast-window embeddings
     output = self.outputLayer(self.decoder(output, embedTest))
     #output = nd.sum_axis(output, axis=2)
     # distribution heads: collapse the trailing unit axis
     mu = nd.sum_axis(self.mu(output), axis=2)
     sigma = nd.sum_axis(self.sigma(output), axis=2)
     return mu, sigma
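The mu and sigma heads presumably map the decoder output to a single channel, and nd.sum_axis(..., axis=2) then drops that trailing unit axis. A minimal, self-contained sketch of the pattern; the gluon Dense head and all sizes here are illustrative assumptions, not the original model's definitions:

from mxnet import nd
from mxnet.gluon import nn

batch, steps, hidden = 4, 24, 32                # illustrative sizes
decoded = nd.random.normal(shape=(batch, steps, hidden))

mu_head = nn.Dense(1, flatten=False)            # acts on the last axis only
mu_head.initialize()
mu = nd.sum_axis(mu_head(decoded), axis=2)      # (batch, steps, 1) -> (batch, steps)
print(mu.shape)                                 # (4, 24)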
Example #2
    def forward(self, current, previous, doc_encode):
        """Score the current sentence for extraction.

        Combines a content term, a salience term against the document
        encoding, and a novelty penalty against the running summary
        state, then squashes the sum with a sigmoid.

        Args:
            current (NDArray): h_j (batch_size, sentence_hidden_size * 2)
            previous (NDArray): s_j (batch_size, sentence_hidden_size * 2)
            doc_encode (NDArray): d (batch_size, ndoc_dims)

        Returns:
            NDArray: P, the extraction probability (batch_size, 1)
        """
        # content: (batch_size, 1)
        content = self.content_encoder(current)
        # salience: (batch_size, sentence_hidden_size * 2)
        salience = self.salience_encoder(doc_encode)
        salience = current * salience
        # salience: (batch_size,)
        salience = nd.sum_axis(salience, -1)
        # salience: (batch_size, 1)
        salience = nd.expand_dims(salience, -1)

        # novelty: (batch_size, sentence_hidden_size * 2)
        novelty = self.novelty_encoder(nd.tanh(previous))
        novelty = current * novelty
        # novelty: (batch_size,)
        novelty = nd.sum_axis(novelty, -1)
        # novelty: (batch_size, 1)
        novelty = nd.expand_dims(novelty, -1)

        # P: (batch_size, 1)
        P = nd.sigmoid(content + salience - novelty)

        return P
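The three terms compose into P = sigmoid(content + salience - novelty). Below is a toy run of the same composition on random tensors; the Dense layers stand in for the three encoders and are assumptions, not the original definitions:

from mxnet import nd
from mxnet.gluon import nn

batch, hid2, doc_dims = 4, 8, 16                  # hid2 = sentence_hidden_size * 2
current = nd.random.normal(shape=(batch, hid2))   # h_j
previous = nd.random.normal(shape=(batch, hid2))  # s_j
doc_encode = nd.random.normal(shape=(batch, doc_dims))

content_encoder = nn.Dense(1)                     # h_j -> scalar content score
salience_encoder = nn.Dense(hid2)                 # d -> vector, dotted with h_j
novelty_encoder = nn.Dense(hid2)                  # s_j -> vector, dotted with h_j
for block in (content_encoder, salience_encoder, novelty_encoder):
    block.initialize()

content = content_encoder(current)
salience = nd.expand_dims(
    nd.sum_axis(current * salience_encoder(doc_encode), -1), -1)
novelty = nd.expand_dims(
    nd.sum_axis(current * novelty_encoder(nd.tanh(previous)), -1), -1)
P = nd.sigmoid(content + salience - novelty)
print(P.shape)                                    # (4, 1)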
Example #3
 def forward(self, xNum, xCat):
     # embed the auxiliary variables
     embedConcat = nd.concat(self.stationEmbedding(xCat[:, :, 0]),
                             self.nYearEmbedding(xCat[:, :, 1]),
                             self.nMonthEmbedding(xCat[:, :, 2]),
                             self.mDayEmbedding(xCat[:, :, 3]),
                             self.wdayEmbedding(xCat[:, :, 4]),
                             self.nHourEmbedding(xCat[:, :, 5]),
                             dim=2)
     # split the embeddings into the conditioning and forecast windows;
     # only consider the id for the input series
     embedTrain = embedConcat[:, 0:self.inputSize, :]
     embedTest = embedConcat[:, self.inputSize:, :]
     # The input series for encoding
     xNum = xNum.reshape((xNum.shape[0], xNum.shape[1], 1))
     #inputSeries = nd.concat(xNum, embedTrain, dim=2)
     inputSeries = xNum
     inputSeries = nd.transpose(inputSeries, axes=(0, 2, 1))
     for subTCN in self.encoder:
         inputSeries = subTCN(inputSeries)
     # The output
     output = inputSeries
     output = nd.transpose(output, axes=(0, 2, 1))
     output = nd.reshape(output, (output.shape[0], 1, -1))
     output = nd.broadcast_axis(output, axis=1, size=self.outputSize)
     # the decoder
     output = self.outputLayer(self.decoder(output, embedTest))
     #output = nd.sum_axis(output, axis=2)
     # The quantile outputs
     outputQ10 = nd.sum_axis(self.Q10(output), axis=2)
     outputQ50 = nd.sum_axis(self.Q50(output), axis=2)
     outputQ90 = nd.sum_axis(self.Q90(output), axis=2)
     return outputQ10, outputQ50, outputQ90
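Before the decoder, both forecasting examples flatten the encoder output into one context vector per series and repeat it once per forecast step with nd.broadcast_axis. A quick shape check of that handoff, with illustrative sizes:

from mxnet import nd

batch, channels, steps, output_size = 4, 16, 168, 24   # illustrative sizes
encoded = nd.random.normal(shape=(batch, channels, steps))

out = nd.transpose(encoded, axes=(0, 2, 1))             # (batch, steps, channels)
out = nd.reshape(out, (batch, 1, -1))                   # (batch, 1, steps * channels)
out = nd.broadcast_axis(out, axis=1, size=output_size)  # one copy per forecast step
print(out.shape)                                        # (4, 24, 2688)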
Example #4
 def preprocess(self, data, label):
     # convert NCHW RGB images to grayscale with luma weights
     # 0.3*R + 0.59*G + 0.11*B; the (1, 3, 1, 1) weight tensor
     # broadcasts over the batch and spatial axes
     gray_data = nd.sum_axis(nd.array([[[[0.3]], [[0.59]], [[0.11]]]]) * data,
                             1,
                             keepdims=True)
     # collapse the label channels, then one-hot encode the result
     gray_label = nd.sum_axis(nd.array([[[[1]], [[1]], [[1]]]]) * label, 1)
     one_hot_label = self.one_hot(gray_label)
     return gray_data, one_hot_label
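The same weighted-sum trick in isolation: the (1, 3, 1, 1) tensor broadcasts against an NCHW batch, so summing over axis 1 yields one luma channel per pixel. A quick check on a random batch (shapes assumed):

from mxnet import nd

rgb = nd.random.uniform(shape=(2, 3, 28, 28))        # NCHW batch, assumed shape
weights = nd.array([[[[0.3]], [[0.59]], [[0.11]]]])  # (1, 3, 1, 1)
gray = nd.sum_axis(weights * rgb, 1, keepdims=True)
print(gray.shape)                                    # (2, 1, 28, 28)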
Example #5
import json
import os

import mxnet as mx
import numpy as np
from mxnet import nd

# download_from_s3, decode_response and push_to_s3 are helper functions
# defined elsewhere in the source module
def transform_fn(net, data, input_content_type, output_content_type):
    try:
        # the request body is a JSON list whose first element is a
        # JSON-encoded dict giving the S3 location of the input image
        inp = json.loads(json.loads(data)[0])
        bucket = inp['bucket']
        prefix = inp['prefix']
        s3_response = download_from_s3(bucket, prefix)
        img = decode_response(s3_response)
        # HWC -> NCHW, then grayscale via luma weights 0.3/0.59/0.11
        img = nd.expand_dims(nd.transpose(img, (2, 0, 1)), 0)
        img = nd.sum_axis(nd.array([[[[0.3]], [[0.59]], [[0.11]]]]) * img,
                          1,
                          keepdims=True)
        batch = mx.io.DataBatch([img])
        net.forward(batch)
        raw_output = net.get_outputs()[0].asnumpy()
        # per-pixel class mask: argmax over the channel axis
        mask = np.argmax(raw_output, axis=1)[0].astype(np.uint8)
        output_prefix = os.path.join(
            'output', '/'.join(prefix.split('/')[1:]).split('.')[0] +
            '_MASK_PREDICTION.png')
        push_to_s3(mask, bucket, output_prefix)
        response = {'bucket': bucket, 'prefix': output_prefix}
    except Exception as e:
        response = {'Error': str(e)}
    return json.dumps(response), output_content_type
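A hedged sketch of how such a handler could be exercised locally: the payload mirrors the double-encoded JSON the function expects, while net, the bucket name, and the key are placeholders for a loaded mx.mod.Module and real S3 locations:

import json

payload = json.dumps([json.dumps({'bucket': 'my-bucket',           # placeholder
                                  'prefix': 'images/scan_001.png'})])
body, content_type = transform_fn(net, payload, 'application/json',
                                  'application/json')
print(content_type, body)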