Esempio n. 1
0
 def __init__(self,
              device,
              hidden_dimensions=256,
              vocab_size=717,
              input_dim=512,
              num_layers=6):
     """Assemble the encoder / transformer-decoder / FFN stack on *device*."""
     super().__init__()
     self.device = device
     # Backbone feature extractor.
     self.encoder = create_encoder(device, hidden_dimensions)
     # 1x1 convolution projecting the encoder's channel count down to the
     # decoder's hidden width.
     projection = nn.Conv2d(self.encoder.num_channels,
                            hidden_dimensions,
                            kernel_size=1)
     self.input_proj = projection.to(device)
     self.decoder = Transformer(device)
     # Output head mapping hidden states to vocabulary logits.
     self.ffn = FFN(hidden_dimensions, input_dim, vocab_size,
                    num_layers).to(device)
Esempio n. 2
0
    def __init__(self, units, vocab_size, use_att=False, dropout_rate=0.5, isBidirectional=True):
        """Joint model: one shared encoder feeding two decoders (sequence + classification)."""
        super(JointModel, self).__init__()

        self._dropout_rate = dropout_rate
        self._isBidirectional = isBidirectional
        self._units = units

        # Encoder performs the feature extraction.
        self._encoder = create_encoder(self._units,
                                       dropout_rate=dropout_rate,
                                       isBidirectional=isBidirectional)

        # Attention-based decoders when requested, plain ones otherwise.
        name = 'ATT' if use_att else 'basic'

        # Decoder for sequence parsing.
        self._decoder = create_decoder(name, self._units, dropout_rate, False)

        # Decoder for the classification head.
        self._clas_decoder = create_decoder(name, self._units, dropout_rate, False, vocab_size)
Esempio n. 3
0
def nnz(x):
    """Render the NNZ node as Python source: '<obj>.nnz'."""
    obj_src = toPython(x.obj)
    return "%s.nnz" % obj_src


# Dispatch table: maps each code-tree node type to the function that
# renders it as Python source.  Loop bodies iterate the sparse matrix's
# "row" / "col" / "data" attributes.
lookup = {
    codes.ConstantCoeff:        constant,
    codes.OnesCoeff:            ones,
    codes.NegateCoeff:          negate,
    codes.EyeCoeff:             eye,
    codes.TransposeCoeff:       trans,
    codes.ParameterCoeff:       parameter,
    codes.ScalarParameterCoeff: scalar_parameter,
    codes.AddCoeff:             add,
    codes.MulCoeff:             mul,
    codes.Just:                 just,
    codes.LoopRows:             loop("row"),
    codes.LoopCols:             loop("col"),
    codes.LoopOver:             loop("data"),
    codes.Range:                _range,  # "range" is reserved
    codes.Repeat:               repeat,
    codes.Assign:               assign,
    codes.NNZ:                  nnz,
    # Plain Python values render as themselves (stringified when needed).
    str:                        lambda x: x,
    int:                        lambda x: str(x),
    float:                      lambda x: str(x),
    AbstractDim:                lambda x: str(x),
}

# Encoder that walks a code tree and emits the corresponding Python source.
toPython = create_encoder(lookup)
Esempio n. 4
0
def assign(x):
    """Render an assignment whose right-hand side is forced to a sparse matrix."""
    lhs_src = toMatlab(x.lhs)
    rhs_src = toMatlab(x.rhs)
    return "%s = sparse(%s)" % (lhs_src, rhs_src)

def nnz(x):
    """Render the NNZ node as '<obj>.nnz' in MATLAB syntax."""
    rendered = toMatlab(x.obj)
    return "%s.nnz" % rendered


# Dispatch table: node type -> MATLAB rendering function.  Note this backend
# renders scalar parameters with the ordinary parameter renderer and has no
# float / AbstractDim entries.
lookup = {
    codes.ConstantCoeff: constant,
    codes.EyeCoeff: eye,
    codes.OnesCoeff: ones,
    codes.TransposeCoeff: trans,
    codes.ParameterCoeff: parameter,
    codes.ScalarParameterCoeff: parameter,
    codes.NegateCoeff: negate,
    codes.AddCoeff: add,
    codes.MulCoeff: mul,
    codes.Just: just,
    codes.LoopRows: loop_rows,
    codes.LoopCols: loop_cols,
    codes.LoopOver: loop_over,
    codes.Range: _range,  # "range" is reserved
    codes.Repeat: repeat,
    codes.Assign: assign,
    codes.NNZ: nnz,
    # Plain Python values render as themselves (stringified when needed).
    str: lambda x: x,
    int: lambda x: str(x),
}

# Encoder that walks a code tree and emits the corresponding MATLAB source.
toMatlab = create_encoder(lookup)
Esempio n. 5
0
def nnz(x):
    """Render the nnz of *x.obj* as C source.

    A parameter shaped as a column vector is treated as dense, so its nnz
    is its row count, known statically; anything else reads the runtime
    nnz field of the generated object.
    """
    target = x.obj
    if isinstance(target, codes.ParameterCoeff) and target.cols == 1:
        return "%s" % (target.rows)
    return "%s->nnz" % (toC(target))

# Dispatch table: node type -> C rendering function.  Generated C loops use
# i / j / v as the row / column / value variables.
lookup = {
    codes.ConstantCoeff: constant,
    codes.OnesCoeff: ones,
    codes.NegateCoeff: negate,
    codes.EyeCoeff: eye,
    codes.TransposeCoeff: trans,
    codes.ParameterCoeff: parameter,
    codes.ScalarParameterCoeff: scalar_parameter,
    codes.AddCoeff: add,
    codes.MulCoeff: mul,
    codes.Just: just,
    codes.LoopRows: loop("i"),
    codes.LoopCols: loop("j"),
    codes.LoopOver: loop("v"),
    codes.Range: _range,
    codes.Repeat: repeat,
    codes.Assign: assign,
    codes.NNZ: nnz,
    # Plain Python values render as themselves (stringified when needed).
    str: lambda x: x,
    int: lambda x: str(x),
    float: lambda x: str(x),
    AbstractDim: lambda x: str(x),
}

# Encoder that walks a code tree and emits the corresponding C source.
toC = create_encoder(lookup)
Esempio n. 6
0
    def __init__(self, pathObj, m, n, scale=True, feature_set='basic', use_att=True,
                 seperate_static=False, isBidirectional=False, use_l2=True):
        """Load the datasets, build the tf.data pipelines, and set up the model.

        Args:
            pathObj: project path helper used to locate the data files.
            m, n: window lengths forwarded to construct_RNN_datasets.
            scale: whether inputs and targets are normalized.
            feature_set: feature group to load (basic / author / structure).
            use_att: whether attention is used (consumed via gen_model_name()).
            seperate_static: if True, static features are loaded and batched as
                a separate tensor instead of being concatenated with sequences.
            isBidirectional: whether the encoder GRU is bi-directional.
            use_l2: whether to apply an L2 weight penalty.
        """
        # Data file locations and window sizes.
        self._pathObj = pathObj
        self._m = m
        self._n = n

        # Whether to normalize the data.
        self._scale = scale
        # Feature set: one of the basic / author / structure groups.
        self._feature_set = feature_set

        # Whether to use attention.
        self._use_att = use_att

        # Whether static features are fed separately (not concatenated).
        self._seperate_static = seperate_static

        # Whether to use a bi-directional GRU.
        self._isBidirectional = isBidirectional

        # Whether to use an L2 weight penalty.
        self._use_l2 = use_l2
        if self._use_l2:
            self._l2_weight = 0.001

        # Load the train/test/valid splits plus normalization statistics.
        if not self._seperate_static:
            self._train_X, self._test_X, self._valid_X, self._dx_mean, self._dx_std, \
            self._train_Y, self._test_Y, self._valid_Y, self._y_mean, self._y_std, \
            self._test_sorted_ids = construct_RNN_datasets(pathObj, m, n, self._scale, self._feature_set)
        else:
            self._train_X, self._test_X, self._valid_X, self._dx_mean, self._dx_std, \
            self._train_SX, self._test_SX, self._valid_SX, self._sx_mean, self._sx_std, \
            self._train_Y, self._test_Y, self._valid_Y, self._y_mean, self._y_std, \
            self._test_sorted_ids = construct_RNN_datasets(pathObj, m, n, self._scale, self._feature_set,
                                                           seperate_static=self._seperate_static)

        # Training pipeline.
        self._batch_sz = 512
        self._buffer_size = len(self._train_Y)
        self._n_batchs = self._buffer_size // self._batch_sz

        if not self._seperate_static:
            self._dataset = tf.data.Dataset.from_tensor_slices(
                (self._train_X, self._train_Y)).shuffle(self._buffer_size)
        else:
            self._dataset = tf.data.Dataset.from_tensor_slices(
                (self._train_X, self._train_SX, self._train_Y)).shuffle(self._buffer_size)
        self._dataset = self._dataset.batch(self._batch_sz, drop_remainder=True)

        # Test pipeline.
        self._test_buffer_size = len(self._test_Y)
        # NOTE(review): this count includes a final partial batch, but
        # batch(..., drop_remainder=True) below discards it — confirm which
        # behavior is intended before relying on _n_test_batchs.
        self._n_test_batchs = (self._test_buffer_size // self._batch_sz
                               if self._test_buffer_size % self._batch_sz == 0
                               else self._test_buffer_size // self._batch_sz + 1)

        if not self._seperate_static:
            # Fix: shuffle with the test-set size (previously used the train
            # buffer size, inconsistent with the seperate_static branch below).
            self._test_dataset = tf.data.Dataset.from_tensor_slices(
                (self._test_X, self._test_Y)).shuffle(self._test_buffer_size)
        else:
            self._test_dataset = tf.data.Dataset.from_tensor_slices(
                (self._test_X, self._test_SX, self._test_Y)).shuffle(self._test_buffer_size)
        self._test_dataset = self._test_dataset.batch(self._batch_sz, drop_remainder=True)

        # Validation pipeline.
        self._valid_buffer_size = len(self._valid_Y)
        self._n_valid_batchs = (self._valid_buffer_size // self._batch_sz
                                if self._valid_buffer_size % self._batch_sz == 0
                                else self._valid_buffer_size // self._batch_sz + 1)

        if not self._seperate_static:
            # Fix: shuffle with the validation-set size (previously used the
            # train buffer size, inconsistent with the branch below).
            self._valid_dataset = tf.data.Dataset.from_tensor_slices(
                (self._valid_X, self._valid_Y)).shuffle(self._valid_buffer_size)
        else:
            self._valid_dataset = tf.data.Dataset.from_tensor_slices(
                (self._valid_X, self._valid_SX, self._valid_Y)).shuffle(self._valid_buffer_size)
        self._valid_dataset = self._valid_dataset.batch(self._batch_sz, drop_remainder=True)

        # Dropout rate and hidden width.
        self._dropout_rate = 0.5
        self._units = 128

        # Initialize the encoder and decoder.
        self._model_name = self.gen_model_name()

        self._encoder = create_encoder(self._units, self._dropout_rate, self._isBidirectional)
        self._decoder = create_decoder(self._model_name, self._units, self._dropout_rate,
                                       self._seperate_static)

        print('train model  {}.'.format(self._model_name))

        # Optimizer.
        self._optimizer = tf.keras.optimizers.Adam(learning_rate=5e-4, beta_2=0.95)

        # Checkpoint location.  NOTE(review): "trainning" typo kept on purpose —
        # existing checkpoints already live under this path.
        self._checkpoint_dir = './trainning_checkpoints_{}_{}_{}'.format(self._model_name, m, n)
        self._checkpoint_prefix = os.path.join(self._checkpoint_dir, "ckpt")

        # Objects tracked by the checkpoint.
        self._trackables = {}
        self._trackables['optimizer'] = self._optimizer
        self._trackables['encoder'] = self._encoder
        self._trackables['decoder'] = self._decoder
        self._checkpoint = tf.train.Checkpoint(**self._trackables)
Esempio n. 7
0
def nnz(x):
    """Render the NNZ node in Python syntax as '<obj>.nnz'."""
    inner = toPython(x.obj)
    return "%s.nnz" % inner

# Dispatch table: node type -> Python rendering function.  Generated loops
# iterate the sparse matrix's "row" / "col" / "data" attributes.
lookup = {
    codes.ConstantCoeff: constant,
    codes.OnesCoeff: ones,
    codes.NegateCoeff: negate,
    codes.EyeCoeff: eye,
    codes.TransposeCoeff: trans,
    codes.ParameterCoeff: parameter,
    codes.ScalarParameterCoeff: scalar_parameter,
    codes.AddCoeff: add,
    codes.MulCoeff: mul,
    codes.Just: just,
    codes.LoopRows: loop("row"),
    codes.LoopCols: loop("col"),
    codes.LoopOver: loop("data"),
    codes.Range: _range,  # "range" is reserved
    codes.Repeat: repeat,
    codes.Assign: assign,
    codes.NNZ: nnz,
    # Plain Python values render as themselves (stringified when needed).
    str: lambda x: x,
    int: lambda x: str(x),
    float: lambda x: str(x),
    AbstractDim: lambda x: str(x),
}

# Encoder that walks a code tree and emits the corresponding Python source.
toPython = create_encoder(lookup)
Esempio n. 8
0
    if isinstance(x.obj, codes.ParameterCoeff) and x.obj.cols == 1:
        return "%s" % (x.obj.rows)
    return "%s->nnz" % (toC(x.obj))


# Dispatch table: node type -> C rendering function.  Generated C loops use
# i / j / v as the row / column / value variables.
lookup = {
    codes.ConstantCoeff:        constant,
    codes.OnesCoeff:            ones,
    codes.NegateCoeff:          negate,
    codes.EyeCoeff:             eye,
    codes.TransposeCoeff:       trans,
    codes.ParameterCoeff:       parameter,
    codes.ScalarParameterCoeff: scalar_parameter,
    codes.AddCoeff:             add,
    codes.MulCoeff:             mul,
    codes.Just:                 just,
    codes.LoopRows:             loop("i"),
    codes.LoopCols:             loop("j"),
    codes.LoopOver:             loop("v"),
    codes.Range:                _range,
    codes.Repeat:               repeat,
    codes.Assign:               assign,
    codes.NNZ:                  nnz,
    # Plain Python values render as themselves (stringified when needed).
    str:                        lambda x: x,
    int:                        lambda x: str(x),
    float:                      lambda x: str(x),
    AbstractDim:                lambda x: str(x),
}

# Encoder that walks a code tree and emits the corresponding C source.
toC = create_encoder(lookup)