Example #1
import json

def print_everything(some_json, alias_name):
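    # Build an Elm type alias from the JSON sample, then matching JSON decoders and encoders, and print all three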
    stuff = json.loads(some_json)
    aliases = create_type_alias(stuff, type_alias_name=alias_name)
    decoders = [create_decoder(alias, has_snakecase=True, prefix='decode') for alias in aliases]
    encoders = [create_encoder(alias, has_snakecase=True, prefix='encode') for alias in aliases]

    print('\n'.join(aliases))
    print('\n'.join(decoders))
    print('\n'.join(encoders))
Example #2
def from_elm_file(file_text):
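    # Scan Elm source text for type aliases and union types, then print a generated decoder and encoder for each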
    aliases = find_type_aliases(file_text)
    unions = find_union_types(file_text)

    decoders = [create_decoder(alias, has_snakecase=True, prefix='decode') for alias in aliases]
    decoders.extend(create_union_type_decoder(union_type) for union_type in unions)
    encoders = [create_encoder(alias, has_snakecase=True, prefix='encode') for alias in aliases]
    encoders.extend(create_union_type_encoder(union_type) for union_type in unions)

    print('\n'.join(decoders))
    print('\n'.join(encoders))
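A minimal usage sketch for from_elm_file, assuming the Elm source lives in a local file; the path Models.elm is an illustrative assumption:

# hypothetical driver: generate decoders and encoders for every alias and union type in an Elm module
with open('Models.elm') as elm_module:
    from_elm_file(elm_module.read())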
Example #3
    def __init__(self, units, vocab_size, use_att=False, dropout_rate=0.5, isBidirectional=True):

        super(JointModel, self).__init__()
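        # joint model: one shared encoder feeding a sequence decoder and a separate classification decoder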

        self._dropout_rate = dropout_rate
        self._isBidirectional = isBidirectional

        self._units = units

        ## the encoder performs feature extraction
        self._encoder = create_encoder(self._units, dropout_rate=dropout_rate, isBidirectional=isBidirectional)

        if use_att:
            name = 'ATT'
        else:
            name = 'basic'

        ## the decoder performs sequence decoding
        self._decoder = create_decoder(name, self._units, dropout_rate, False)

        ## the decoder for classification
        self._clas_decoder = create_decoder(name, self._units, dropout_rate, False, vocab_size)
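A minimal construction sketch for the joint model above; the unit count and vocabulary size are illustrative assumptions:

# hypothetical instantiation of JointModel with assumed hyperparameters
model = JointModel(units=128, vocab_size=10000, use_att=True, dropout_rate=0.5, isBidirectional=True)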
Example #4
import json

def print_everything(some_json, alias_name):
    stuff = json.loads(some_json)
    aliases = create_type_alias(stuff, type_alias_name=alias_name)
    decoders = [
        create_decoder(alias, has_snakecase=True, prefix='decode')
        for alias in aliases
    ]
    encoders = [
        create_encoder(alias, has_snakecase=True, prefix='encode')
        for alias in aliases
    ]

    print('\n'.join(aliases))
    print('\n'.join(decoders))
    print('\n'.join(encoders))
Example #5
def from_elm_file(file_text):
    aliases = find_type_aliases(file_text)
    unions = find_union_types(file_text)

    decoders = [
        create_decoder(alias, has_snakecase=True, prefix='decode')
        for alias in aliases
    ]
    decoders.extend(
        create_union_type_decoder(union_type) for union_type in unions)
    encoders = [
        create_encoder(alias, has_snakecase=True, prefix='encode')
        for alias in aliases
    ]
    encoders.extend(
        create_union_type_encoder(union_type) for union_type in unions)

    print('\n'.join(decoders))
    print('\n'.join(encoders))
Example #6
import json

def test():
    testJson = """
 { "name" : "Noah"
 , "age" : 23
 , "location" :
    { "name" : "sweden"
    , "days" : 45
    }
 }
 """

    stuff = json.loads(testJson)
    print('Creating type alias')

    aliases = create_type_alias(stuff, type_alias_name='Assignment')
    print('\n'.join(aliases))

    print('Creating decoder')
    decoders = []
    for alias in aliases:
        decoder = create_decoder(alias, has_snakecase=True, prefix='decode')
        decoders.append(decoder)

    print('\n'.join(decoders))

    print_everything("""
 { "name" : "Noah"
 , "age" : 23
 , "location" :
    { "name" : "sweden"
    , "days" : 45
    }
 }
""", alias_name="Person")

    print(create_union_type_decoder('type Action = Run | Hide | Noop'))
    print(create_union_type_encoder('type Action = Run | Hide | Noop'))
Example #7
import json

def test():
    testJson = """
 { "name" : "Noah"
 , "age" : 23
 , "location" :
    { "name" : "sweden"
    , "days" : 45
    }
 }
 """

    stuff = json.loads(testJson)
    print('Creating type alias')

    aliases = create_type_alias(stuff, type_alias_name='Assignment')
    print('\n'.join(aliases))

    print('Creating decoder')
    decoders = []
    for alias in aliases:
        decoder = create_decoder(alias, has_snakecase=True, prefix='decode')
        decoders.append(decoder)

    print('\n'.join(decoders))

    print_everything("""
 { "name" : "Noah"
 , "age" : 23
 , "location" :
    { "name" : "sweden"
    , "days" : 45
    }
 }
""",
                     alias_name="Person")

    print(create_union_type_decoder('type Action = Run | Hide | Noop'))
    print(create_union_type_encoder('type Action = Run | Hide | Noop'))
Example #8
    def __init__(self, pathObj, m, n, scale=True, feature_set='basic', use_att=True, seperate_static=False, isBidirectional=False, use_l2=True):

        ## file path object
        self._pathObj = pathObj
        self._m = m
        self._n = n

        ## whether to normalize the data
        self._scale = scale
        ## feature set: three categories (basic, author, structure)
        self._feature_set = feature_set

        ## whether to use attention
        self._use_att = use_att

        ## whether the static variables are concatenated in directly
        self._seperate_static = seperate_static

        ## whether to use a bi-directional GRU
        self._isBidirectional = isBidirectional

        ## whether to use L2 weight regularization
        self._use_l2 = use_l2
        if self._use_l2:
            self._l2_weight = 0.001

        ## load the data
        if not self._seperate_static:
            self._train_X, self._test_X, self._valid_X, self._dx_mean, self._dx_std,\
            self._train_Y, self._test_Y, self._valid_Y, self._y_mean, self._y_std,\
            self._test_sorted_ids = construct_RNN_datasets(pathObj, m, n, self._scale, self._feature_set)
        else:
            self._train_X, self._test_X, self._valid_X, self._dx_mean, self._dx_std,\
            self._train_SX, self._test_SX, self._valid_SX, self._sx_mean, self._sx_std,\
            self._train_Y, self._test_Y, self._valid_Y, self._y_mean, self._y_std,\
            self._test_sorted_ids = construct_RNN_datasets(pathObj, m, n, self._scale, self._feature_set, seperate_static=self._seperate_static)

        ## datasets
        self._batch_sz = 512
        self._buffer_size = len(self._train_Y)
        self._n_batchs = self._buffer_size // self._batch_sz

        if not self._seperate_static:
            self._dataset = tf.data.Dataset.from_tensor_slices((self._train_X, self._train_Y)).shuffle(self._buffer_size)
        else:
            self._dataset = tf.data.Dataset.from_tensor_slices((self._train_X, self._train_SX, self._train_Y)).shuffle(self._buffer_size)
        self._dataset = self._dataset.batch(self._batch_sz, drop_remainder=True)
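        # drop_remainder=True keeps every batch exactly _batch_sz examples; any leftover samples are discarded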

        ### test
        self._test_buffer_size = len(self._test_Y)
        self._n_test_batchs = (self._test_buffer_size + self._batch_sz - 1) // self._batch_sz

        if not self._seperate_static:
            self._test_dataset = tf.data.Dataset.from_tensor_slices((self._test_X, self._test_Y)).shuffle(self._test_buffer_size)
        else:
            self._test_dataset = tf.data.Dataset.from_tensor_slices((self._test_X, self._test_SX, self._test_Y)).shuffle(self._test_buffer_size)
        self._test_dataset = self._test_dataset.batch(self._batch_sz, drop_remainder=True)


        self._valid_buffer_size = len(self._valid_Y)
        self._n_valid_batchs = (self._valid_buffer_size + self._batch_sz - 1) // self._batch_sz

        if not self._seperate_static:
            self._valid_dataset = tf.data.Dataset.from_tensor_slices((self._valid_X, self._valid_Y)).shuffle(self._valid_buffer_size)
        else:
            self._valid_dataset = tf.data.Dataset.from_tensor_slices((self._valid_X, self._valid_SX, self._valid_Y)).shuffle(self._valid_buffer_size)
        self._valid_dataset = self._valid_dataset.batch(self._batch_sz, drop_remainder=True)

        ## dropout rate
        self._dropout_rate = 0.5
        self._units = 128

        ## initialize the encoder and the decoder
        self._model_name = self.gen_model_name()

        self._encoder = create_encoder(self._units, self._dropout_rate, self._isBidirectional)
        self._decoder = create_decoder(self._model_name, self._units, self._dropout_rate, self._seperate_static)

        print('training model {}.'.format(self._model_name))

        ## optimizer
        self._optimizer = tf.keras.optimizers.Adam(learning_rate=5e-4, beta_2=0.95)

        ## where model checkpoints are saved
        self._checkpoint_dir = './training_checkpoints_{}_{}_{}'.format(self._model_name, m, n)
        self._checkpoint_prefix = os.path.join(self._checkpoint_dir, "ckpt")

        self._trackables = {}
        self._trackables['optimizer'] = self._optimizer
        self._trackables['encoder'] = self._encoder
        self._trackables['decoder'] = self._decoder
        self._checkpoint = tf.train.Checkpoint(**self._trackables)
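A hedged companion sketch for restoring the checkpoint written above; the load_latest method name is an assumption, while tf.train.latest_checkpoint and Checkpoint.restore are standard TensorFlow calls:

    def load_latest(self):
        # hypothetical helper: restore optimizer, encoder and decoder from the newest checkpoint on disk
        latest = tf.train.latest_checkpoint(self._checkpoint_dir)
        if latest is not None:
            self._checkpoint.restore(latest)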