示例#1
0
def load_args():
    """ Load possible arguments
        :return: A list of tuple (arg_type, arg_name, arg_value, arg_desc)
    """
    # Model hyperparameters specific to this variant, appended to the parent's.
    hyperparameters = [
        ('int', 'word_emb_size', 256, 'Word embedding size.'),
        ('int', 'lstm_size', 128, 'LSTM (Encoder and Decoder) size.'),
    ]
    return load_parent_args() + hyperparameters
示例#2
0
def load_args():
    """ Load possible arguments
        :return: A list of tuple (arg_type, arg_name, arg_value, arg_desc)
    """
    # Model hyperparameters specific to this variant, appended to the parent's.
    hyperparameters = [
        ('int', 'nb_graph_conv', 8, 'Number of Graph Conv Layer'),
        ('int', 'word_emb_size', 400, 'Word embedding size.'),
        ('int', 'gcn_size', 80, 'Size of graph convolution outputs.'),
        ('int', 'lstm_size', 400, 'LSTM (Encoder and Decoder) size.'),
        ('int', 'attn_size', 80, 'LSTM decoder attention size.'),
    ]
    return load_parent_args() + hyperparameters
示例#3
0
def load_args():
    """ Load possible arguments
        :return: A list of tuple (arg_type, arg_name, arg_value, arg_desc)
    """
    # Context length depends on module-level constants (board geometry / order
    # encoding) defined elsewhere in this project.
    transformer_context = 1000 + NB_SUPPLY_CENTERS * TOKENS_PER_ORDER

    # Model hyperparameters specific to this variant, appended to the parent's.
    hyperparameters = [
        ('int', 'nb_graph_conv', 12, 'Number of Graph Conv Layer'),
        ('int', 'power_emb_size', 60, 'Power embedding size.'),
        ('int', 'season_emb_size', 20, 'Season embedding size.'),
        ('int', 'gcn_size', 120, 'Size of graph convolution outputs.'),
        ('int', 'attn_size', 120, 'LSTM decoder attention size.'),
        ('int', 'trsf_ctxt_size', transformer_context, 'Size of the context.'),
        ('int', 'trsf_emb_size', 80, 'The size of the embedding for the vocabulary and the context'),
        ('int', 'trsf_nb_heads', 2, 'The number of attention heads to use for transformer'),
        ('int', 'trsf_nb_layers', 4, 'The number of layers to use for transformer'),
    ]
    return load_parent_args() + hyperparameters