Example #1
0
File: Chorus.py Project: TalonCB/ReChorus
 def parse_model_args(parser):
     """Register Chorus-specific command-line options on *parser* and
     delegate the remaining options to SequentialModel.parse_model_args."""
     add = parser.add_argument
     add('--stage', type=int, default=2,
         help='Stage of training: 1-KG_pretrain, 2-recommendation.')
     add('--base_method', type=str, default='BPR',
         help='Basic method to generate recommendations: BPR, GMF')
     add('--emb_size', type=int, default=64,
         help='Size of embedding vectors.')
     add('--time_scalar', type=int, default=60 * 60 * 24 * 100,
         help='Time scalar for time intervals.')
     add('--category_col', type=str, default='i_category',
         help='The name of category column in item_meta.csv.')
     add('--lr_scale', type=float, default=0.1,
         help='Scale the lr for parameters in pre-trained KG model.')
     add('--margin', type=float, default=1,
         help='Margin in hinge loss.')
     return SequentialModel.parse_model_args(parser)
Example #2
0
 def parse_model_args(parser):
     """Add this model's contrastive-learning hyperparameter options to
     *parser*, then hand off to SequentialModel.parse_model_args."""
     add = parser.add_argument
     add('--emb_size', type=int, default=64,
         help='Size of embedding vectors.')
     add('--gamma', type=float, default=1,
         help='Coefficient of the contrastive loss.')
     add('--beta_a', type=int, default=3,
         help='Parameter of the beta distribution for sampling.')
     add('--beta_b', type=int, default=3,
         help='Parameter of the beta distribution for sampling.')
     add('--ctc_temp', type=float, default=1,
         help='Temperature in context-target contrastive loss.')
     add('--ccc_temp', type=float, default=0.2,
         help='Temperature in context-context contrastive loss.')
     add('--encoder', type=str, default='BERT4Rec',
         help='Choose a sequence encoder: GRU4Rec, Caser, BERT4Rec.')
     return SequentialModel.parse_model_args(parser)
Example #3
0
 def parse_model_args(parser):
     """Declare the masked-pretraining hyperparameter options on *parser*
     and delegate the rest to SequentialModel.parse_model_args."""
     add = parser.add_argument
     add('--emb_size', type=int, default=64,
         help='Size of embedding vectors.')
     add('--mip_weight', type=float, default=0.2,
         help='Coefficient of the MIP loss.')
     add('--sp_weight', type=float, default=0.5,
         help='Coefficient of the SP loss.')
     add('--mask_ratio', type=float, default=0.2,
         help='Proportion of masked positions in the sequence.')
     add('--stage', type=int, default=1,
         help='Stage of training: 1-pretrain, 2-finetune, default-from_scratch.')
     return SequentialModel.parse_model_args(parser)
Example #4
0
 def parse_model_args(parser):
     """Register the intent-distillation model's hyperparameter options and
     forward *parser* to SequentialModel.parse_model_args."""
     add = parser.add_argument
     add('--emb_size', type=int, default=64,
         help='Size of embedding vectors.')
     add('--attn_size', type=int, default=8,
         help='Size of attention vectors.')
     add('--K', type=int, default=2,
         help='Number of hidden intent.')
     add('--add_pos', type=int, default=1,
         help='Whether add position embedding.')
     add('--temp', type=float, default=1,
         help='Temperature in knowledge distillation loss.')
     add('--n_layers', type=int, default=1,
         help='Number of the projection layer.')
     add('--stage', type=int, default=3,
         help='Stage of training: 1-pretrain_extractor, 2-pretrain_predictor, 3-joint_finetune.')
     return SequentialModel.parse_model_args(parser)
Example #5
0
 def parse_model_args(parser):
     """Add the GRU/attention size options to *parser*, then delegate to
     SequentialModel.parse_model_args."""
     # (flag, type, default, help) specs, registered in declaration order.
     specs = [
         ('--emb_size', int, 64, 'Size of embedding vectors.'),
         ('--hidden_size', int, 100, 'Size of hidden vectors in GRU.'),
         ('--attention_size', int, 50, 'Size of attention hidden space.'),
     ]
     for flag, arg_type, arg_default, text in specs:
         parser.add_argument(flag, type=arg_type, default=arg_default, help=text)
     return SequentialModel.parse_model_args(parser)
Example #6
0
 def parse_model_args(parser):
     """Register the embedding-size and time-scalar options, then hand
     *parser* to SequentialModel.parse_model_args."""
     # (flag, type, default, help) specs, registered in declaration order.
     specs = [
         ('--emb_size', int, 64, 'Size of embedding vectors.'),
         ('--t_scalar', int, 60, 'Time interval scalar.'),
     ]
     for flag, arg_type, arg_default, text in specs:
         parser.add_argument(flag, type=arg_type, default=arg_default, help=text)
     return SequentialModel.parse_model_args(parser)
Example #7
0
 def parse_model_args(parser):
     """Add the intent-model hyperparameter options to *parser* and
     delegate the remainder to SequentialModel.parse_model_args."""
     # (flag, type, default, help) specs, registered in declaration order.
     specs = [
         ('--emb_size', int, 64, 'Size of embedding vectors.'),
         ('--attn_size', int, 8, 'Size of attention vectors.'),
         ('--K', int, 2, 'Number of hidden intent.'),
         ('--add_pos', int, 1, 'Whether add position embedding.'),
     ]
     for flag, arg_type, arg_default, text in specs:
         parser.add_argument(flag, type=arg_type, default=arg_default, help=text)
     return SequentialModel.parse_model_args(parser)
Example #8
0
File: SRGNN.py Project: TalonCB/ReChorus
 def parse_model_args(parser):
     """Register SRGNN's embedding and layer-count options, then delegate
     to SequentialModel.parse_model_args."""
     add = parser.add_argument
     add('--emb_size', type=int, default=64,
         help='Size of embedding vectors.')
     add('--num_layers', type=int, default=1,
         help='Number of self-attention layers.')
     return SequentialModel.parse_model_args(parser)
Example #9
0
File: CLRec.py Project: THUwangcy/ReChorus
 def parse_model_args(parser):
     """Add CLRec's embedding-size and temperature options to *parser*,
     then hand off to SequentialModel.parse_model_args."""
     # (flag, type, default, help) specs, registered in declaration order.
     specs = [
         ('--emb_size', int, 64, 'Size of embedding vectors.'),
         ('--temp', float, 0.2, 'Temperature in contrastive loss.'),
     ]
     for flag, arg_type, arg_default, text in specs:
         parser.add_argument(flag, type=arg_type, default=arg_default, help=text)
     return SequentialModel.parse_model_args(parser)
Example #10
0
File: Caser.py Project: TalonCB/ReChorus
 def parse_model_args(parser):
     """Register Caser's convolutional hyperparameter options on *parser*
     and delegate the rest to SequentialModel.parse_model_args."""
     add = parser.add_argument
     add('--emb_size', type=int, default=64,
         help='Size of embedding vectors.')
     add('--num_horizon', type=int, default=16,
         help='Number of horizon convolution kernels.')
     add('--num_vertical', type=int, default=8,
         help='Number of vertical convolution kernels.')
     add('--L', type=int, default=4,
         help='Union window size.')
     return SequentialModel.parse_model_args(parser)
Example #11
0
File: KDA.py Project: yarncraft/ReChorus
 def parse_model_args(parser):
     """Add KDA's knowledge-graph and attention hyperparameter options to
     *parser*, then delegate to SequentialModel.parse_model_args."""
     add = parser.add_argument
     add('--emb_size', type=int, default=64,
         help='Size of embedding vectors.')
     add('--neg_head_p', type=float, default=0.5,
         help='The probability of sampling negative head entity.')
     add('--num_layers', type=int, default=1,
         help='Number of self-attention layers.')
     add('--num_heads', type=int, default=1,
         help='Number of attention heads.')
     add('--gamma', type=float, default=-1,
         help='Coefficient of KG loss (-1 for auto-determine).')
     add('--attention_size', type=int, default=10,
         help='Size of attention hidden space.')
     add('--pooling', type=str, default='average',
         help='Method of pooling relational history embeddings: average, max, attention')
     add('--include_val', type=int, default=1,
         help='Whether include relation value in the relation representation')
     return SequentialModel.parse_model_args(parser)
Example #12
0
File: FPMC.py Project: THUwangcy/ReChorus
 def parse_model_args(parser):
     """Register FPMC's single embedding-size option and delegate the
     remaining options to SequentialModel.parse_model_args."""
     add = parser.add_argument
     add('--emb_size', type=int, default=64,
         help='Size of embedding vectors.')
     return SequentialModel.parse_model_args(parser)