def __init__(self, dim, samples
             , init_w= tf.random_uniform_initializer(minval= -0.01, maxval= 0.01)
             , ftype= tf.float32, scope= 'dbn'):
    self.dim, self.ftype = dim, ftype
    with tf.variable_scope(scope):
        self.rbm = tuple(
            Rbm(scope= "rbm{}".format(i)
                , dim_v= dim_v
                , dim_h= dim_h
                , samples= samples
                , init_w= init_w
                , ftype= self.ftype)
            for i, (dim_v, dim_h) in enumerate(zip(dim, dim[1:]), 1))
        # generative weights w (the rbm stack's weights, top-down), their
        # transposes wg for the downward pass, and separate bottom-up
        # recognition weights wr
        self.w = tuple(rbm.w for rbm in self.rbm[::-1])
        self.wg = tuple(tf.transpose(w) for w in self.w)
        self.wr = tuple(
            tf.get_variable(name= "wr{}".format(i), shape= (dim_d, dim_a), initializer= init_w)
            for i, (dim_d, dim_a) in enumerate(zip(self.dim, self.dim[1:]), 1))
        self.lr_ = tf.placeholder(name= 'lr_', dtype= self.ftype, shape= ())
        # wake
        self.v_ = self.rbm[0].v_
        with tf.name_scope('wake'):
            recogn = [self.v_]
            for w in self.wr:
                recogn.append(binary(tf.matmul(recogn[-1], w)))
            self.recogn = tuple(recogn)
            recogn = recogn[::-1]
            eps = self.lr_ / tf.cast(tf.shape(self.v_)[0], dtype= self.ftype)
            self.wake = tuple(
                w.assign_add(tf.matmul((sj - pj), sk, transpose_a= True) * eps).op
                for w, sk, sj, pj in zip(
                    self.w, recogn, recogn[1:]
                    , (tf.sigmoid(tf.matmul(s, w)) for w, s in zip(self.wg, recogn))))
        # sleep
        top = self.rbm[-1]
        self.k_, (self.v, self.a) = top.k_, top.gibbs
        with tf.name_scope('sleep'):
            recons = [self.a, self.v]
            for w in self.wg[1:]:
                recons.append(binary(tf.matmul(recons[-1], w)))
            self.recons = tuple(recons)
            recons = recons[::-1]
            eps = self.lr_ / tf.cast(tf.shape(self.a)[0], dtype= self.ftype)
            self.sleep = tuple(
                w.assign_add(tf.matmul(sj, (sk - qk), transpose_a= True) * eps).op
                for w, sj, sk, qk in zip(
                    self.wr, recons, recons[1:]
                    , (tf.sigmoid(tf.matmul(s, w)) for w, s in zip(self.wr, recons))))
        # the waking world is the amnesia of dream.
        self.v = self.recons[-1]
        with tf.name_scope('ances'):
            self.a = self.rbm[-1].h
            ances = [self.a]
            for w in self.wg:
                ances.append(binary(tf.matmul(ances[-1], w)))
            self.ances = ances[-1]
    self.step = 0
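# Usage sketch (added illustration, not from the original source): one round of
# wake-sleep fine-tuning on the DBN above. Assumes the enclosing class is named
# `Dbn`; the data is a random stand-in.
import numpy as np
import tensorflow as tf

dbn = Dbn(dim=(784, 256, 64), samples=64, scope='dbn_demo')
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch = (np.random.rand(64, 784) < 0.5).astype(np.float32)
    # wake: a recognition pass on data drives the generative-weight update
    sess.run(dbn.wake, feed_dict={dbn.v_: batch, dbn.lr_: 0.01})
    # sleep: dreaming from the top RBM (k_ Gibbs steps) drives the
    # recognition-weight update
    sess.run(dbn.sleep, feed_dict={dbn.k_: 1, dbn.lr_: 0.01})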
def __init__(self, dim_v, dim_h, samples
             , init_w= tf.random_uniform_initializer(minval= -0.01, maxval= 0.01)
             , ftype= tf.float32
             , scope= 'rbm'):
    self.dim_v, self.dim_h, self.ftype, self.scope = dim_v, dim_h, ftype, scope
    with tf.variable_scope(scope):
        # todo add bias
        self.w = tf.get_variable(name= 'w', shape= (self.dim_v, self.dim_h), initializer= init_w)
        # positive stage: inference
        self.v_ = tf.placeholder(name= 'v_', dtype= self.ftype, shape= (None, self.dim_v))
        with tf.name_scope('hgv'):
            self.hgv = tf.sigmoid(tf.matmul(self.v_, self.w))
        # self.act_h = binary(self.hgv, transform= False, threshold= None)
        # self.h_ = tf.placeholder(name= 'h_', dtype= self.ftype, shape= (None, self.dim_h))
        # self.vgh = tf.matmul(self.h_, self.w, transpose_b= True)
        # self.act_v = binary(self.vgh, transform= False, threshold= None)
        with tf.name_scope('pos'):
            self.pos = tf.matmul(self.v_, self.hgv, transpose_a= True)
            self.pos /= tf.cast(tf.shape(self.v_)[0], dtype= self.ftype)
        # negative stage: stochastic approximation
        self.v = binary_variable(name= 'v', shape= (samples, self.dim_v), dtype= self.ftype)
        self.h = binary_variable(name= 'h', shape= (samples, self.dim_h), dtype= self.ftype)
        self.k_ = tf.placeholder(name= 'k_', dtype= tf.int32, shape= ())
        def gibbs(v, _h):
            h = binary(tf.matmul(v, self.w))
            v = binary(tf.matmul(h, self.w, transpose_b= True))
            # todo real valued v
            # v = tf.sigmoid(tf.matmul(h, self.w, transpose_b= True))
            return v, h
        with tf.name_scope('gibbs'):
            vh = self.v, self.h
            v, h = self.gibbs = tuple(
                tf.assign(x, x2, validate_shape= False)
                for x, x2 in zip(
                    vh, tf.while_loop(
                        loop_vars= (self.k_, vh)
                        , cond= lambda k, vh: (0 < k)
                        , body= lambda k, vh: (k - 1, gibbs(*vh)))[1]))
        with tf.name_scope('neg'):
            # todo update with real probabilities instead of binaries
            h = tf.sigmoid(tf.matmul(v, self.w))
            v = tf.sigmoid(tf.matmul(h, self.w, transpose_b= True))
            self.neg = tf.matmul(v, h, transpose_a= True)
            self.neg /= tf.cast(tf.shape(self.v)[0], dtype= self.ftype)
        self.lr_ = tf.placeholder(name= 'lr_', dtype= self.ftype, shape= ())
        with tf.name_scope('up'):
            self.up = self.w.assign_add((self.pos - self.neg) * self.lr_).op
    self.step = 0
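# Usage sketch (added illustration, not from the original source): persistent
# contrastive-divergence training of the RBM above. Assumes the enclosing
# class is named `Rbm`; the data is a random stand-in.
import numpy as np
import tensorflow as tf

rbm = Rbm(dim_v=784, dim_h=256, samples=64, scope='rbm_demo')
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(100):
        batch = (np.random.rand(64, 784) < 0.5).astype(np.float32)
        # `up` pulls the persistent Gibbs chain through `neg`, so `k_` must be
        # fed along with the data and the learning rate
        sess.run(rbm.up, feed_dict={rbm.v_: batch, rbm.k_: 1, rbm.lr_: 0.01})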
def build_fcn_net(self, inp, use_dice=False):
    with self.graph.as_default():
        self.saver = tf.train.Saver(max_to_keep=1)

        with tf.name_scope("Out"):
            bn1 = tf.layers.batch_normalization(inputs=inp, name='bn1')
            dnn1 = tf.layers.dense(bn1, 200, activation=None, name='f1')
            if use_dice:
                dnn1 = dice(dnn1, name='dice_1')
            else:
                dnn1 = prelu(dnn1, 'prelu1')
            dnn2 = tf.layers.dense(dnn1, 80, activation=None, name='f2')
            if use_dice:
                dnn2 = dice(dnn2, name='dice_2')
            else:
                dnn2 = prelu(dnn2, 'prelu2')
            dnn3 = tf.layers.dense(dnn2, 2, activation=None, name='f3')
            self.y_hat = tf.nn.softmax(dnn3) + 0.00000001

        with tf.name_scope('Metrics'):
            # Cross-entropy loss and optimizer initialization
            # 'core_type_ph': [1, 1, 0,..],
            ctr_loss = -tf.reduce_mean(tf.log(self.y_hat) * self.target_ph)
            self.loss = ctr_loss
            # tf.summary.scalar('loss', self.loss)
            self.optimizer = tf.train.AdamOptimizer(
                learning_rate=self.lr_ph).minimize(self.loss)
            # self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.lr_ph).minimize(self.loss)

            # Accuracy metric
            self.accuracy = tf.reduce_mean(
                tf.cast(tf.equal(tf.round(self.y_hat), self.target_ph), tf.float32))
            # tf.summary.scalar('accuracy', self.accuracy)

        self.merged = tf.summary.merge_all()
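# Usage sketch (added illustration, not from the original source): how this
# method is typically driven. `model` and the concatenated feature tensor
# `features` are assumed to exist; the feed below names only the placeholders
# this method itself reads.
model.build_fcn_net(inp=features, use_dice=True)
with tf.Session(graph=model.graph) as sess:
    sess.run(tf.global_variables_initializer())
    loss, _ = sess.run(
        [model.loss, model.optimizer],
        feed_dict={
            model.target_ph: [[1, 0], [0, 1]],  # one-hot labels per example
            model.lr_ph: 1e-3,
            # ...plus every feature placeholder `features` depends on
        })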
def __init__(self, dim, samples
             , init_w= tf.random_uniform_initializer(minval= -0.01, maxval= 0.01)
             , ftype= tf.float32, scope= 'sbn'):
    self.dim, self.ftype, self.scope = dim, ftype, scope
    with tf.variable_scope(scope):
        self.wr = tuple(
            tf.get_variable(name= "wr{}".format(i), shape= (dim_d, dim_a), initializer= init_w)
            for i, (dim_d, dim_a) in enumerate(zip(self.dim, self.dim[1:]), 1))
        self.wg = tuple(
            tf.get_variable(name= "wg{}".format(i), shape= (dim_a, dim_d), initializer= init_w)
            for i, (dim_d, dim_a) in enumerate(zip(self.dim, self.dim[1:]), 1))[::-1]
        self.lr_ = tf.placeholder(name= 'lr_', dtype= self.ftype, shape= ())
        # wake
        self.v_ = tf.placeholder(name= 'v_', dtype= self.ftype, shape= (None, self.dim[0]))
        with tf.name_scope('wake'):
            recogn = [self.v_]
            for w in self.wr:
                recogn.append(binary(tf.matmul(recogn[-1], w)))
            self.recogn = tuple(recogn)
            recogn = recogn[::-1]
            eps = self.lr_ / tf.cast(tf.shape(self.v_)[0], dtype= self.ftype)
            self.wake = tuple(
                w.assign_add(tf.matmul(sk, (sj - pj), transpose_a= True) * eps).op
                for w, sk, sj, pj in zip(
                    self.wg, recogn, recogn[1:]
                    , (tf.sigmoid(tf.matmul(s, w)) for w, s in zip(self.wg, recogn))))
        # sleep
        with tf.name_scope('a'):
            self.a = tf.round(tf.random_uniform(shape= (samples, self.dim[-1])))
        with tf.name_scope('sleep'):
            recons = [self.a]
            for w in self.wg:
                recons.append(binary(tf.matmul(recons[-1], w)))
            self.recons = tuple(recons)
            recons = recons[::-1]
            eps = self.lr_ / tf.cast(tf.shape(self.a)[0], dtype= self.ftype)
            self.sleep = tuple(
                w.assign_add(tf.matmul(sj, (sk - qk), transpose_a= True) * eps).op
                for w, sj, sk, qk in zip(
                    self.wr, recons, recons[1:]
                    , (tf.sigmoid(tf.matmul(s, w)) for w, s in zip(self.wr, recons))))
        # the waking world is the amnesia of dream.
        self.v = self.recons[-1]
    self.step = 0
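# Usage sketch (added illustration, not from the original source): one
# wake-sleep round on the SBN above. Assumes the enclosing class is named
# `Sbn`; the data is a random stand-in.
import numpy as np
import tensorflow as tf

sbn = Sbn(dim=(784, 256, 64), samples=64, scope='sbn_demo')
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch = (np.random.rand(64, 784) < 0.5).astype(np.float32)
    sess.run(sbn.wake, feed_dict={sbn.v_: batch, sbn.lr_: 0.01})  # updates wg
    sess.run(sbn.sleep, feed_dict={sbn.lr_: 0.01})                # updates wr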
def get_self_or_expand_dims(aim):
    # append a trailing feature axis to a dense [batch] feature and cast to
    # float32, so it can be concatenated with [batch, dim] embeddings
    return tf.cast(tf.expand_dims(aim, -1), tf.float32)
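# Example (added illustration; `demo_ph` is hypothetical): a [batch] int
# feature becomes a [batch, 1] float32 column, ready to concatenate with
# [batch, EMBEDDING_DIM] embeddings.
demo_ph = tf.placeholder(tf.int32, [None], name='demo_ph')
demo_col = get_self_or_expand_dims(demo_ph)  # shape (?, 1), dtype float32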
def __init__(self, dim, samples
             , init_w= tf.random_uniform_initializer(minval= -0.01, maxval= 0.01)
             , ftype= tf.float32, scope= 'dbm'):
    self.dim, self.ftype = dim, ftype
    # todo pretraining
    with tf.variable_scope(scope):
        self.rbm = tuple(
            Rbm(scope= "rbm{}".format(i)
                , dim_v= dim_v
                , dim_h= dim_h
                , samples= samples
                , init_w= init_w
                , ftype= self.ftype)
            for i, (dim_v, dim_h) in enumerate(zip(dim, dim[1:]), 1))
        self.w = tuple(rbm.w for rbm in self.rbm)
        # positive stage: variational inference
        self.m = tuple(rbm.h for rbm in self.rbm)
        self.v_ = self.rbm[0].v_
        self.k_meanf_ = tf.placeholder(name= 'k_meanf_', dtype= tf.int32, shape= ())
        def meanf(m):
            mf, ml = [], self.v_
            for wl, wr, mr in zip(self.w, self.w[1:], m[1:]):
                mf.append(tf.sigmoid(tf.matmul(ml, wl) + tf.matmul(mr, wr, transpose_b= True)))
                ml = mf[-1]
            mf.append(tf.sigmoid(tf.matmul(ml, wr)))
            return tuple(mf)
        with tf.name_scope('meanf'):
            self.meanf = tuple(
                tf.assign(m, mf, validate_shape= False)
                for m, mf in zip(
                    self.m, tf.while_loop(
                        loop_vars= (self.k_meanf_, self.m)
                        , cond= lambda k, _: (0 < k)
                        , body= lambda k, m: (k - 1, meanf(m)))[1]))
        with tf.name_scope('pos'):
            bs = tf.cast(tf.shape(self.v_)[0], dtype= self.ftype)
            vm = (self.v_,) + self.meanf
            self.pos = tuple(
                (tf.matmul(ml, mr, transpose_a= True) / bs)
                for ml, mr in zip(vm, vm[1:]))
        # negative stage: stochastic approximation
        self.x = tuple(rbm.v for rbm in self.rbm)
        self.x += (binary_variable(name= 'x', shape= (samples, self.dim[-1]), dtype= self.ftype),)
        self.v = self.x[0]
        self.k_gibbs_ = tf.placeholder(name= 'k_gibbs_', dtype= tf.int32, shape= ())
        def gibbs(x):
            x = list(x)
            # update odd layers
            for i, (xl, xr, wl, wr) in enumerate(zip(x[::2], x[2::2], self.w, self.w[1:])):
                x[1+(2*i)] = binary(tf.matmul(xl, wl) + tf.matmul(xr, wr, transpose_b= True))
            # update first layer
            x[0] = binary(tf.matmul(x[1], self.w[0], transpose_b= True))
            # update even layers
            for i, (xl, xr, wl, wr) in enumerate(zip(x[1::2], x[3::2], self.w[1:], self.w[2:])):
                x[2+(2*i)] = binary(tf.matmul(xl, wl) + tf.matmul(xr, wr, transpose_b= True))
            # update last layer
            x[-1] = binary(tf.matmul(x[-2], self.w[-1]))
            return tuple(x)
        with tf.name_scope('gibbs'):
            x = self.gibbs = tuple(
                tf.assign(x, xg, validate_shape= False)
                for x, xg in zip(
                    self.x, tf.while_loop(
                        loop_vars= (self.k_gibbs_, self.x)
                        , cond= lambda k, x: (0 < k)
                        , body= lambda k, x: (k - 1, gibbs(x)))[1]))
        with tf.name_scope('neg'):
            bs = tf.cast(tf.shape(self.v)[0], dtype= self.ftype)
            self.neg = tuple(
                (tf.matmul(xl, xr, transpose_a= True) / bs)
                for xl, xr in zip(x, x[1:]))
        # parameter update
        self.lr_ = tf.placeholder(name= 'lr_', dtype= self.ftype, shape= ())
        with tf.name_scope('up'):
            self.up = tuple(
                w.assign_add((pos - neg) * self.lr_).op
                for w, pos, neg in zip(self.w, self.pos, self.neg))
    self.step = 0
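# Usage sketch (added illustration, not from the original source): one update
# step on the DBM above. Assumes the enclosing class is named `Dbm`; the data
# is a random stand-in.
import numpy as np
import tensorflow as tf

dbm = Dbm(dim=(784, 256, 64), samples=64, scope='dbm_demo')
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch = (np.random.rand(64, 784) < 0.5).astype(np.float32)
    # `up` pulls both phases: k_meanf_ mean-field sweeps for the positive
    # statistics, k_gibbs_ steps of the persistent chain for the negative
    sess.run(dbm.up, feed_dict={dbm.v_: batch, dbm.k_meanf_: 10,
                                dbm.k_gibbs_: 5, dbm.lr_: 0.01})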
def __init__(self, *, use_dice=False):
    self.graph = tf.Graph()
    self.tensor_info = {}
    self.use_dice = use_dice
    with self.graph.as_default():
        # Main Inputs
        with tf.name_scope('Main_Inputs'):
            self.target_ph = tf.placeholder(tf.float32, [None, None], name='target_ph')
            self.lr_ph = tf.placeholder(tf.float32, [], name="lr_ph")

            # with tf.name_scope("Teacher_Info"):
            self.teacher_id_ph = tf.placeholder(tf.int32, [None], name="teacher_id_ph")
            self.student_count_ph = tf.placeholder(tf.int32, [None], name="student_count_ph")
            self.province_id_ph = tf.placeholder(tf.int32, shape=[None], name="province_id_ph")
            self.city_id_ph = tf.placeholder(tf.int32, shape=[None], name="city_id_ph")
            # TODO: binary flag: is the teacher a gold-medal lecturer?
            self.core_type_ph = tf.placeholder(tf.int32, shape=[None], name="core_type_ph")

            # with tf.name_scope("Class_Info"):
            # id of the class taught today
            self.class_id_ph = tf.placeholder(tf.int32, [None], name="class_id_ph")
            # textbook edition
            self.edition_id_ph = tf.placeholder(tf.int32, [None], name="edition_id_ph")
            self.grade_id_ph = tf.placeholder(tf.int32, [None], name="grade_id_ph")
            # total number of students across all classes the teacher teaches;
            # continuous int feature
            self.class_student_ph = tf.placeholder(tf.int32, [None], name="class_student_ph")
            # kefei: continuous float features
            self.cap_avg_ph = tf.placeholder(tf.float32, [None], name="cap_avg_ph")
            self.cap_max_ph = tf.placeholder(tf.float32, [None], name="cap_max_ph")
            self.cap_min_ph = tf.placeholder(tf.float32, [None], name="cap_min_ph")

            # with tf.name_scope("Homework_Info"):
            # Candidates (recall set): Tianyu first supplies a pre-filtered set of
            # homework in several groups, each containing many problems; ignore the
            # problem level for now. The granularity is one group, treated as a
            # single homework assignment with two feature attributes: chapters and
            # sections. If today's teacher assigned a given group, that group gets
            # label 1 and the other assigned groups get 0; this is how samples are
            # constructed. chapters/sections may each hold several ids (similar to
            # multi-label), and are zero-padded to a uniform length.
            self.today_chapters_ph = tf.placeholder(tf.int32, [None, None], name="today_chapters_ph")
            self.today_sections_ph = tf.placeholder(tf.int32, [None, None], name="today_sections_ph")
            # unused
            self.today_chap_mask_ph = tf.placeholder(tf.float32, [None, None], name='today_chap_mask_ph')
            self.today_chap_len_ph = tf.placeholder(tf.int32, [None], name='today_chap_len_ph')
            self.today_sec_mask_ph = tf.placeholder(tf.float32, [None, None], name='today_sec_mask_ph')
            self.today_sec_len_ph = tf.placeholder(tf.int32, [None], name='today_sec_len_ph')
            # homework style, i.e. the style of the problems: preview, in-depth, ...
            self.today_style_ph = tf.placeholder(tf.int32, [None], name='today_style_ph')

            # TODO: use three dims to capture more history info
            # Homework assigned to this class on preceding days (e.g. yesterday's),
            # again represented by the two features chap and sec, each holding
            # several ids; hence the [None, None] shapes.
            for fir in ['one', 'two', 'three', 'four', 'five', 'six', 'seven',
                        'eight', 'nine', 'ten', 'eleven', 'twelve', 'thirteen', 'fourteen']:
                key = "history_" + fir + "_chap_ph"
                setattr(self, key, tf.placeholder(tf.int32, [None, None], name=key))
            for fir in ['one', 'two', 'three', 'four', 'five', 'six', 'seven',
                        'eight', 'nine', 'ten', 'eleven', 'twelve', 'thirteen', 'fourteen']:
                key = "history_" + fir + "_sec_ph"
                setattr(self, key, tf.placeholder(tf.int32, [None, None], name=key))

            # TODO: all of the below should consider the type and input
            # with tf.name_scope("Study_Info"):
            # TODO: study_vector_ph's type can change?
            # kefei: the class's learning ability (similar to a midterm exam),
            # represented as a 20-dim int vector
            self.study_vector_ph = tf.placeholder(tf.int32, [None, 20], name="study_vector_ph")
            # how many days ago the vector above was measured; continuous value
            self.gap_days_ph = tf.placeholder(tf.int32, [None], name="gap_days_ph")

            # with tf.name_scope("Submit_Info"):
            # the class's in-app homework submission rate over one month;
            # continuous float feature
            self.month_submit_rate_ph = tf.placeholder(tf.float32, [None], name="month_submit_rate_ph")

            # with tf.name_scope("Capacity_Info"):
            # overall ability of the region; also a continuous float feature
            self.region_capacity_ph = tf.placeholder(tf.float32, [None], name="region_capacity_ph")

            # with tf.name_scope("Prefer_Info"):
            # the difficulty and timing the teacher prefers when assigning homework
            # to this class; continuous float features
            self.prefer_assign_time_avg_ph = tf.placeholder(tf.float32, [None], name="prefer_assign_time_avg_ph")
            self.prefer_assign_time_var_ph = tf.placeholder(tf.float32, [None], name="prefer_assign_time_var_ph")
            self.prefer_assign_rank_avg_ph = tf.placeholder(tf.float32, [None], name="prefer_assign_rank_avg_ph")
            self.prefer_assign_rank_var_ph = tf.placeholder(tf.float32, [None], name="prefer_assign_rank_var_ph")

            # with tf.name_scope("Register_Info"):
            # how long ago the teacher registered in the app; continuous int feature
            self.register_diff_ph = tf.placeholder(tf.int32, [None], name="register_diff_ph")

            # with tf.name_scope("HomeworkCount_Info"):
            # how many problems the teacher has assigned; continuous int feature
            # (total since registering in the app?)
            self.homework_count_ph = tf.placeholder(tf.int32, [None], name="homework_count_ph")

            # with tf.name_scope("Style_Info"):
            # TODO: use 3 dims; the teacher's homework style? one feature field
            for fir in ["1", "2", "3", "4"]:
                for sec in ["100", "010", "001", "110", "101", "011", "111"]:
                    key = "style_" + fir + "0" + sec + "_ph"
                    setattr(self, key, tf.placeholder(tf.int32, [None], name=key))

            # with tf.name_scope("WeekHomeworkCount_Info"):
            # homework the teacher assigned this week; a rate? why float??
            # continuous feature
            self.week_count_ph = tf.placeholder(tf.float32, [None], name="week_count_ph")

            # with tf.name_scope("Reflect_Info"):
            # TODO: explore a more graceful mapping of homework categories
            self.reflect_value_ph = tf.placeholder(tf.int32, [None, None], name="reflect_value_ph")
            self.reflect_mask_ph = tf.placeholder(tf.float32, [None, None], name="reflect_mask_ph")
            self.reflect_len_ph = tf.placeholder(tf.int32, [None], name="reflect_len_ph")

            # with tf.name_scope("Lastday_Info"):
            # number of problems assigned yesterday; continuous int feature
            self.lastday_count_ph = tf.placeholder(tf.int32, [None], name="lastday_count_ph")

        # Embedding layer
        with tf.name_scope('Main_Embedding_layer'):
            # almost done
            with tf.name_scope("Others"):
                # teacher
                with tf.name_scope("Teacher"):
                    self.teacher_id_embeddings_var = tf.get_variable(
                        "teacher_id_embeddings_var", [N_TEACHER, EMBEDDING_DIM])
                    # tf.summary.histogram('teacher_id_embeddings_var', self.teacher_id_embeddings_var)
                    self.teacher_id_embedded = tf.nn.embedding_lookup(
                        self.teacher_id_embeddings_var, self.teacher_id_ph)

                    self.province_id_embeddings_var = tf.get_variable(
                        "province_id_embeddings_var", [N_PROVINCE, EMBEDDING_DIM])
                    # tf.summary.histogram('province_id_embeddings_var', self.province_id_embeddings_var)
                    self.province_id_embedded = tf.nn.embedding_lookup(
                        self.province_id_embeddings_var, self.province_id_ph)

                    self.city_id_embeddings_var = tf.get_variable(
                        "city_id_embeddings_var", [N_CITY, EMBEDDING_DIM])
                    # tf.summary.histogram('city_id_embeddings_var', self.city_id_embeddings_var)
                    self.city_id_embedded = tf.nn.embedding_lookup(
                        self.city_id_embeddings_var, self.city_id_ph)

                    self.core_type_embeddings_var = tf.get_variable(
                        "core_type_embeddings_var", [2, EMBEDDING_DIM])
                    # tf.summary.histogram('core_type_embeddings_var', self.core_type_embeddings_var)
                    self.core_type_embedded = tf.nn.embedding_lookup(
                        self.core_type_embeddings_var, self.core_type_ph)

                    # just to expose an *_embedded alias for the value;
                    # maybe tf.identity?
                    self.student_count_embedded = get_self_or_expand_dims(
                        self.student_count_ph)

                with tf.name_scope("Class"):
                    self.class_id_embeddings_var = tf.get_variable(
                        "class_id_embeddings_var", [N_CLASS, EMBEDDING_DIM])
                    # tf.summary.histogram('class_id_embeddings_var', self.class_id_embeddings_var)
                    self.class_id_embedded = tf.nn.embedding_lookup(
                        self.class_id_embeddings_var, self.class_id_ph)

                    self.edition_id_embeddings_var = tf.get_variable(
                        "edition_id_embeddings_var", [N_EDITION, EMBEDDING_DIM])
                    # tf.summary.histogram('edition_id_embeddings_var', self.edition_id_embeddings_var)
                    self.edition_id_embedded = tf.nn.embedding_lookup(
                        self.edition_id_embeddings_var, self.edition_id_ph)

                    self.grade_id_embeddings_var = tf.get_variable(
                        "grade_id_embeddings_var", [N_GRADE, EMBEDDING_DIM])
                    # tf.summary.histogram('grade_id_embeddings_var', self.grade_id_embeddings_var)
                    self.grade_id_embedded = tf.nn.embedding_lookup(
                        self.grade_id_embeddings_var, self.grade_id_ph)

                    # just to expose an *_embedded alias; maybe tf.identity?
                    # continuous dense values are meaningful as-is and fed directly
                    self.class_student_embedded = get_self_or_expand_dims(
                        self.class_student_ph)
                    self.cap_avg_embedded = get_self_or_expand_dims(self.cap_avg_ph)
                    self.cap_max_embedded = get_self_or_expand_dims(self.cap_max_ph)
                    self.cap_min_embedded = get_self_or_expand_dims(self.cap_min_ph)

                with tf.name_scope("Study"):
                    # just to expose an *_embedded alias; maybe tf.identity?
                    self.study_vector_embedded = tf.cast(self.study_vector_ph, tf.float32)
                    self.gap_days_embedded = get_self_or_expand_dims(self.gap_days_ph)

                with tf.name_scope("Submit"):
                    # just to expose an *_embedded alias; maybe tf.identity?
                    self.month_submit_rate_embedded = get_self_or_expand_dims(
                        self.month_submit_rate_ph)

                with tf.name_scope("Capacity"):
                    # just to expose an *_embedded alias; maybe tf.identity?
                    self.region_capacity_embedded = get_self_or_expand_dims(
                        self.region_capacity_ph)

                with tf.name_scope("Prefer"):
                    # just to expose an *_embedded alias; maybe tf.identity?
                    self.prefer_assign_time_avg_embedded = get_self_or_expand_dims(
                        self.prefer_assign_time_avg_ph)
                    self.prefer_assign_time_var_embedded = get_self_or_expand_dims(
                        self.prefer_assign_time_var_ph)
                    self.prefer_assign_rank_avg_embedded = get_self_or_expand_dims(
                        self.prefer_assign_rank_avg_ph)
                    self.prefer_assign_rank_var_embedded = get_self_or_expand_dims(
                        self.prefer_assign_rank_var_ph)

                with tf.name_scope("Register"):
                    self.register_diff_embedded = get_self_or_expand_dims(
                        self.register_diff_ph)

                with tf.name_scope("HomeworkCount"):
                    self.homework_count_embedded = get_self_or_expand_dims(
                        self.homework_count_ph)

                with tf.name_scope("WeekHomeworkCount"):
                    self.week_count_embedded = get_self_or_expand_dims(
                        self.week_count_ph)

                with tf.name_scope("Lastday"):
                    self.lastday_count_embedded = get_self_or_expand_dims(
                        self.lastday_count_ph)

                # TODO: homework and reflect and style
                with tf.name_scope("Style"):
                    for fir in ["1", "2", "3", "4"]:
                        for sec in ["100", "010", "001", "110", "101", "011", "111"]:
                            key = "style_" + fir + "0" + sec + "_ph"
                            embed_key = "style_" + fir + "0" + sec + "_embedded"
                            setattr(self, embed_key,
                                    get_self_or_expand_dims(getattr(self, key)))

            # homework
            with tf.name_scope("Homework"):
                self.style_embeddings_var = tf.get_variable(
                    "style_embeddings_var", [N_STYLE, EMBEDDING_DIM])
                self.chapters_embeddings_var = tf.get_variable(
                    "chapters_embeddings_var", [N_CHAPTER, EMBEDDING_DIM])
                self.sections_embeddings_var = tf.get_variable(
                    "sections_embeddings_var", [N_SECTION, EMBEDDING_DIM])
                # tf.summary.histogram('homework_embeddings_var', self.homework_embeddings_var)
                self.today_chapters_embedded = get_mask_zero_embedded(
                    self.chapters_embeddings_var, self.today_chapters_ph)
                self.today_sections_embedded = get_mask_zero_embedded(
                    self.sections_embeddings_var, self.today_sections_ph)
                self.history_chap_embedded, self.history_sec_embedded = \
                    get_history_gru_embedded(self)
                self.today_style_embedded = tf.nn.embedding_lookup(
                    self.style_embeddings_var, self.today_style_ph)

            # reflect
            with tf.name_scope("Reflect"):
                self.reflect_embeddings_var = tf.get_variable(
                    "reflect_embeddings_var", [N_REFLECT, EMBEDDING_DIM])
                # tf.summary.histogram('reflect_embeddings_var', self.reflect_embeddings_var)
                self.reflect_value_embedded = get_mask_zero_embedded(
                    self.reflect_embeddings_var, self.reflect_value_ph)
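# `get_mask_zero_embedded` is used above but not defined in this section. A
# plausible sketch (an assumption, not the project's actual helper), taking
# id 0 as the zero-padding value whose embedding must not leak downstream:
def get_mask_zero_embedded(embeddings_var, ids_ph):
    embedded = tf.nn.embedding_lookup(embeddings_var, ids_ph)  # [B, T, D]
    mask = tf.cast(tf.not_equal(ids_ph, 0), tf.float32)        # [B, T]
    return embedded * tf.expand_dims(mask, -1)                 # zero padded slots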