def set_data(self, data):
    """Attach a ratings dataset and re-initialize every model parameter.

    Bias vectors are drawn with utils.randmat and flattened to 1-D;
    the latent factor matrices have ``self.dim`` columns.
    """
    self.data = data
    # Global mean rating, used as the prediction baseline.
    self.mean = data.rate.mean()
    n_users, n_items = data.num_u, data.num_i
    # Per-user / per-item biases, flattened from (n, 1) down to (n,).
    self.b_u = utils.randmat(n_users, 1).reshape(n_users)
    self.b_i = utils.randmat(n_items, 1).reshape(n_items)
    # Latent factor matrices: (n_users x dim) and (n_items x dim).
    self.p_u = utils.randmat(n_users, self.dim)
    self.q_i = utils.randmat(n_items, self.dim)
def set_data(self,data):
    """Store *data* and randomly initialize biases and latent factors."""
    self.data = data
    # Global mean of all observed ratings (baseline term of the model).
    self.mean = data.rate.mean()
    # Flatten the (num_u, 1) / (num_i, 1) random matrices into 1-D bias vectors.
    self.b_u = utils.randmat(data.num_u,1).reshape(data.num_u)
    self.b_i = utils.randmat(data.num_i,1).reshape(data.num_i)
    # User / item latent factor matrices of width self.dim.
    self.p_u = utils.randmat(data.num_u,self.dim)
    self.q_i = utils.randmat(data.num_i,self.dim)
def build_mnn(self, fid, param_list):
    """Create the TF variables for one masked MLP (flow step *fid*).

    Appends ONE new list to *param_list* containing, in order:
    the input layer's (w, b) mapping incoming -> dim_h, one
    (wh, bh) pair per hidden layer (dim_h -> dim_h), and a final
    4-tuple (wout, bout, wout2, bout2) holding two output heads
    mapping dim_h -> incoming.
    """
    dimin = self.incoming
    with tf.variable_scope(self.scope):
        # Input layer: project from the flow dimension up to the hidden width.
        # Variable names encode layer index, flow name and flow id.
        w = randmat((dimin, self.dim_h), name='w{}_{}_{}'.format(0, self.name, fid))
        b = tf.Variable(tf.zeros((self.dim_h, )), name='b{}_{}_{}'.format(0, self.name, fid))
        param_list.append([(w, b)])
        # Hidden layers, numbered 1..n_hidden in the variable names.
        for l in range(self.n_hidden):
            wh = randmat((self.dim_h, self.dim_h), name='w{}_{}_{}'.format(l + 1, self.name, fid))
            bh = tf.Variable(tf.zeros((self.dim_h, )), name='b{}_{}_{}'.format(
                l + 1, self.name, fid))
            param_list[-1].append((wh, bh))
        # First output head: hidden width back down to the flow dimension.
        wout = randmat((self.dim_h, dimin), name='w{}_{}_{}'.format(self.n_hidden, self.name, fid))
        bout = tf.Variable(tf.zeros((dimin, )), name='b{}_{}_{}'.format(self.n_hidden, self.name, fid))
        # Second output head ('_sigma' suffix); its bias starts at 2,
        # presumably to bias the initial scale upward — TODO confirm.
        wout2 = randmat(
            (self.dim_h, dimin), name='w{}_{}_{}_sigma'.format(self.n_hidden, self.name, fid))
        bout2 = tf.Variable(tf.ones((dimin, )) * 2, name='b{}_{}_{}_sigma'.format(
            self.n_hidden, self.name, fid))
        param_list[-1].append((wout, bout, wout2, bout2))
def build(self):
    """Create the (w, u, b) TF variables for every planar-flow step.

    One triple is appended to self.params per flow; variable names
    encode the flow index and this flow's name.
    """
    with tf.variable_scope(self.scope):
        for k in xrange(self.n_flows):
            suffix = '{}_{}'.format(k, self.name)
            # Direction and scale vectors of shape (incoming, 1).
            w_k = randmat((self.incoming, 1), name='w_' + suffix)
            u_k = randmat((self.incoming, 1), name='u_' + suffix)
            # Scalar offset, initialized to zero.
            b_k = tf.Variable(tf.zeros((1,)), name='b_' + suffix)
            self.params.append([w_k, u_k, b_k])
def LBFGS(self, max_iter=45):
    """Fit the user/item bias vectors with scipy's L-BFGS-B.

    Packs randomly initialized b_u and b_i into one flat list,
    minimizes self._lossfun with gradient self._fprime, then unpacks
    the optimum back into self.b_u / self.b_i.

    max_iter: maximum number of L-BFGS-B iterations (default 45).
    """
    n_users = self.data.num_u
    n_items = self.data.num_i
    # Random starting point, flattened to plain lists so the segments
    # concatenate with '+'.
    b_u = utils.randmat(n_users, 1).reshape(n_users).tolist()
    b_i = utils.randmat(n_items, 1).reshape(n_items).tolist()
    # Wall-clock timing: time.time() matches the factor-model LBFGS in
    # this project; time.clock() was platform-dependent (CPU time on
    # Unix) and is removed in Python 3.8.
    start = time.time()
    result = optimize.fmin_l_bfgs_b(self._lossfun, b_u + b_i,
                                    fprime=self._fprime,
                                    maxiter=max_iter)[0]
    # The flat solution is laid out as [user biases | item biases].
    self.b_u = result[:n_users]
    self.b_i = result[n_users:]
    end = time.time()
    print("time:%f" % (end - start))
def LBFGS(self, max_iter=45): b_u = utils.randmat(self.data.num_u, 1).reshape(self.data.num_u).tolist() b_i = utils.randmat(self.data.num_i, 1).reshape(self.data.num_i).tolist() start = time.clock() re = optimize.fmin_l_bfgs_b(self._lossfun, b_u + b_i, fprime=self._fprime, maxiter=max_iter)[0] self.b_u = re[:self.data.num_u] self.b_i = re[self.data.num_u:] end = time.clock() print "time:%f" % (end - start)
def LBFGS(self,max_iter=100):
    """Fit biases AND latent factors jointly with L-BFGS-B.

    The flat parameter vector is laid out as
    [b_u (n) | b_i (m) | p_u (n*d, row-major) | q_i (m*d, row-major)].
    """
    n = self.data.num_u
    m = self.data.num_i
    d = self.dim
    # Random starting point; everything flattened to lists so the four
    # segments concatenate with '+'.
    bu = utils.randmat(self.data.num_u,1).reshape(self.data.num_u).tolist()
    bi = utils.randmat(self.data.num_i,1).reshape(self.data.num_i).tolist()
    pu = utils.randmat(n,d).reshape(self.data.num_u*self.dim).tolist()
    qi = utils.randmat(m,d).reshape(self.data.num_i*self.dim).tolist()
    start = time.time()
    re=optimize.fmin_l_bfgs_b(self._lossfun,bu+bi+pu+qi,fprime=self._fprime,maxiter=max_iter)[0]
    # Slice the optimized flat vector back into the model parameters.
    self.b_u=re[:n]
    self.b_i=re[n:n+m]
    self.p_u=re[n+m:n+m+n*d].reshape((n,d))
    self.q_i=re[n+m+n*d:].reshape((m,d))
    end = time.time()
    print "time:%f"%(end-start)
def SGDtrain(self, max_iter=60):
    """Fit the bias-only baseline model with stochastic gradient descent.

    Each epoch sweeps all self.data.M observed ratings, updating the
    user/item bias vectors, and stops early once the training RMSE has
    effectively stopped improving.

    max_iter: maximum number of epochs (default 60).
    """
    # Random initial biases, flattened from (n, 1) down to (n,).
    self.b_u = utils.randmat(self.data.num_u, 1).reshape(self.data.num_u)
    self.b_i = utils.randmat(self.data.num_i, 1).reshape(self.data.num_i)
    print("SGD Start...")
    # Wall-clock timing; time.clock() was platform-dependent and is
    # removed in Python 3.8.
    start = time.time()
    preRmse = 1e10
    nowRmse = 0.0
    for step in xrange(max_iter):
        sq_err = 0.0
        for k in xrange(self.data.M):
            u = self.data.row[k]
            i = self.data.col[k]
            # Residual of the baseline prediction mean + b_u[u] + b_i[i].
            eui = self.data.rate[k] - self.mean - self.b_u[u] - self.b_i[i]
            sq_err += eui * eui
            # Gradient step with L2 shrinkage (beta1) on the biases.
            self.b_u[u] += self.alpha * (eui - self.beta1 * self.b_u[u])
            self.b_i[i] += self.alpha * (eui - self.beta1 * self.b_i[i])
        nowRmse = math.sqrt(sq_err * 1.0 / self.data.M)
        # Early stop once RMSE no longer improves, after a 3-epoch warm-up.
        if nowRmse >= preRmse and abs(preRmse - nowRmse) <= 1e-5 and step >= 3:
            break
        preRmse = nowRmse
        print("%d\t%f" % (step, nowRmse))
        # Decay the learning rate each epoch.
        self.alpha *= self.slowrate
    print("Iteration Complete!")
    end = time.time()
    print("time:%f" % (end - start))
def SGDtrain(self,max_iter=60):
    """Train the bias-only baseline model with SGD over all ratings."""
    # Random initial biases, flattened from (n, 1) down to (n,).
    self.b_u = utils.randmat(self.data.num_u,1).reshape(self.data.num_u)
    self.b_i = utils.randmat(self.data.num_i,1).reshape(self.data.num_i)
    # self.b_u = np.zeros(self.data.num_u)
    # self.b_i = np.zeros(self.data.num_i)
    print "SGD Start..."
    start = time.clock()
    preRmse = 1e10
    nowRmse = 0.0
    for step in xrange(max_iter):
        rmse = 0
        # a=0
        # b=0
        # c=0
        for k in xrange(self.data.M):
            u = self.data.row[k]
            i = self.data.col[k]
            # eui = self.data.rate[k]-self.pred(u,i)
            # Residual against the baseline: mean + user bias + item bias.
            eui = self.data.rate[k] - self.mean-self.b_u[u]-self.b_i[i]
            # a += time.clock()
            rmse += math.pow(eui,2)
            # b += time.clock()
            # SGD update with L2 shrinkage controlled by beta1.
            self.b_u[u] += self.alpha*(eui-self.beta1*self.b_u[u])
            self.b_i[i] += self.alpha*(eui-self.beta1*self.b_i[i])
            # c += time.clock()
            # print b-a,c-b,c-a
        nowRmse = math.sqrt(rmse*1.0/self.data.M)
        # Early stop once improvement stalls (after a 3-epoch warm-up).
        if nowRmse >= preRmse and abs(preRmse-nowRmse)<=1e-5 and step>=3:
            break
        else:
            preRmse = nowRmse
        print "%d\t%f"%(step,nowRmse)
        self.alpha *= self.slowrate
    print "Interation Complete!"
    end = time.clock()
    print "time:%f"%(end-start)
def LBFGS(self, max_iter=100): n = self.data.num_u m = self.data.num_i d = self.dim bu = utils.randmat(self.data.num_u, 1).reshape(self.data.num_u).tolist() bi = utils.randmat(self.data.num_i, 1).reshape(self.data.num_i).tolist() pu = utils.randmat(n, d).reshape(self.data.num_u * self.dim).tolist() qi = utils.randmat(m, d).reshape(self.data.num_i * self.dim).tolist() start = time.time() re = optimize.fmin_l_bfgs_b(self._lossfun, bu + bi + pu + qi, fprime=self._fprime, maxiter=max_iter)[0] self.b_u = re[:n] self.b_i = re[n:n + m] self.p_u = re[n + m:n + m + n * d].reshape((n, d)) self.q_i = re[n + m + n * d:].reshape((m, d)) end = time.time() print "time:%f" % (end - start)
def build(self):
    """Create the variational posterior/prior variables for this conv layer."""
    # Number of input channels; kernels are (rows, cols, in_ch, filters).
    stack_size = self.input_shape[-1]
    self.W_shape = (self.nb_row, self.nb_col, stack_size, self.nb_filter)
    # Fan-in of a single filter (receptive field area times channels).
    self.input_dim = self.nb_col * stack_size * self.nb_row
    self.stack_size = stack_size
    with tf.variable_scope(self.name):
        # Gaussian posterior over the kernel: mean and log-variance
        # (log-variance initialized near -9, i.e. a tiny initial variance).
        self.mu_W = randmat(self.W_shape, name='mean_W')
        self.logvar_W = randmat(self.W_shape, mu=-9., name='logvar_W', extra_scale=1e-6)
        self.mu_bias = tf.Variable(tf.zeros((self.nb_filter, )), name='mean_bias')
        self.logvar_bias = randmat((self.nb_filter, ), mu=-9., name='logvar_bias', extra_scale=1e-6)
        if self.use_z:
            # Per-filter auxiliary variable z; its mean starts at 1 only
            # when no q-flows are used (flows then learn the shift).
            self.qzero_mean = randmat((self.nb_filter, ), name='dropout_rates_mean',
                                      mu=1. if self.n_flows_q == 0 else 0.)
            self.qzero = randmat((self.nb_filter, ), name='dropout_rates', mu=np.log(0.1),
                                 extra_scale=1e-6)
            # Auxiliary parameters for the reverse model r(z|W).
            self.rsr_M = randmat((self.nb_filter, ), name='var_r_aux')
            self.apvar_M = randmat((self.nb_filter, ), name='apvar_r_aux')
            self.rsri_M = randmat((self.nb_filter, ), name='var_r_auxi')
        # Log prior variances; trainable only when learn_p is set.
        self.pvar = randmat((self.input_dim, ), mu=np.log(self.prior_var),
                            name='prior_var_r_p', extra_scale=1e-6, trainable=self.learn_p)
        self.pvar_bias = randmat((1, ), mu=np.log(self.prior_var_b),
                                 name='prior_var_r_p_bias', extra_scale=1e-6,
                                 trainable=self.learn_p)
        # Normalizing flows over the nb_filter-dimensional z: one for the
        # auxiliary reverse model (r), one for the posterior (q).
        if self.n_flows_r > 0:
            self.flow_r = MaskedNVPFlow(self.nb_filter, n_flows=self.n_flows_r,
                                        name=self.name + '_fr', n_hidden=0,
                                        dim_h=2 * self.flow_dim_h, scope=self.name)
        if self.n_flows_q > 0:
            self.flow_q = MaskedNVPFlow(self.nb_filter, n_flows=self.n_flows_q,
                                        name=self.name + '_fq', n_hidden=0,
                                        dim_h=self.flow_dim_h, scope=self.name)
    print('Built layer {}, output_dim: {}, input_shape: {}, flows_r: {}, flows_q: {}, use_z: {}, learn_p: {}, ' \
          'pvar: {}, thres_var: {}'.format(self.name, self.nb_filter, self.input_shape,
                                           self.n_flows_r, self.n_flows_q, self.use_z,
                                           self.learn_p, self.prior_var, self.thres_var))
def build(self):
    """Create the variational posterior/prior variables for this dense layer."""
    dim_in, dim_out = self.input_dim, self.output_dim
    with tf.variable_scope(self.name):
        # Gaussian posterior over the weight matrix: mean and log-variance
        # (log-variance initialized near -9, i.e. a tiny initial variance).
        self.mu_W = randmat((dim_in, dim_out), name='mean_W', extra_scale=1.)
        self.logvar_W = randmat((dim_in, dim_out), mu=-9., name='var_W', extra_scale=1e-6)
        self.mu_bias = tf.Variable(tf.zeros((dim_out, )), name='mean_bias')
        self.logvar_bias = randmat((dim_out, ), mu=-9., name='var_bias', extra_scale=1e-6)
        if self.use_z:
            # Per-input auxiliary variable z; its mean starts at 1 only
            # when no q-flows are used (flows then learn the shift).
            self.qzero_mean = randmat((dim_in, ), name='dropout_rates_mean',
                                      mu=1. if self.n_flows_q == 0 else 0.)
            self.qzero = randmat((dim_in, ), mu=np.log(0.1), name='dropout_rates',
                                 extra_scale=1e-6)
            # Auxiliary parameters for the reverse model r(z|W).
            self.rsr_M = randmat((dim_in, ), name='var_r_aux')
            self.apvar_M = randmat((dim_in, ), name='apvar_r_aux')
            self.rsri_M = randmat((dim_in, ), name='var_r_auxi')
        # Log prior variances; trainable only when learn_p is set.
        self.pvar = randmat((dim_in, ), mu=np.log(self.prior_var), name='prior_var_r_p',
                            trainable=self.learn_p, extra_scale=1e-6)
        self.pvar_bias = randmat((1, ), mu=np.log(self.prior_var_b),
                                 name='prior_var_r_p_bias', trainable=self.learn_p,
                                 extra_scale=1e-6)
        # Normalizing flows over the dim_in-dimensional z; a 1-D z gets a
        # planar flow, higher dimensions use a masked NVP flow.
        if self.n_flows_r > 0:
            if dim_in == 1:
                self.flow_r = PlanarFlow(dim_in, n_flows=self.n_flows_r,
                                         name=self.name + '_fr', scope=self.name)
            else:
                self.flow_r = MaskedNVPFlow(dim_in, n_flows=self.n_flows_r,
                                            name=self.name + '_fr', n_hidden=0,
                                            dim_h=2 * self.flow_dim_h, scope=self.name)
        if self.n_flows_q > 0:
            if dim_in == 1:
                self.flow_q = PlanarFlow(dim_in, n_flows=self.n_flows_q,
                                         name=self.name + '_fq', scope=self.name)
            else:
                self.flow_q = MaskedNVPFlow(dim_in, n_flows=self.n_flows_q,
                                            name=self.name + '_fq', n_hidden=0,
                                            dim_h=self.flow_dim_h, scope=self.name)
    print 'Built layer', self.name, 'prior_var: {}'.format(self.prior_var), \
        'flows_q: {}, flows_r: {}, use_z: {}'.format(self.n_flows_q, self.n_flows_r, self.use_z), \
        'learn_p: {}, thres_var: {}'.format(self.learn_p, self.thres_var)
def reset(self):
    """Re-draw the user and item bias vectors at random."""
    num_u = self.data.num_u
    num_i = self.data.num_i
    # Flatten each (n, 1) random matrix into a 1-D bias vector.
    self.b_u = utils.randmat(num_u, 1).reshape(num_u)
    self.b_i = utils.randmat(num_i, 1).reshape(num_i)
def reset(self):
    """Reinitialize the bias vectors with fresh random values."""
    # Each (n, 1) random matrix is flattened to a length-n vector.
    self.b_u = utils.randmat(self.data.num_u, 1).reshape(self.data.num_u)
    self.b_i = utils.randmat(self.data.num_i, 1).reshape(self.data.num_i)