import os
import logging

import numpy as np
import mxnet as mx

import data
from autoencoder import AutoEncoderModel  # AutoEncoderModel from MXNet's autoencoder example

# p, lambda_u, lambda_v, K, and is_dummy are assumed to be set earlier.
num_iter = 34000
batch_size = 256
np.random.seed(1234)  # set seed for reproducibility
lv = 1e-2  # ratio lambda_v/lambda_n in CDL

# create the output directory and log the hyperparameters
dir_save = 'cdl%d' % p
if not os.path.isdir(dir_save):
    os.makedirs(dir_save)
fp = open(dir_save + '/cdl.log', 'w')
log_line = 'p%d: lambda_v/lambda_u/ratio/K: %f/%f/%f/%d' % \
    (p, lambda_v, lambda_u, lv, K)
print log_line
fp.write(log_line + '\n')
fp.close()

# load the bag-of-words item matrix X and the user-item matrix R
if is_dummy:
    X = data.get_dummy_mult()
    R = data.read_dummy_user()
else:
    X = data.get_mult()
    R = data.read_user()

# set to INFO to see less information during training
logging.basicConfig(level=logging.DEBUG)

# GPU alternative (the [784, ...] shape is the MNIST autoencoder example):
#ae_model = AutoEncoderModel(mx.gpu(0), [784, 500, 500, 2000, 10], pt_dropout=0.2,
#    internal_act='relu', output_act='relu')
# CPU version; mx.cpu() needs no device id:
ae_model = AutoEncoderModel(mx.cpu(), [X.shape[1], 100, K], pt_dropout=0.2,
    internal_act='relu', output_act='relu')

train_X = X

#ae_model.layerwise_pretrain(train_X, 256, 50000, 'sgd', l_rate=0.1, decay=0.0,