Example #1
from model import *          # presumably brings in build_network and the image placeholder
import config as cfg
import numpy as np           # used below for shuffling
import tensorflow as tf      # TF1.x-style API (tf.train, tf.Session, tf.summary)
import time
import os

loss, train_decode_result, pred_decode_result = build_network(is_training=True)
optimizer = tf.train.MomentumOptimizer(learning_rate=cfg.learning_rate,
                                       momentum=cfg.momentum,
                                       use_nesterov=True)
train_op = optimizer.minimize(loss)
saver = tf.train.Saver(max_to_keep=5)
img, label = cfg.read_data(cfg.train_dir)
num_train_samples = img.shape[0]
num_batches_per_epoch = int(num_train_samples / cfg.BATCH_SIZE)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
with tf.name_scope('summaries'):
    tf.summary.scalar("cost", loss)
summary_op = tf.summary.merge_all()
writer = tf.summary.FileWriter(cfg.LOGS_PATH)

target_in, target_out = cfg.label2int(label)
for cur_epoch in range(cfg.EPOCH):
    shuffle_idx = np.random.permutation(num_train_samples)
    train_cost = 0
    start_time = time.time()
    batch_time = time.time()
    # the tracing part
    for cur_batch in range(num_batches_per_epoch):
        batch_time = time.time()
        # indices for this batch (completion inferred from the fuller fragment
        # in Example #2); the remainder of the step is truncated in the source
        indexs = [shuffle_idx[i % num_train_samples]
                  for i in range(cur_batch * cfg.BATCH_SIZE,
                                 (cur_batch + 1) * cfg.BATCH_SIZE)]
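The rest of the per-batch step is cut off in the source. A minimal sketch of how such a TF1-style step could continue, assuming hypothetical placeholder names (train_output, target_output; an image placeholder does appear in the prediction example further below):

        # hypothetical continuation of the inner loop; placeholder names are assumptions
        batch_img = img[indexs]
        batch_in = target_in[indexs]
        batch_out = target_out[indexs]
        _, batch_cost, summary = sess.run(
            [train_op, loss, summary_op],
            feed_dict={image: batch_img, train_output: batch_in, target_output: batch_out})
        train_cost += batch_cost
        writer.add_summary(summary, cur_epoch * num_batches_per_epoch + cur_batch)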

Example #2

# The opening of this example (imports and build_network, presumably as in
# Example #1) is truncated in the source.
saver = tf.train.Saver(max_to_keep=5)

sess = tf.Session()
sess.run(tf.global_variables_initializer())
with tf.name_scope('summaries'):
    tf.summary.scalar("cost", loss)
summary_op = tf.summary.merge_all()
writer = tf.summary.FileWriter(cfg.LOGS_PATH)


if cfg.is_restore:
    ckpt = tf.train.latest_checkpoint(cfg.CKPT_DIR)
    if ckpt:
        saver.restore(sess, ckpt)
        print('restored from checkpoint {0}'.format(ckpt))
img, label = cfg.read_data(cfg.train_dir)
val_img, val_label = cfg.read_data(cfg.val_dir)
num_train_samples = img.shape[0]
num_batches_per_epoch = int(num_train_samples / cfg.BATCH_SIZE)
target_in, target_out = cfg.label2int(label)
for cur_epoch in range(cfg.EPOCH):

    shuffle_idx = np.random.permutation(num_train_samples)
    train_cost = 0
    start_time = time.time()
    batch_time = time.time()
    # the tracing part
    for cur_batch in range(num_batches_per_epoch):
        val_img, val_label = shuffle(val_img, val_label)  # shuffle() presumably comes in via the wildcard import (e.g. sklearn.utils.shuffle)
        batch_time = time.time()
        # indices for this batch (wrapped with % to stay in range); the rest of
        # the loop body is truncated in the original
        indexs = [shuffle_idx[i % num_train_samples]
                  for i in range(cur_batch * cfg.BATCH_SIZE,
                                 (cur_batch + 1) * cfg.BATCH_SIZE)]
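Example #2 restores from cfg.CKPT_DIR, but the saving side of that round trip is also cut off. A minimal sketch of the usual counterpart at the end of each epoch, where the checkpoint prefix and save frequency are assumptions:

    # hypothetical: persist a checkpoint once per epoch (the 'model' prefix
    # under cfg.CKPT_DIR is an assumption)
    saver.save(sess, os.path.join(cfg.CKPT_DIR, 'model'), global_step=cur_epoch)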
Example #3
# The head of this example is truncated in the source: the code below is the
# tail of a function (invoked further down as vary_error) that accumulates
# RMSE/omega values and pickles them. Its imports (pickle, numpy as np,
# config, os.path as path) and names such as song_file and f_alpha are
# defined in the missing part.
    d_rmses.append(d_rmse)
    omegas.append(omega)
    # break
  if path.isfile(outfile):
    print('%s exists' % path.basename(outfile))
    return
  data = {
    'e': e_rmses,
    'd': d_rmses,
    'o': omegas,
  }
  config.make_file_dir(outfile)
  with open(outfile, 'wb') as fout:  # close the file handle promptly
    pickle.dump(data, fout)

#### load data
n_users, n_items, n_rates, indexes = config.read_data(song_file)
cmpl_rates = config.complete_rate(indexes)
dataset = n_users, n_items, n_rates, indexes, cmpl_rates
recom_list = config.provide_recom(indexes, cmpl_rates)

alpha = f_alpha
# earlier sweep ranges, kept for reference; only the last assignment is used
# v_omegas = np.arange(0.5, 1.55, 0.1)
# v_omegas = np.arange(1.0, -0.05, -0.1)
v_omegas = np.arange(0.80, 1.25, 0.05)

risk = 'mae', np.absolute  # (metric name, elementwise error function)
n_mcar = 500
vary_error(n_mcar, dataset, recom_list, risk)
exit()  # everything below this call is unreachable in the original script
n_mcar = 50
vary_error(n_mcar, dataset, recom_list, risk)
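For completeness, a small reader for the results pickled above; the keys mirror the data dict in the truncated function, and the path is whatever outfile resolved to:

import pickle

with open('path/to/outfile', 'rb') as fin:  # substitute the actual outfile path
    data = pickle.load(fin)
e_rmses, d_rmses, omegas = data['e'], data['d'], data['o']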

Example #4

from model import *
import config as cfg
import time
import os

loss, train_decode_result, pred_decode_result = build_network(is_training=True)
saver = tf.train.Saver()

sess = tf.Session()

ckpt = tf.train.latest_checkpoint(cfg.CKPT_DIR)
if ckpt:
    saver.restore(sess, ckpt)
    print('restored from checkpoint {0}'.format(ckpt))
else:
    print('failed to load ckpt')
val_img, _ = cfg.read_data(cfg.val_dir)
val_predict = sess.run(pred_decode_result, feed_dict={image: val_img})
predict = cfg.int2label(np.argmax(val_predict, axis=2))
print(predict)
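Feeding the entire validation set through a single sess.run call can exhaust memory on large sets. A minimal batched variant under the same assumptions as above (cfg.BATCH_SIZE is taken from the training examples):

preds = []
for start in range(0, val_img.shape[0], cfg.BATCH_SIZE):
    batch = val_img[start:start + cfg.BATCH_SIZE]
    preds.append(sess.run(pred_decode_result, feed_dict={image: batch}))
val_predict = np.concatenate(preds, axis=0)
predict = cfg.int2label(np.argmax(val_predict, axis=2))
print(predict)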