def main(_):
    """Train/validate the CTR_ggnn model on the 'azenuz_small' dataset.

    Builds the graph, restores weights via CTR_GNN_loader, then runs
    config.epoch_num train/valid epochs, checkpointing whenever the
    validation AUC improves.
    """
    loader = Loader(flag="azenuz_small")
    config = Config(loader, flag="azenuz_small")
    config.gpu = 0  # NOTE(review): hard-codes GPU 0, overriding whatever Config chose
    if platform.system() == 'Linux':
        gpuid = config.gpu
        os.environ["CUDA_VISIBLE_DEVICES"] = '{}'.format(gpuid)
        device = '/gpu:' + str(gpuid)
    else:
        # Non-Linux boxes fall back to CPU.
        device = '/cpu:0'
    lr_updater = LearningRateUpdater(config.learning_rate, config.decay, config.decay_epoch)

    graph = tf.Graph()
    INFO_LOG("build model")
    with graph.as_default():
        # Train and Valid models share the graph (and thus variables).
        trainm = CTR_ggnn(config, device, loader, "Train")
        testm = CTR_ggnn(config, device, loader, "Valid")
    INFO_LOG("finish build model")

    session_config = tf.ConfigProto(allow_soft_placement=True,
                                    log_device_placement=False)
    session_config.gpu_options.allow_growth = True

    with tf.Session(graph=graph, config=session_config) as session:
        # Variables are restored from a checkpoint instead of being
        # initialized from scratch (global_variables_initializer is
        # intentionally not run here).
        CTR_GNN_loader(session, config)
        best_auc = 0.
        # Logloss is minimized, so the running best starts at +inf;
        # starting at 0. would make the update below unreachable.
        best_logloss = float('inf')
        best_epoch_auc = 0.
        best_epoch_logloss = 0.
        auc = 0.
        for epoch in range(config.epoch_num):
            trainm.update_lr(session, lr_updater.get_lr())

            cost, auc = run(session, config, trainm, loader, verbose=True)
            # Use the same epoch index in both lines for consistent logs.
            INFO_LOG("Epoch %d Train AUC %.3f" % (epoch, auc))
            INFO_LOG("Epoch %d Train costs %.3f" % (epoch, cost))

            # Reset streaming-metric accumulators before validation.
            session.run(tf.local_variables_initializer())
            cost, auc = run(session, config, testm, loader, verbose=True)
            INFO_LOG("Epoch %d Valid AUC %.3f" % (epoch, auc))
            INFO_LOG("Epoch %d Valid cost %.3f" % (epoch, cost))

            if best_auc < auc:
                best_auc = auc
                best_epoch_auc = epoch
                # Checkpoint only on validation-AUC improvement.
                CTR_GNN_saver(session, config, best_auc, best_epoch_auc)
            if best_logloss > cost:
                best_logloss = cost
                best_epoch_logloss = epoch
            INFO_LOG("*** best AUC now is %.3f in %d epoch" % (best_auc, best_epoch_auc))
            INFO_LOG("*** best logloss now is %.3f in %d epoch" % (best_logloss, best_epoch_logloss))

            # Rotate to the next data shard every 5 epochs (skip epoch 0).
            if epoch % 5 == 0 and epoch != 0:
                loader.change_data_list(loader.increase_data_idx())
def main(_):
    """Train/validate the DynGCN model on the 'as' dataset.

    Builds the graph, initializes variables, then runs config.epoch_num
    train/valid epochs, checkpointing whenever the validation micro-F1
    improves and advancing the dynamic graph snapshot each epoch.

    NOTE(review): this redefines ``main`` — an earlier ``main`` in this
    file is shadowed; presumably only one variant is meant to be active.
    """
    loader = Loader(flag="as")
    config = Config(loader, flag="as")
    if platform.system() == 'Linux':
        gpuid = config.gpu
        os.environ["CUDA_VISIBLE_DEVICES"] = '{}'.format(gpuid)
        device = '/gpu:' + str(gpuid)
    else:
        # Non-Linux boxes fall back to CPU.
        device = '/cpu:0'
    lr_updater = LearningRateUpdater(config.learning_rate, config.decay, config.decay_epoch)

    graph = tf.Graph()
    with graph.as_default():
        # Train and Valid models share the graph (and thus variables).
        trainm = DynGCN(config, device, loader, "Train")
        testm = DynGCN(config, device, loader, "Valid")

    session_config = tf.ConfigProto(allow_soft_placement=True,
                                    log_device_placement=False)
    session_config.gpu_options.allow_growth = True

    with tf.Session(graph=graph, config=session_config) as session:
        # Fresh training run: initialize all variables (no checkpoint restore).
        session.run(tf.global_variables_initializer())
        best_f1_score = 0.
        best_auc_score = 0.
        best_epoch = 0
        for epoch in range(config.epoch_num):
            trainm.update_lr(session, lr_updater.get_lr())

            cost, evaluation_result = run(session, config, trainm, loader, verbose=False)
            # Second INFO_LOG argument gates whether the line is printed.
            INFO_LOG("Epoch %d Train " % epoch + str(evaluation_result), epoch % 1 == 0)
            INFO_LOG("Epoch %d Train costs %.3f" % (epoch, cost), epoch % 100 == 0)

            # Reset streaming-metric accumulators before validation.
            session.run(tf.local_variables_initializer())
            cost, evaluation_result = run(session, config, testm, loader, verbose=False)
            INFO_LOG("Epoch %d Valid " % epoch + str(evaluation_result), epoch % 1 == 0)
            INFO_LOG("Epoch %d Valid cost %.3f" % (epoch, cost), epoch % 1 == 0)

            auc = evaluation_result['auc']
            f1_score = evaluation_result["f1_score"]["micro_f1_score"]
            # LR schedule is driven by validation micro-F1.
            lr_updater.update(f1_score, epoch)

            if best_f1_score < f1_score:
                best_f1_score = f1_score
                best_epoch = epoch
                # Checkpoint only on validation-F1 improvement.
                DynGCN_saver(session, config, best_f1_score, best_epoch, "hep")
                INFO_LOG(
                    "*** best f1_score now is %.5f in %d epoch" % (best_f1_score, best_epoch), True)
                INFO_LOG(
                    "BEST Epoch %d Valid " % epoch + str(evaluation_result), True)
            if best_auc_score < auc:
                # AUC best is tracked/logged only; it does not trigger a save.
                best_auc_score = auc
                INFO_LOG(
                    "*** best auc now is %.5f in %d epoch" % (best_auc_score, epoch), True)
            INFO_LOG(
                "*** best f1_score now is %.4f in %d epoch" % (best_f1_score, best_epoch),
                epoch % 100 == 0)

            # Advance to the next graph snapshot every epoch after the first
            # (epoch % 1 == 0 is always true, so only epoch != 0 matters).
            if epoch != 0:
                loader.change_2_next_graph_date()