def main(args):
    best_data = 1.0
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
    (feature, target) = load_txt(args.file_path)
    (test_fe, test_tg), (verif_fe, verif_tg), (train_fe, train_tg), test_index, train_index, verif_index = train_test_split(
        feature=feature,
        target=target
    )
    net = network(axis=train_fe.shape[1], lr=args.lr)
    saver = tf.train.Saver(max_to_keep=1)
    # Persist the split indices so downstream scripts can recover the same split.
    # `number` is expected to be defined at module level (see the data-preparation code below).
    np.savetxt(str(number) + "_test_index.csv", test_index, delimiter=",")
    np.savetxt(str(number) + "_train_index.csv", train_index, delimiter=",")
    np.savetxt(str(number) + "_verif_index.csv", verif_index, delimiter=",")
    global sess_global
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        print("session assigned..........")
        sess_global = sess  # expose the session for reuse elsewhere (assumed intent of the global declaration)
        saver.restore(sess, "check_point/model.ckpt")
        for epoch in range(args.epochs):
            train_loss = train(
                sess=sess,
                model=net,
                feature=train_fe,
                target=train_tg,
                batch_size=args.batch_size
            )
            verif_loss = verif(
                sess=sess,
                model=net,
                feature=verif_fe,
                target=verif_tg,
                batch_size=args.batch_size
            )
            print(
                "epoch {}, train_r2={:.4f}, train_mae={:.4f}, train_rmse={:.4f}, "
                "verif_r2={:.4f}, verif_mae={:.4f}, verif_rmse={:.4f}"
                .format(epoch, train_loss[1], train_loss[2], train_loss[3],
                        verif_loss[1], verif_loss[2], verif_loss[3])
            )
            # Keep only the checkpoint with the lowest verification MAE seen so far.
            if verif_loss[2] < best_data:
                best_data = verif_loss[2]
                print("*" * 10 + " save model " + "*" * 10)
                saver.save(sess, "check_point/model.ckpt")
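# A minimal sketch of the train_test_split() helper assumed by the first main() above:
# it must return (test, verification, train) feature/target pairs together with the
# corresponding index arrays. The 10/10/80 ratio, the shuffling, and the seed argument
# are assumptions for illustration; the real helper is defined elsewhere in the repo.
import numpy as np

def train_test_split(feature, target, seed=0):
    index = np.arange(feature.shape[0])
    rng = np.random.default_rng(seed)
    rng.shuffle(index)
    n_test = len(index) // 10
    n_verif = len(index) // 10
    test_index = index[:n_test]
    verif_index = index[n_test:n_test + n_verif]
    train_index = index[n_test + n_verif:]
    return ((feature[test_index], target[test_index]),
            (feature[verif_index], target[verif_index]),
            (feature[train_index], target[train_index]),
            test_index, train_index, verif_index)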
def main(args):
    best_data = 1.0
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
    (feature, target) = load_txt(args.file_path)
    # Two-way split only: this variant trains from scratch instead of resuming a checkpoint.
    (verif_fe, verif_tg), (train_fe, train_tg) = train_test_split(
        feature=feature,
        target=target,
        percent=args.percent
    )
    net = network(axis=train_fe.shape[1], lr=args.lr)
    saver = tf.train.Saver(max_to_keep=1)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(args.epochs):
            train_loss = train(
                sess=sess,
                model=net,
                feature=train_fe,
                target=train_tg,
                batch_size=args.batch_size
            )
            verif_loss = verif(
                sess=sess,
                model=net,
                feature=verif_fe,
                target=verif_tg,
                batch_size=args.batch_size
            )
            print(
                "epoch {}, train_r2={:.4f}, train_mae={:.4f}, train_rmse={:.4f}, "
                "verif_r2={:.4f}, verif_mae={:.4f}, verif_rmse={:.4f}"
                .format(epoch, train_loss[1], train_loss[2], train_loss[3],
                        verif_loss[1], verif_loss[2], verif_loss[3])
            )
            if verif_loss[2] < best_data:
                best_data = verif_loss[2]
                print("*" * 10 + " save model " + "*" * 10)
                saver.save(sess, "check_point/model.ckpt")
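# A hedged sketch of the metrics that the train()/verif() helpers used by both main()
# variants are assumed to return as a (loss, r2, mae, rmse) tuple. Only the standard
# regression formulas are shown here; how the helpers batch the data and run the graph
# is not reproduced.
import numpy as np

def regression_metrics(y_true, y_pred):
    y_true = np.asarray(y_true, dtype=np.float32).ravel()
    y_pred = np.asarray(y_pred, dtype=np.float32).ravel()
    residual = y_true - y_pred
    mse = float(np.mean(residual ** 2))        # mean squared error (training loss)
    mae = float(np.mean(np.abs(residual)))     # mean absolute error
    rmse = float(np.sqrt(mse))                 # root mean squared error
    ss_res = float(np.sum(residual ** 2))
    ss_tot = float(np.sum((y_true - y_true.mean()) ** 2))
    r2 = 1.0 - ss_res / ss_tot                 # coefficient of determination
    return mse, r2, mae, rmse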
warnings.warn = warn  # monkey-patch warnings.warn (warn is presumably a no-op helper defined earlier)
number = 508
target_index = np.loadtxt(str(number) + "_test_index.csv").astype(np.float32)
feature_number = 132
data = np.loadtxt(str(number) + "_magpie_target.csv", dtype=str, delimiter=",")
target = data[:, 134:].astype(np.float32)  # columns 134 onward hold the target spectrum
target_spectrum = target[int(target_index)]
# Parse the element symbols out of the formula string in the first column.
elements = np.array(re.findall(r"\D+\D*", data[0, 0]))
elements = elements[elements != '.']
print(elements)
element_count = len(elements)
net = network(axis=feature_number, lr=1e-3)
saver = tf.train.Saver(max_to_keep=1)


def magpie_feature(formula):
    # Featurize a single formula string with matminer's Magpie ElementProperty preset
    # and return the descriptor vector (the "formula" column is skipped).
    data = [formula]
    df = pd.DataFrame(data, columns=["formula"])
    df["composition"] = df["formula"].transform(str_to_composition)
    ep_feat = ElementProperty.from_preset(preset_name="magpie")
    df = ep_feat.featurize_dataframe(df, col_id="composition")
    df.drop(labels=["composition"], axis=1, inplace=True)
    return df.iloc[0, 1:].to_numpy()


def target(x1, x2):  # note: rebinds the name `target`; the array above has already been reduced to target_spectrum
    formula = ''
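# Hedged usage example for magpie_feature(): featurize one composition string and
# confirm the vector length matches feature_number (the Magpie ElementProperty preset
# yields 132 descriptors). "Fe2O3" is only an illustrative formula.
vec = magpie_feature("Fe2O3")
print(vec.shape)
assert vec.shape[0] == feature_number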
gene_length = 7
element_count = len(elements)
feature_number = 132

# DEAP setup: maximize a single fitness value over binary individuals whose length
# allots gene_length bits to every element.
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", numpy.ndarray, fitness=creator.FitnessMax)

toolbox = base.Toolbox()
toolbox.register("attr_bool", random.randint, 0, 1)
toolbox.register("individual", tools.initRepeat, creator.Individual,
                 toolbox.attr_bool, n=gene_length * element_count)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)

net = network(axis=132, lr=1e-3)
saver = tf.train.Saver(max_to_keep=1)


def magpie_feature(formula):
    data = [formula]
    df = pd.DataFrame(data, columns=["formula"])
    df["composition"] = df["formula"].transform(str_to_composition)
    ep_feat = ElementProperty.from_preset(preset_name="magpie")
    df = ep_feat.featurize_dataframe(df, col_id="composition")
    df.drop(labels=["composition"], axis=1, inplace=True)
    return df.iloc[0, 1:].to_numpy()


def evalresult(individual):
    formula = ''
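# A hedged sketch of how a binary individual of length gene_length * element_count could
# be decoded into a chemical formula before evalresult() featurizes and scores it. The
# 7-bit unsigned-integer-per-element decoding and the formula construction are assumptions;
# the actual decoding lives in the evalresult body.
def decode_formula(individual, elements, gene_length=7):
    formula = ''
    for i, element in enumerate(elements):
        bits = individual[i * gene_length:(i + 1) * gene_length]
        count = int(''.join(str(int(b)) for b in bits), 2)  # bit slice -> stoichiometric coefficient
        if count > 0:
            formula += '{}{}'.format(element, count)
    return formula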
def train(args, test_image, test=False):
    log_file = None
    if not test:
        log_file = open(args.log_file, 'w')
        minibatchs_X, minibatchs_Y = make_minibatches(*load_data(args.datafile), args.minibatch_size)
        print("Data loaded")
        log_file.write("Data loaded\n")

    tf.reset_default_graph()
    X = tf.placeholder(tf.float32, shape=[None, 160, 320, 3], name='X')
    Y = tf.placeholder(tf.float32, shape=[None, 4], name='Y')
    pred = network(X)

    with tf.name_scope('train'):
        loss = tf.reduce_mean(tf.pow(Y - pred, 2))
        tf.summary.scalar('loss', loss)
        train_op = tf.train.AdamOptimizer().minimize(loss)

    summary = tf.summary.merge_all()
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()

    with tf.Session() as sess:
        if test:
            # Inference mode: restore the latest checkpoint and predict on the given image.
            restore_path = tf.train.latest_checkpoint('model')
            saver.restore(sess, restore_path)
            print('restored the model from ' + str(restore_path))
            pred_action = sess.run(pred, feed_dict={X: test_image})
            return pred_action

        if args.restore:
            restore_path = tf.train.latest_checkpoint(args.save_dir)
            saver.restore(sess, restore_path)
            print('restored the model from ' + str(restore_path))
            log_file.write('restored the model from ' + str(restore_path) + "\n")
        else:
            sess.run(init)

        summary_writer = tf.summary.FileWriter(args.summary_dir, sess.graph)
        train_start = time.time()
        print("Training started")
        log_file.write("Training started\n")

        for epoch in range(args.num_epochs):
            epoch_start = time.time()
            num_minibatches = len(minibatchs_X)
            losses = []
            for minibatch in range(num_minibatches):
                _loss, _, _summ = sess.run(
                    [loss, train_op, summary],
                    feed_dict={X: minibatchs_X[minibatch], Y: minibatchs_Y[minibatch]})
                losses.append(_loss)
                print("Epoch " + str(epoch + 1) + " - Minibatch " + str(minibatch + 1)
                      + " completed with loss = " + str(_loss))
                log_file.write("Epoch " + str(epoch + 1) + " - Minibatch " + str(minibatch + 1)
                               + " completed with loss = " + str(_loss) + "\n")
                summary_writer.add_summary(_summ)

            print("EPOCH " + str(epoch + 1) + " completed in " + str(time.time() - epoch_start)[:5]
                  + " secs with average loss = " + str(sum(losses) / len(losses)))
            log_file.write("EPOCH " + str(epoch + 1) + " completed in " + str(time.time() - epoch_start)[:5]
                           + " secs with average loss = " + str(sum(losses) / len(losses)) + "\n")

            save_path = saver.save(sess, args.save_dir + 'model.ckpt')
            print("Model saved in the dir " + str(save_path))
            log_file.write("Model saved in the dir " + str(save_path) + "\n")

        print("Training Finished in " + str(time.time() - train_start)[:5] + " secs")
        log_file.write("Training Finished in " + str(time.time() - train_start)[:5] + " secs\n")
        log_file.close()
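# A minimal sketch of the make_minibatches() helper assumed by train() above: shuffle the
# (images, labels) arrays returned by load_data() and slice them into fixed-size minibatches.
# Shuffling and dropping the last partial batch are assumptions for illustration.
import numpy as np

def make_minibatches(images, labels, minibatch_size):
    index = np.random.permutation(len(images))
    images, labels = images[index], labels[index]
    num_batches = len(images) // minibatch_size
    minibatchs_X, minibatchs_Y = [], []
    for b in range(num_batches):
        start = b * minibatch_size
        minibatchs_X.append(images[start:start + minibatch_size])
        minibatchs_Y.append(labels[start:start + minibatch_size])
    return minibatchs_X, minibatchs_Y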