def generate_graphs(eval_args):
    """
    Generate graphs (networkx format) given a trained generative model
    and save them to a directory.

    :param eval_args: ArgsEvaluate object
    """
    train_args = eval_args.train_args

    # Dispatch to the generator that matches the trained model type
    if train_args.note == 'GraphRNN':
        gen_graphs = gen_graphs_graph_rnn(eval_args)
    elif train_args.note == 'DFScodeRNN':
        gen_graphs = gen_graphs_dfscode_rnn(eval_args)
    elif train_args.note == 'DGMG':
        gen_graphs = gen_graphs_dgmg(eval_args)
    else:
        raise ValueError('Unknown model type: {}'.format(train_args.note))

    # Start from a clean output directory
    if os.path.isdir(eval_args.current_graphs_save_path):
        shutil.rmtree(eval_args.current_graphs_save_path)
    os.makedirs(eval_args.current_graphs_save_path)

    save_graphs(eval_args.current_graphs_save_path, gen_graphs)
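
# The if/elif dispatch above can also be written as a lookup table, which keeps
# the generator selection in one place when new model types are added. This is
# only an illustrative sketch, not the project's code; it assumes the same
# generator functions referenced above (gen_graphs_graph_rnn,
# gen_graphs_dfscode_rnn, gen_graphs_dgmg) are importable in this module, and
# select_generator is a hypothetical helper name.

GRAPH_GENERATORS = {
    'GraphRNN': gen_graphs_graph_rnn,
    'DFScodeRNN': gen_graphs_dfscode_rnn,
    'DGMG': gen_graphs_dgmg,
}


def select_generator(note):
    # Map a train_args.note value to its generator function
    try:
        return GRAPH_GENERATORS[note]
    except KeyError:
        raise ValueError('Unknown model type: {}'.format(note))
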
def train_model(train_dir, model_name, show_val):
    """
    Train the selected model on the images in train_dir, then save the
    weights, architecture, training graphs, and validation results.

    :param train_dir: directory containing the training images
    :param model_name: one of 'custom', 'bottleneck', 'finetune'
    :param show_val: if True, display misclassified validation images
    """
    prepare_data(train_dir)

    # Train the requested model variant
    if model_name == 'custom':
        model, history, val_pred_probs = train_custom_model(100, 64)
    elif model_name == 'bottleneck':
        override_keras_directory_iterator_next()
        generate_and_save_bottleneck_features(64)
        model, history, val_pred_probs = train_bottleneck_model(100, 64)
    elif model_name == 'finetune':
        override_keras_directory_iterator_next()
        model, history, val_pred_probs = train_fine_tune_model(30, 32)
    else:
        raise ValueError('Unknown model name: {}'.format(model_name))

    min_loss, min_loss_acc = find_min_loss_acc(history)
    postfix = '_{0:.2f}l_{1:.2f}a'.format(min_loss, min_loss_acc)

    # Rename model weights
    try:
        print("Renaming model weights.\n")
        tmp_path = cfg.model_weights_path.replace('.h5', '{}.h5'.format(postfix))
        os.rename(cfg.model_weights_path, tmp_path)
    except Exception as e:
        print("Renaming model weights failed: {}".format(e))

    # Save model architecture
    try:
        print("Saving model architecture.\n")
        save_model(model, postfix)
    except Exception as e:
        print("Saving model architecture failed: {}".format(e))

    # Save loss and accuracy graphs
    try:
        print("Saving loss and accuracy graphs.\n")
        save_graphs(history, postfix)
    except Exception as e:
        print("Saving loss and accuracy graphs failed: {}".format(e))

    # Save validation results
    try:
        print("Saving validation results.\n")

        # Get validation images
        val_imgs, val_labels, val_img_paths = load_train_dir(cfg.val_data_dir)

        # Expand prediction probabilities to two columns for binary classification
        if cfg.nb_classes == 2:
            val_pred_probs = np.append(1.0 - val_pred_probs, val_pred_probs, axis=1)

        # Get predicted labels
        val_pred_labels = np.argmax(val_pred_probs, axis=1)

        # Save validation results
        write_val_results(postfix, val_img_paths, val_labels,
                          val_pred_probs, val_pred_labels)

        # Show misclassified validation images
        if show_val:
            show_mc_val_images(train_dir, val_labels, val_img_paths,
                               val_pred_probs, val_pred_labels)
    except Exception as e:
        print("Saving validation results failed: {}".format(e))
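
# A minimal command-line entry point for train_model, shown only as a usage
# sketch. The flag names (--train-dir, --model, --show-val) are assumptions
# for illustration; the project may expose a different CLI.

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Train an image classifier.')
    parser.add_argument('--train-dir', required=True,
                        help='Directory containing the training images')
    parser.add_argument('--model', default='custom',
                        choices=['custom', 'bottleneck', 'finetune'],
                        help='Which model variant to train')
    parser.add_argument('--show-val', action='store_true',
                        help='Display misclassified validation images')
    args = parser.parse_args()

    train_model(args.train_dir, args.model, args.show_val)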