def _load_model(sess, model_folder):
    """
    Loads the model for inference; if the given folder contains a protobuf
    file, the serialized (frozen) version is loaded instead.

    :param sess: TF session
    :param model_folder: The model folder
    :return: tuple with TF placeholders (image_input, logits, keep_prob)
    """
    if helper.is_model_serialized(model_folder):
        graph = helper.load_serialized_model(sess, MODEL_NAME, _model_folder())
        image_input = graph.get_tensor_by_name('image_input:0')
        keep_prob = graph.get_tensor_by_name('keep_prob:0')
        model_output = graph.get_tensor_by_name('model_output:0')
    else:
        vgg_path = helper.maybe_download_pretrained_vgg(FLAGS.data_dir)
        image_input, keep_prob, layer3, layer4, layer7 = load_vgg(sess, vgg_path)
        model_output = layers(layer3, layer4, layer7, CLASSES_N)
        helper.load_model(sess, _model_folder())

    logits = tf.reshape(model_output, (-1, CLASSES_N))

    return image_input, logits, keep_prob
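# A minimal sketch of how the placeholders returned by _load_model might be used
# for single-image inference. The function name, the softmax step and the reshape
# back to spatial layout are assumptions for illustration, not part of the
# original module.
import numpy as np
import tensorflow as tf

def run_inference(sess, image_input, logits, keep_prob, image):
    """Return per-pixel class probabilities for one image already resized to IMAGE_SHAPE."""
    softmax_op = tf.nn.softmax(logits)
    probs = sess.run(softmax_op,
                     feed_dict={image_input: np.expand_dims(image, 0),
                                keep_prob: 1.0})
    # logits were flattened to (-1, CLASSES_N); restore the (height, width, classes) layout
    return probs.reshape(image.shape[0], image.shape[1], -1)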
def __init__(self):
    self.detection_graph = None
    self.tl_map = {
        1: TrafficLight.GREEN,
        2: TrafficLight.RED,
        3: TrafficLight.YELLOW,
        4: TrafficLight.UNKNOWN
    }

    self.detection_graph = tf.Graph()
    model_path = rospy.get_param('~pb_path')
    load_model(model_path, self.detection_graph)

    # Model warmup - run a dummy classification against a random image
    rospy.loginfo("Model warmup starting")
    with tf.Session(graph=self.detection_graph) as sess:
        image_tensor = sess.graph.get_tensor_by_name('image_tensor:0')
        detection_boxes = sess.graph.get_tensor_by_name('detection_boxes:0')
        detection_scores = sess.graph.get_tensor_by_name('detection_scores:0')
        detection_classes = sess.graph.get_tensor_by_name('detection_classes:0')

        # The dummy image must be defined here, otherwise the warmup run below fails
        gen_image = np.uint8(np.random.randn(1, 600, 800, 3))
        sess.run([detection_boxes, detection_scores, detection_classes],
                 feed_dict={image_tensor: gen_image})
    rospy.loginfo("Model warmup done")
def main(): ap = argparse.ArgumentParser(description='predict.py') ap.add_argument('image_path', nargs='*', action="store", default="./flowers", type=str) ap.add_argument('checkpoint', action="store", type=str) ap.add_argument('--top_k', dest="top_k", action="store", type=int, default=1) ap.add_argument('--category_names', dest="category_names", action="store", type=str) ap.add_argument('--gpu', dest="gpu", action='store_true') args = ap.parse_args() if args.category_names: with open(args.category_names, 'r') as f: cat_to_name = json.load(f) else: cat_to_name = None model = helper.load_model(args.checkpoint) ps, cl = helper.predict(args.image_path[0], model, args.top_k, args.gpu) helper.show_results(ps, cl, args.top_k, cat_to_name)
def main(image_path, saved_model_path, top_k=1, json_path=None):
    '''
    Input: image_path, saved_model_path, top_k, json_path
    Output: a plot of the provided photo with the predicted class as title,
    together with a histogram of the top_k most likely classes and their
    probabilities.
    '''
    model = load_model(saved_model_path)
    probs, class_probs, image = predict(image_path, model, top_k)
    plot(image, probs, class_probs, json_path, top_k)
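# One possible implementation of the plot helper referenced above: show the image
# with the top prediction as title plus a bar chart of the top_k probabilities.
# The layout and the class-name lookup are assumptions for illustration only.
import json
import matplotlib.pyplot as plt

def plot(image, probs, classes, json_path=None, top_k=1):
    if json_path:
        with open(json_path, 'r') as f:
            class_names = json.load(f)
        labels = [class_names.get(str(c), str(c)) for c in classes]
    else:
        labels = [str(c) for c in classes]

    fig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(6, 8))
    ax1.imshow(image)
    ax1.set_title(labels[0])
    ax1.axis('off')
    # horizontal bars, most likely class on top
    ax2.barh(labels[:top_k][::-1], probs[:top_k][::-1])
    ax2.set_xlabel('probability')
    plt.tight_layout()
    plt.show()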
def evaluate_model(args, params): loader_params = { 'batch_size': params['batch_size'], 'shuffle': False, 'num_workers': params['num_workers'] } batch_size = params['batch_size'] _, _, test_loader = data_handler.init_data_loaders(params, loader_params) total_predicted = np.zeros((batch_size, 14)) total_labels = np.zeros((batch_size, 14)) path_to_load = helper.compute_paths( args, params)['save_path'] # compute path from args and params net = networks.init_unified_net(args.model, params) net, _ = helper.load_model(path_to_load, args.epoch, net, optimizer=None, resume_train=False) print( f'In [evaluate_model]: loading the model done from: "{path_to_load}"') print( f'In [evaluate_model]: starting evaluation with {len(test_loader)} batches' ) with torch.no_grad(): for i_batch, batch in enumerate(test_loader): img_batch = batch['image'].to(device).float() label_batch = batch['label'].to(device).float() pred = net(img_batch) if i_batch > 0: total_predicted = np.append(total_predicted, pred.cpu().detach().numpy(), axis=0) total_labels = np.append(total_labels, label_batch.cpu().detach().numpy(), axis=0) else: total_predicted = pred.cpu().detach().numpy() total_labels = label_batch.cpu().detach().numpy() if i_batch % 50 == 0: print( f'In [evaluate_model]: prediction done for batch {i_batch}' ) results_path = helper.compute_paths(args, params)['results_path'] helper.make_dir_if_not_exists(results_path) # plot roc print(f'In [evaluate_model]: starting plotting ROC...') plotting.plot_roc(total_predicted, total_labels, pathology_names, results_path)
def main():
    stop_words = get_stop_words(STOP_WORDS_PATH)
    data = Initialize_Data()
    visualizer = Visualize()

    data.initialize_twitter_posts(TWITTER_POSTS_CSV, TWITTER_DATA_DIR)
    data.initialize_facebook_posts(FACEBOOK_POSTS_CSV, FACEBOOK_DATA_DIR)

    # Visualize the data
    df = np.array(data.posts)
    lf = np.array(data.labels)
    pos_ind = lf == "positive"
    neg_ind = lf == "negative"
    pos = df[pos_ind]
    neg = df[neg_ind]
    visualizer.plot_data_distibution([pos.shape[0], neg.shape[0]],
                                     ["positive", "negative"],
                                     "Training set distribution")

    # Clean up posts
    text_Cleanuper = Posts_Cleansing(data)
    text_Cleanuper.cleanup(Text_Cleanuper())

    # Train and test the model
    clf = train_test_model(create_ngram_model(frozenset(stop_words)),
                           np.array(data.posts),
                           np.array(data.labels) == "positive")

    # Find the best model params and train
    clf = grid_search_model(create_ngram_model, np.array(data.posts),
                            np.array(data.labels) == "positive",
                            frozenset(stop_words))

    print('Saving model')
    save_model(clf, NAIVE_BAYES_MODEL_PATH)

    print('Loading model')
    trained_model = load_model(NAIVE_BAYES_MODEL_PATH)
    train_test_model(trained_model, np.array(data.posts),
                     np.array(data.labels) == "positive")

    importance = get_most_important_features(
        trained_model.named_steps['vect'].vocabulary_.items(),
        trained_model.named_steps['clf'], 10)

    top_scores = [a[0] for a in importance[0]['tops']]
    top_words = [a[1] for a in importance[0]['tops']]
    bottom_scores = [a[0] for a in importance[0]['bottom']]
    bottom_words = [a[1] for a in importance[0]['bottom']]

    visualizer.plot_important_words(top_scores, top_words, bottom_scores,
                                    bottom_words,
                                    "Most important words for relevance")

    Y_predicted_word2vec = trained_model.predict(["Նա վատ աղջիկ է"])
    print(Y_predicted_word2vec)
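# The usage above implies that get_most_important_features returns, per class,
# 'tops' and 'bottom' lists of (score, word) pairs. A sketch of such a helper,
# assuming the pipeline's 'clf' step is a MultinomialNB; the project's actual
# implementation may differ.
def get_most_important_features_sketch(vocab_items, clf, n=10):
    # vocab_items are (word, column_index) pairs from the vectorizer vocabulary
    index_to_word = {idx: word for word, idx in vocab_items}
    # score each word by how much more likely it is under class 1 than class 0
    scores = clf.feature_log_prob_[1] - clf.feature_log_prob_[0]
    ranked = sorted((scores[i], index_to_word[i]) for i in range(len(scores)))
    return {0: {'tops': ranked[-n:][::-1], 'bottom': ranked[:n]}}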
def main(): res = load_model("bert_en_cased_L-12_H-768_A-12-v3", verbose=1) pathname, output, inputs, outputs, onnx_inputs = res output_names = outputs structured_outputs = [ "answer_types", "tf_op_layer_end_logits", "tf_op_layer_start_logits", "unique_ids" ] perf_iter = 5 rtol = 0.01 atol = 0.0001 print("[main] testing ONNX %r" % output) m = rt.InferenceSession(output) results_onnx = m.run(output_names.split(','), onnx_inputs) print("[main] got results, testing perf") start = time.time() for _ in range(perf_iter): _ = m.run(output_names.split(','), onnx_inputs) onnx_runtime_ms = (time.time() - start) / perf_iter * 1000 print("[main] ONNX perf:", onnx_runtime_ms) print("[main] loading TF") imported = tf.saved_model.load(".", tags=['serve']) concrete_func = imported.signatures["serving_default"] tf_inputs = {} for k, v in onnx_inputs.items(): tf_inputs[k.split(":")[0]] = tf.constant(v) tf_func = tf.function(concrete_func) print("[main] running TF") tf_results_d = tf_func(**tf_inputs) #results_tf = [tf_results_d[output].numpy() for output in structured_outputs] print("[main] got results, testing perf") start = time.time() for _ in range(perf_iter): _ = concrete_func(**tf_inputs) tf_runtime_ms = (time.time() - start) / perf_iter * 1000 print("[main] TF perf:", tf_runtime_ms) # for tf_res, onnx_res in zip(results_tf, results_onnx): # np.testing.assert_allclose(tf_res, onnx_res, rtol=rtol, atol=atol) print("[main] Results match") print('[main] device', rt.get_device(), rt.__version__, rt.__file__) print("[main] TF perf, ONNX perf, ratio") print("[main]", tf_runtime_ms, onnx_runtime_ms, tf_runtime_ms / onnx_runtime_ms)
def __init__(self):
    root_path = os.path.dirname(os.path.dirname(__file__))
    if torch.cuda.is_available():
        self.device = torch.device("cuda")
        gpu_ids = list(range(torch.cuda.device_count()))
    else:
        self.device = torch.device("cpu")
        gpu_ids = []
    state_dict = torch.load(os.path.join(root_path, "models", "trained_models",
                                         "densenet201.pth"),
                            map_location=self.device)
    self.model = load_model(25, state_dict)
    self.model = self.model.to(self.device)
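# load_model(25, state_dict) above is assumed to rebuild a DenseNet-201 with a
# 25-class head and then load the saved weights. A minimal sketch of such a
# helper (name and details are assumptions, not the project's actual code):
import torch
from torch import nn
from torchvision import models

def load_densenet201_sketch(num_classes, state_dict):
    model = models.densenet201(pretrained=False)
    # replace the final classifier so its output size matches the checkpoint
    model.classifier = nn.Linear(model.classifier.in_features, num_classes)
    model.load_state_dict(state_dict)
    model.eval()
    return model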
def classify_image(path_to_image, checkpoint, top_k, category_names, gpu):
    if not torch.cuda.is_available() and gpu:
        raise RuntimeError(
            "No GPU available to run the network. Please remove the --gpu "
            "argument to predict using the CPU")

    device = ('cuda' if gpu else 'cpu')
    model = helper.load_model(checkpoint)
    image_tensor = torch.tensor(helper.load_image(path_to_image))
    (probs, classes) = helper.predict(image_tensor, model, top_k, device)

    if category_names is not None:
        # Convert the classes array to hold the string representation of each category
        with open(category_names, 'r') as f:
            cat_to_name = json.load(f)
        classes = [cat_to_name[class_] for class_ in classes]

    return (classes, probs)
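# Hypothetical command-line wrapper around classify_image, mirroring the
# arguments it expects; the flag names and defaults are assumptions for
# illustration only.
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Classify an image with a saved checkpoint.')
    parser.add_argument('path_to_image')
    parser.add_argument('checkpoint')
    parser.add_argument('--top_k', type=int, default=5)
    parser.add_argument('--category_names', default=None)
    parser.add_argument('--gpu', action='store_true')
    args = parser.parse_args()

    classes, probs = classify_image(args.path_to_image, args.checkpoint,
                                    args.top_k, args.category_names, args.gpu)
    for cls, prob in zip(classes, probs):
        print('{}: {:.3f}'.format(cls, prob))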
def test_gray_scale(): # path = 'data_big_original/extracted/images' path = 'data_big' h5_file = 'chest_xray.h5' partition, labels, labels_hot = \ data_handler.read_already_partitioned(h5_file) # just one image that exists now partition = {'train': [], 'validation': [], 'test': ['00000001_000.png']} # labels, labels_hot = None, None preprocess = helper.preprocess_fn(no_crop=True) # does not crop the images device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') loader_params = {'batch_size': 1, 'shuffle': False, 'num_workers': 0} _, _, test_loader = data_handler.create_data_loaders(partition, labels, labels_hot, path, preprocess, device, loader_params, scale='gray') # model_path = "models/max_epochs=30_batch_size=256_pool_mode=max_lr=5e-05_no_crop=True/unified_net_epoch_23.pt" # model_path = 'models/max_epochs=30_batch_size=256_pool_mode=max_lr=0.0001_no_crop=True/unified_net_epoch_8.pt' model_path = "models/max_epochs=50_batch_size=128_pool_mode=max_lr=0.0001_no_crop=True_es=True_26253292/unified_net_epoch_1.pt" # reading the other params from the JSON file with open('params.json', 'r') as f: params = json.load(f) net = helper.load_model(model_path, device, params['transition_params'], 'resnet34') for _, batch in enumerate(test_loader): image = batch['image'].to(device).float() print(f'image shape: {image.shape}') model_name = model_path.split("/")[1] helper.plot_heatmaps(image, net, resize_dim=(256, 256), save_path=f'figures/{model_name}/', show_or_save='show')
if __name__ == "__main__": dict_path = model_path = args.output_base_path + args.task+'/' dict_path += 'dictionary.p' model_path += args.model_file_name #'model_best.pth.tar' dictionary = helper.load_object(dict_path) embeddings_index = helper.load_word_embeddings(args.word_vectors_directory, args.word_vectors_file, dictionary.word2idx) model = BCN(dictionary, embeddings_index, args) if args.cuda: torch.cuda.set_device(args.gpu) model = model.cuda() print('loading model') helper.load_model(model, model_path, 'state_dict', args.cuda) print('vocabulary size = ', len(dictionary)) task_names = ['snli', 'multinli'] if args.task == 'allnli' else [args.task] for task in task_names: test_corpus = data.Corpus(args.tokenize) if 'IMDB' in args.task: ############################################################################### # Load Learning to Skim paper's Pickle file ############################################################################### train_d, dev_d, test_d = helper.get_splited_imdb_data(args.output_base_path+task+'/'+'imdb.p') test_corpus.parse(test_d, task, args.max_example) # test_corpus.parse(args.output_base_path + task + '/' + args.test + '.txt', 'RT', args.max_example) #although IMDB but selected text saved by budget model from theano in 'RT' format
def train():
    # Configure TensorBoard; the results of each training run are saved in a
    # folder named with the current date and time.
    print("Configuring TensorBoard and Saver...")
    tensorboard_dir = 'tensorboard/textcnn' + '/' + time.strftime(
        '%Y%m%d%H%M', time.localtime(time.time()))
    if not os.path.exists(tensorboard_dir):
        os.makedirs(tensorboard_dir)

    tf.summary.scalar("loss", model.loss)
    tf.summary.scalar("accuracy", model.acc)
    merged_summary = tf.summary.merge_all()
    writer = tf.summary.FileWriter(tensorboard_dir)

    # Load the training and validation sets
    print("Loading training and validation data...")
    start_time = time.time()
    x_train, y_train, x_val, y_val = load_data(temp_dir, train_dir, val_dir,
                                               word_to_id, cat_to_id,
                                               config.seq_length)
    time_dif = get_time_dif(start_time)
    print("Time usage:", time_dif)

    total_batch = tf.Variable(0, trainable=False)  # total batch counter, a non-trainable variable

    # Create the session
    session = tf.Session()
    # Restore the weights
    saver = load_model(session, save_dir)
    # Write the graph to TensorBoard
    writer.add_graph(session.graph)

    print('Training and evaluating...')
    start_time = time.time()
    best_acc_val = 0.0  # best validation accuracy so far
    last_improved = session.run(total_batch)  # batch at which the last improvement happened
    require_improvement = 5000  # stop early if no improvement after 5000 batches

    flag = False
    for epoch in range(config.num_epochs):
        print('Epoch:', epoch + 1)
        batch_train = batch_iter(x_train, y_train, config.batch_size)
        for x_batch, y_batch in batch_train:
            feed_dict = feed_data(model, x_batch, y_batch,
                                  config.dropout_keep_prob)

            if session.run(total_batch) % config.save_per_batch == 0:
                # Every save_per_batch batches, write the training summaries to
                # the TensorBoard scalars
                s = session.run(merged_summary, feed_dict=feed_dict)
                writer.add_summary(s, session.run(total_batch))

            if session.run(total_batch) % config.print_per_batch == 0:
                # Every print_per_batch batches, report performance on the
                # training and validation sets
                loss_train, F1_train, _, _ = evaluate(session, model, x_train, y_train)
                loss_val, F1_val, _, _ = evaluate(session, model, x_val, y_val)  # todo

                if F1_val > best_acc_val:
                    # Save the best result so far
                    best_acc_val = F1_val
                    last_improved = session.run(total_batch)
                    saver.save(sess=session, save_path=save_path)
                    improved_str = '*'
                else:
                    improved_str = ''

                time_dif = get_time_dif(start_time)
                msg = 'Iter: {0:>6}, Train Loss: {1:>6.2}, Train F1: {2:>7.2%},' \
                    + ' Val Loss: {3:>6.2}, Val F1: {4:>7.2%}, Time: {5} {6}'
                print(msg.format(session.run(total_batch), loss_train, F1_train,
                                 loss_val, F1_val, time_dif, improved_str))

            session.run(model.optim, feed_dict=feed_dict)  # run the optimization step
            session.run(tf.assign(total_batch, total_batch + 1))
            # incrementing total_batch with tf.assign lets the saver record its value

            if session.run(total_batch) - last_improved > require_improvement:
                # Validation performance has not improved for a long time: stop early
                print("No optimization for a long time, auto-stopping...")
                flag = True
                break  # break out of the inner loop
        if flag:  # break out of the outer loop as well
            break

    session.close()
if l2_status[i]: name += '_l2' if bn_status[i]: name += '_bn' if noise: name += '_noise' if adv_status[n]: name += '_adv' print(name) model_path = utils.make_directory(params_path, model_name) file_path = os.path.join(model_path, name) # load model parameters model_layers, optimization, _ = helper.load_model( model_name, input_shape, dropout_status[i], l2_status[i], bn_status[i]) # build neural network class nnmodel = nn.NeuralNet(seed=247) nnmodel.build_layers(model_layers, optimization, supervised=True) nntrainer = nn.NeuralTrainer(nnmodel, save='best', file_path=file_path) # initialize session sess = utils.initialize_session()
def train_nn(sess, global_step, epochs, batch_size, get_batches_fn, batches_n, train_op, cross_entropy_loss, prediction_op, metrics, metrics_reset_op, image_input, labels, keep_prob, learning_rate, save_model_freq=None, tensorboard_freq=None): """ Train neural network and print out the loss during training. :param sess: TF Session :param global_step: TF Placeholder containing the global step :param epochs: Number of epochs :param batch_size: Batch size :param get_batches_fn: Function to get batches of training data. Call using get_batches_fn(batch_size) :param batches_n: Number of batches to cover all the samples :param train_op: TF Operation to train the neural network :param cross_entropy_loss: TF Tensor for the amount of loss :param prediction_op: TF Tensor for the prediction class (index) :param metrics: Dictionary with the evaluation metrics :param metric_reset_op: TF Tensor used to reset the metrics counters :param image_input: TF Placeholder for input images :param labels: TF Placeholder for label images :param keep_prob: TF Placeholder for dropout keep probability :param learning_rate: TF Placeholder for learning rate :param save_model_freq: The frequency to save the model to disk, None to disable :param tensorboard_freq: The frequency to push the summaries to tensorboard, None to disable """ model_folder = _model_folder() if save_model_freq and helper.checkpoint_exists(model_folder): print( 'Checkpoint exists, restoring model from {}'.format(model_folder)) helper.load_model(sess, model_folder) else: sess.run(tf.global_variables_initializer()) sess.run(tf.local_variables_initializer()) if save_model_freq: saver = tf.train.Saver(max_to_keep=MODELS_LIMIT) iou_mean, iou_op = metrics['iou'] acc_mean, acc_op = metrics['acc'] # Evaluate current step step = global_step.eval(session=sess) start_step = step if tensorboard_freq: # Creates the tensorboard writer train_writer = _summary_writer(sess, model_folder) # Gets the batch of images/labels to feed to the image summary op summary_images, summary_labels = helper.image_summary_batch( os.path.join(FLAGS.data_dir, 'data_road', 'training'), IMAGE_SHAPE, TENSORBOARD_MAX_IMG) # Setup the summary ops summary_op, image_summary_op = _setup_summaries( sess, train_writer, image_input, labels, keep_prob, cross_entropy_loss, prediction_op, iou_mean, acc_mean, summary_images, summary_labels, step, CLASSES_N) training_log = [] print('Model folder: {}'.format(model_folder)) print( 'Training (First batch: {}, Epochs: {}, Batch Size: {}, Learning Rate: {}, Dropout: {}, L2 Reg: {}, Scaling: {})' .format(step + 1, FLAGS.epochs, FLAGS.batch_size, FLAGS.learning_rate, FLAGS.dropout, FLAGS.l2_reg, 'ON' if FLAGS.scale else 'OFF')) best_loss = 9999 ep_loss_incr = 0 start = time.time() for epoch in range(epochs): total_loss = 0 mean_loss = 9999 mean_acc = 0 mean_iou = 0 images_n = 0 # Resets the metrics variables at the beginning of the epoch sess.run(metrics_reset_op) batches = tqdm( get_batches_fn(batch_size), desc= 'Epoch {}/{} (Step: {}, Samples: N/A, Loss: N/A, Acc: N/A, IoU: N/A)' .format(epoch + 1, epochs, step), unit='batches', total=batches_n) for batch_images, batch_labels in batches: feed_dict = { image_input: batch_images, labels: batch_labels, keep_prob: (1.0 - FLAGS.dropout), learning_rate: FLAGS.learning_rate } # Train _ = sess.run(train_op, feed_dict=feed_dict) images_n += len(batch_images) # Evaluate loss, _, mean_iou, _, mean_acc = sess.run( [cross_entropy_loss, iou_op, iou_mean, acc_op, acc_mean], feed_dict={ image_input: batch_images, labels: 
batch_labels, keep_prob: 1.0 }) step = global_step.eval(session=sess) total_loss += loss * len(batch_images) mean_loss = total_loss / images_n # Saves metrics for tensorboard if tensorboard_freq: # Updates the summary according to frequency if step % tensorboard_freq == 0: training_summary = sess.run(summary_op, feed_dict={ image_input: batch_images, labels: batch_labels, keep_prob: 1.0 }) train_writer.add_summary(training_summary, global_step=step) # Writes the image every epoch if step % batches_n == 0: image_pred_summary = sess.run(image_summary_op, feed_dict={ image_input: summary_images, labels: summary_labels, keep_prob: 1.0 }) train_writer.add_summary(image_pred_summary, global_step=step) train_writer.flush() batches.set_description( 'Epoch {}/{} (Step: {}, Samples: {}, Loss: {:.4f}, Acc: {:.4f}, IoU: {:.4f})' .format(epoch + 1, epochs, step, images_n, mean_loss, mean_acc, mean_iou)) training_log.append((mean_loss, mean_acc, mean_iou)) if mean_loss < best_loss: ep_loss_incr = 0 best_loss = mean_loss else: ep_loss_incr += 1 if FLAGS.early_stopping is not None and ep_loss_incr >= FLAGS.early_stopping: print( 'Early Stopping Triggered (Loss not decreasing in the last {} epochs)' .format(ep_loss_incr)) break if save_model_freq and (epoch + 1) % save_model_freq == 0: helper.save_model(sess, saver, MODEL_NAME, model_folder, global_step) log_data = _to_log_data(training_log, start_step, step, batches_n) helper.save_log(log_data, model_folder) helper.plot_log(log_data, model_folder) elapsed = time.time() - start print( 'Training Completed ({:.1f} s): Last batch: {}, Loss: {:.4f}, Acc: {:.4f}, IoU: {:.4f}' .format(elapsed, step, mean_loss, mean_acc, mean_iou)) if save_model_freq: helper.save_model(sess, saver, MODEL_NAME, model_folder, global_step) log_data = _to_log_data(training_log, start_step, step, batches_n) helper.save_log(log_data, model_folder) helper.plot_log(log_data, model_folder)
from torch import randn
from torch import onnx

from linknet_batch import linknet_batch_model
from helper import load_model

exp_model = linknet_batch_model()
exp_model = load_model(exp_model, model_dir="linknet_10epch.pt")

dummy_input = randn(1, 3, 512, 512)
onnx.export(exp_model, dummy_input, "linknet.onnx")
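# Optional sanity check after export (not part of the original script): run the
# same dummy input through onnxruntime and compare against the PyTorch output.
# Assumes onnxruntime is installed.
import numpy as np
import onnxruntime as ort
import torch

exp_model.eval()
with torch.no_grad():
    torch_out = exp_model(dummy_input).numpy()

ort_sess = ort.InferenceSession("linknet.onnx")
input_name = ort_sess.get_inputs()[0].name
onnx_out = ort_sess.run(None, {input_name: dummy_input.numpy()})[0]

print("max abs difference between PyTorch and ONNX outputs:",
      np.abs(torch_out - onnx_out).max())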
if __name__ == '__main__': parMapNet = ParametersMapNet() parIL = Parameters_IL() action_list = np.asarray(parMapNet.action_list) if parIL.use_predefined_test_set: # Open predefined initial configurations and load them in avd avd = AVD_online(par=parIL, nStartPos=0, scene_list=parIL.predefined_test_scenes, action_list=action_list, init_configs=parIL.predefined_confs_file) else: # sample random starting positions and targets from the AVD_online class avd = AVD_online(par=parIL, nStartPos=10, scene_list=["Home_001_1"], action_list=action_list) test_ids = list(range(len(avd))) # Need to load the trained MapNet if parIL.finetune_mapNet: # choose whether to use a finetuned mapNet model or not mapNet_model = hl.load_model(model_dir=parIL.model_dir, model_name="MapNet", test_iter=parIL.test_iters) else: mapNet_model = hl.load_model(model_dir=parIL.mapNet_model_dir, model_name="MapNet", test_iter=parIL.mapNet_iters) if parIL.use_ego_obsv: ego_encoder = Encoder() ego_encoder.cuda() ego_encoder.eval() else: ego_encoder = None evaluate_NavNet(parIL, parMapNet, mapNet_model, ego_encoder, test_iter=parIL.test_iters, test_ids=test_ids, test_data=avd, action_list=action_list)
def main():
    stop_words = get_stop_words(STOP_WORDS_PATH)
    data = Initialize_Data()
    visualizer = Visualize()

    data.initialize_twitter_posts(TWITTER_POSTS_CSV, TWITTER_DATA_DIR)
    data.initialize_facebook_posts(FACEBOOK_POSTS_CSV, FACEBOOK_DATA_DIR)

    # Cleanup posts
    text_Cleanuper = Posts_Cleansing(data)
    text_Cleanuper.cleanup(Text_Cleanuper())

    # Divide data into train and test sets
    X_train, X_test, Y_train, Y_test = train_test_split(data.posts,
                                                        data.labels,
                                                        test_size=0.2,
                                                        random_state=40)

    # Bag-of-words vectorization
    bag_of_words_model = Bag_Of_Words(X_train)
    bag_of_words_model.build_vectorizer(stop_words)
    X_train_counts = bag_of_words_model.data_counts
    X_test_counts = bag_of_words_model.vectorizer.transform(X_test)

    forest = RandomForestClassifier(n_estimators=100)
    forest = forest.fit(X_train_counts, Y_train)

    y_predicted_counts_train = forest.predict(X_train_counts)
    accuracy, precision, recall, f1 = get_metrics(Y_train, y_predicted_counts_train)
    print("Train accuracy = %.3f, precision = %.3f, recall = %.3f, f1 = %.3f" %
          (accuracy, precision, recall, f1))

    y_predicted_counts = forest.predict(X_test_counts)
    accuracy, precision, recall, f1 = get_metrics(Y_test, y_predicted_counts)
    print("Test accuracy = %.3f, precision = %.3f, recall = %.3f, f1 = %.3f" %
          (accuracy, precision, recall, f1))

    # Find the best hyperparameters
    # Number of trees in the random forest
    n_estimators = [int(x) for x in np.linspace(start=200, stop=2000, num=10)]
    # Number of features to consider at every split
    max_features = ['auto', 'sqrt']
    # Maximum number of levels in a tree
    max_depth = [int(x) for x in np.linspace(10, 110, num=11)]
    max_depth.append(None)
    # Minimum number of samples required to split a node
    min_samples_split = [2, 5, 10]
    # Minimum number of samples required at each leaf node
    min_samples_leaf = [1, 2, 4]
    # Method of selecting samples for training each tree
    bootstrap = [True, False]

    # Create the random grid
    random_grid = {
        'n_estimators': n_estimators,
        'max_features': max_features,
        'max_depth': max_depth,
        'min_samples_split': min_samples_split,
        'min_samples_leaf': min_samples_leaf,
        'bootstrap': bootstrap
    }

    # First create the model to tune
    rf = RandomForestClassifier()
    rf_random = RandomizedSearchCV(estimator=rf,
                                   param_distributions=random_grid,
                                   n_iter=100,
                                   cv=3,
                                   verbose=2,
                                   random_state=42,
                                   n_jobs=-1)
    # Fit the random search model
    rf_random.fit(X_train_counts, Y_train)
    print('Get Best Params')
    print(rf_random.best_params_)

    print('Saving model')
    save_model(rf_random, RANDOM_FOREST_MODEL_PATH)

    print('Load model')
    trained_model = load_model(RANDOM_FOREST_MODEL_PATH)

    y_predicted_counts_train = trained_model.predict(X_train_counts)
    accuracy, precision, recall, f1 = get_metrics(Y_train, y_predicted_counts_train)
    print("Train accuracy = %.3f, precision = %.3f, recall = %.3f, f1 = %.3f" %
          (accuracy, precision, recall, f1))

    y_predicted_counts = trained_model.predict(X_test_counts)
    accuracy, precision, recall, f1 = get_metrics(Y_test, y_predicted_counts)
    print("Test accuracy = %.3f, precision = %.3f, recall = %.3f, f1 = %.3f" %
          (accuracy, precision, recall, f1))
# ---
# # Checkpoint
#
# By running the training cell above, your model has been saved under the name
# `trained_rnn`. If you saved your notebook, **you can come back to your code and
# results at any later time**. The code below reloads your results!

# In[26]:

"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import torch
import helper
import problem_unittests as tests

_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
trained_rnn = helper.load_model('./save/trained_rnn')


# ## Generate a TV script
# You can now generate your "fake" TV script!
#
# ### Generate text
# The network generates one word at a time until the script reaches the length you
# ask for. Use the `generate` function to do this. First, use `prime_id` to pick
# the word id that starts the script, then set the length of the generated text
# with `predict_len`. Top-k sampling is used to add randomness to the word choices!

# In[27]:

"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
import torch.nn.functional as F


def generate(rnn,
                n_layers, dropout=0.5)
if train_on_gpu:
    rnn.cuda()

# training the model
trained_rnn = train_rnn(rnn, batch_size, optimizer, criterion, num_epochs,
                        show_every_n_batches)

# saving the trained model (same path as the load below)
helper.save_model('./model/trained_rnn', trained_rnn)
print('Model Trained and Saved')

_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
trained_rnn = helper.load_model('./model/trained_rnn')

# run the cell multiple times to get different results!
gen_length = 400  # modify the length to your preference
prime_word = 'george'  # name for starting the script

pad_word = helper.SPECIAL_WORDS['PADDING']
generated_script = generate(trained_rnn, vocab_to_int[prime_word + ':'],
                            int_to_vocab, token_dict, vocab_to_int[pad_word],
                            gen_length)
print(generated_script)

# save the script to a text file
f = open("generated_script_george.txt", "w")
f.write(generated_script)
f.close()
import torch
import helper
import numpy as np
import torch.nn.functional as F

from tv_scripts_rnn import RNN

train_on_gpu = torch.cuda.is_available()

_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
trained_rnn = helper.load_model('trained_rnn', is_gpu=train_on_gpu)


def generate(rnn, prime_id, int_to_vocab, token_dict, pad_value, predict_len=100):
    """
    Generate text using the neural network
    :param rnn: The PyTorch Module that holds the trained neural network
    :param prime_id: The word id to start the first prediction
    :param int_to_vocab: Dict of word id keys to word values
    :param token_dict: Dict of punctuation token keys to punctuation values
    :param pad_value: The value used to pad a sequence
    :param predict_len: The length of text to generate
    :return: The generated text
    """
    rnn.eval()

    sequence_length = 10

    # create a sequence (batch_size=1) with the prime_id
results = {} for model_name in model_names: for activation in activations: base_name = model_name+'_'+activation print(base_name) results[base_name] = {} trial_roc_mean = [] trial_roc_std = [] trial_pr_mean = [] trial_pr_std = [] for trial in range(num_trials): keras.backend.clear_session() # load model model = helper.load_model(model_name, activation=activation) name = base_name+'_'+str(trial) print('model: ' + name) # compile model helper.compile_model(model) # setup callbacks callbacks = helper.get_callbacks(monitor='val_auroc', patience=20, decay_patience=5, decay_factor=0.2) # fit model history = model.fit(x_train, y_train, epochs=100, batch_size=100, shuffle=True,
import sys import pandas as pd from tqdm import tqdm sys.path.append(os.getcwd()) import helper from Config import UPLOAD_FOLDER, FN_DF_TRANSFORMED app = Flask(__name__) app.secret_key = "secret key" app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024 ALLOWED_EXTENSIONS = set(['pdf', 'png', 'jpg', 'jpeg', 'gif']) trained_densenet_model = helper.load_model() def allowed_file(filename): return '.' in filename and filename.rsplit( '.', 1)[1].lower() in ALLOWED_EXTENSIONS @app.route('/') def upload_form(): return render_template('upload.html') @app.route('/', methods=['POST']) def upload_file(): if request.method == 'POST':
import time

import cv2
import numpy as np
import torch
from PIL import Image
from torchvision import transforms, utils

from linknet import linknet_model
from helper import load_model, make_mask_overlay, get_ids_from_file_in_list

thrs = 0.45400802
upper = 1
lower = 0

start_time = time.time()

segm_model = linknet_model()
segm_model = load_model(segm_model, model_dir="linknet_10epch.pt")

direc = "./TestImages/"
saved_dir = "./TestResultsImagesLink/"

list_ids = get_ids_from_file_in_list(direc)

for img_id in list_ids:
    img = Image.open(direc + img_id)
    trf = transforms.Compose([transforms.Resize([512, 512]),
                              transforms.ToTensor()])
    img_input = trf(img)
    img_input = img_input.unsqueeze(dim=0)

    output = segm_model(img_input)
    output = torch.sigmoid(output)

    inference_time = time.time() - start_time
    print("Time to do inference was: {:.5f} seconds".format(inference_time))

    output = output.squeeze(0)
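    # Hypothetical continuation of the loop above (not in the original snippet):
    # binarize the sigmoid output with the threshold defined at the top and save
    # the resulting mask; the resize target and the output path are assumptions.
    mask = output.detach().numpy()[0]                  # (512, 512) probability map
    mask = np.where(mask > thrs, upper, lower).astype(np.uint8) * 255
    mask = cv2.resize(mask, img.size)                  # img.size is (width, height)
    cv2.imwrite(saved_dir + img_id, mask)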
num_epochs = adv[0] num_clean_epochs = adv[1] prob_clean = adv[2] tf.reset_default_graph() # compile neural trainer name = model_name+'_adv' + str(idx) print('model: ' + name) file_path = os.path.join(model_path, name) # load model parameters model_layers, optimization, _ = helper.load_model(model_name, input_shape, output_shape) # build neural network class nnmodel = nn.NeuralNet() nnmodel.build_layers(model_layers, optimization, supervised=True) grad_tensor = tf.gradients(nnmodel.mean_loss, nnmodel.placeholders['inputs'])[0] xx = nnmodel.placeholders['inputs'] yy = nnmodel.placeholders['targets'] is_train = nnmodel.placeholders['is_training'] loss = nnmodel.mean_loss # nnmodel.inspect_layers() performance = nn.MonitorPerformance('train', optimization['objective'], verbose) performance.set_start_time(start_time = time.time())
train_y, dev_y, test_y = datasets_y hp.saveLogMsg("#Train={}, #Dev={}, #Test={}".format(len(train_x), len(dev_x), len(test_x))) # Finding Labels in Dataset hp.saveLogMsg("\nFinding labels...") labels = [each_y for sample_y in train_y for each_y in sample_y] labels = list(set(labels)) labels.remove('O') hp.saveLogMsg("#Labels={}\n".format(len(labels) + 1)) # Run Model handler = CRFHandler(labels) model = None if cf.MODE == "test" and os.path.exists(cf.MODEL_PATH): model = hp.load_model() hp.saveLogMsg("\nLoading best model from {}".format(cf.MODEL_PATH)) else: model = handler.train(train_x, train_y) hp.save_model(model) hp.saveLogMsg("\nSaving best model at {}".format(cf.MODEL_PATH)) assert model is not None # Eval Model if cf.TEST_LABELED: acc_score, clf_report = handler.evaluate(model, dev_x, dev_y) hp.saveLogMsg('\n[DEV] Accuracy Score: {}'.format(acc_score)) hp.saveLogMsg('\n[DEV] Classification Report: \n{}'.format(clf_report)) else: handler.predict(model, test_x) hp.saveLogMsg('\nSaving prediction at {}'.format(cf.PREDICT_PATH))
print( 'number of trainable parameters = ', numpy.sum(list(param_dict_selector.values())), numpy.sum(list(param_dict.values())), numpy.sum(list(param_dict.values())) + numpy.sum(list(param_dict_selector.values()))) if args.cuda: torch.cuda.set_device(args.gpu) selector = selector.cuda() model = model.cuda() if args.load_model == 0 or args.load_model == 2: print('loading selector') helper.load_model( selector, args.output_base_path + args.task + '/' + args.selector_file_name, 'selector', args.cuda) if args.load_model == 1 or args.load_model == 2: print('loading classifier') helper.load_model( model, args.output_base_path + args.task + '/' + args.classifier_file_name, 'state_dict', args.cuda) if args.resume: if os.path.isfile(args.resume): print("=> loading checkpoint '{}'".format(args.resume)) checkpoint = helper.load_checkpoint(args.resume) args.start_epoch = checkpoint['epoch'] best_acc = checkpoint['best_acc'] selector.load_state_dict(checkpoint['selector'])
def evaluate_iobb(args, params, img_name=None, img_disease=None): """ Goes through every image in the BBox csv file, and calculates IoBB. Results are stored in data/iobb.npy together with image name and disease type. :param img_name: If given, only load one image and plot bbox and heatmap. :param img_disease: Which disease to plot bbox for, used together with img_name. """ pathologies = { 'Atelectasis': 0, 'Cardiomegaly': 1, 'Effusion': 4, 'Infiltrate': 8, 'Mass': 9, 'Nodule': 10, 'Pneumonia': 12, 'Pneumothorax': 13 } ############################################ ### Load model checkpoint and get heatmap. ############################################ path_to_load = helper.compute_paths(args, params)['save_path'] net = networks.init_unified_net(args.model, params) net, _ = helper.load_model(path_to_load, args.epoch, net, optimizer=None, resume_train=False) # Get bbox_data from csv file. f = open('../data/BBox_List_2017.csv', 'rt') reader = csv.reader(f) rows = list(reader)[1:] # ignoring the first row because it is the titles # A list of tuples (img_name, disease_index, iobb, num_bboxes) results = [] for i, img_data in enumerate(rows): # Make sure image exists. file_path = f'../data/extracted/images/{img_data[0]}' if not os.path.isfile(file_path): continue # If only loading one image, check if this row contains the img and correct disease, otherwise continue. if img_name is not None: if img_data[0] != img_name: continue if img_disease is not None and pathologies[ img_data[1]] != img_disease: continue ############################################ ### Load image and turn into tensor. ############################################ xray_img = Image.open(file_path) rgb = Image.new('RGB', xray_img.size) rgb.paste(xray_img) preprocess = transforms.Compose([ transforms.Resize(256), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ]) img_tensor = preprocess(rgb) img_tensor.unsqueeze_(0) # Get heatmap for correct disease and turn into numpy array. heatmap = net.forward(img_tensor, return_cam=True) disease = pathologies[img_data[1]] heatmap = heatmap[0, disease].numpy() ground_truth = BBox(float(img_data[2]), float(img_data[3]), float(img_data[4]), float(img_data[5])) # Save results if evaluating all images, not just plotting one. if img_name is None: iobb, num = evaluate_single_bbox(ground_truth, heatmap, iobb=True) results.append((img_data[0], disease, iobb, num)) print(f'#{i}, n:{num}, {iobb}') else: iobb, _ = evaluate_single_bbox(ground_truth, heatmap, iobb=True, xray_img=xray_img) print(f'iobb: {iobb}') break if img_name is None: # Order results by IoBB value. results = sorted(results, key=lambda x: x[2], reverse=True) results = np.array(results) # Save as numpy array. np.save(f'../data/iou_blob_{args.model}.npy', results) # Save as txt with open(f'../data/iou_blob_{args.model}.txt', 'w') as txt: for i in range(results.shape[0]): txt.write( f'{float(results[i][2])}, {int(results[i][1])}, {int(results[i][3])}, {results[i][0]}\n' ) f.close() # Close csv file.
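# The IoBB computation itself is delegated to evaluate_single_bbox above. For
# reference only, a minimal axis-aligned box-intersection helper is sketched
# below (boxes given as x, y, w, h); the exact IoBB definition used by the
# project is not shown here, so treat this purely as illustrative geometry.
def box_intersection_area(box_a, box_b):
    """Return the overlap area of two (x, y, w, h) boxes in pixel coordinates."""
    ax1, ay1, ax2, ay2 = box_a[0], box_a[1], box_a[0] + box_a[2], box_a[1] + box_a[3]
    bx1, by1, bx2, by2 = box_b[0], box_b[1], box_b[0] + box_b[2], box_b[1] + box_b[3]
    iw = max(0.0, min(ax2, bx2) - max(ax1, bx1))
    ih = max(0.0, min(ay2, by2) - max(ay1, by1))
    return iw * ih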
f.write('%s\t%s\t%s\n'%('model', 'ave roc', 'ave pr')) results = {} for model_name in model_names: results[model_name] = {} for activation in activations: trial_roc_mean = [] trial_roc_std = [] trial_pr_mean = [] trial_pr_std = [] for trial in range(num_trials): keras.backend.clear_session() # load model model = helper.load_model(model_name, activation=activation, input_shape=200) name = model_name+'_'+activation+'_'+str(trial) print('model: ' + name) # compile model helper.compile_model(model) # setup callbacks callbacks = helper.get_callbacks(monitor='val_aupr', patience=20, decay_patience=5, decay_factor=0.2) # fit model history = model.fit(x_train, y_train, epochs=100, batch_size=100,
def evaluate_NavNet(parIL, parMapNet, mapNet, ego_encoder, test_iter, test_ids, test_data, action_list): print("\nRunning validation on NavNet!") with torch.no_grad(): policy_net = hl.load_model(model_dir=parIL.model_dir, model_name="ILNet", test_iter=test_iter) acc, epi_length, path_ratio = 0, 0, 0 episode_results, episode_count = {}, 0 # store predictions for i in test_ids: test_ex = test_data[i] # Get all info for the starting position mapNet_input_start = prepare_mapNet_input(ex=test_ex) target_lbl = test_ex["target_lbl"] im_obsv = test_ex['image_obsv'].cuda() dets_obsv = test_ex['dets_obsv'].cuda() tvec = torch.zeros(1, parIL.nTargets).float().cuda() tvec[0,target_lbl] = 1 # We need to keep other info to allow us to do the steps later image_name, scene, scale = [], [], [] image_name.append(test_ex['image_name']) scene.append(test_ex['scene']) scale.append(test_ex['scale']) shortest_path_length = test_ex['path_length'] if parIL.use_p_gt: # get the ground-truth pose, which is the relative pose with respect to the first image info, annotations, _ = dh.load_scene_info(parIL.avd_root, scene[0]) im_names_all = info['image_name'] # info 0 # list of image names in the scene im_names_all = np.hstack(im_names_all) # flatten the array start_abs_pose = dh.get_image_poses(info, im_names_all, image_name, scale[0]) # init pose of the episode # 1 x 3 # Get state from mapNet p_, map_ = mapNet.forward_single_step(local_info=mapNet_input_start, t=0, input_flags=parMapNet.input_flags, update_type=parMapNet.update_type) collision_ = torch.tensor([0], dtype=torch.float32).cuda() # collision indicator is 0 if parIL.use_ego_obsv: enc_in = torch.cat((im_obsv, dets_obsv), 0).unsqueeze(0) ego_obsv_feat = ego_encoder(enc_in) # 1 x 512 x 1 x 1 state = (map_, p_, tvec, collision_, ego_obsv_feat) else: state = (map_, p_, tvec, collision_) current_im = image_name[0] done=0 image_seq, action_seq = [], [] image_seq.append(current_im) policy_net.hidden = policy_net.init_hidden(batch_size=1, state_items=len(state)-1) for t in range(1, parIL.max_steps+1): pred_costs = policy_net(state, parIL.use_ego_obsv) # apply policy for single step pred_costs = pred_costs.view(-1).cpu().numpy() # choose the action with a certain prob pred_probs = softmax(-pred_costs) pred_label = np.random.choice(len(action_list), 1, p=pred_probs)[0] pred_action = action_list[pred_label] # get the next image, check collision and goal next_im = test_data.scene_annotations[scene[0]][current_im][pred_action] if next_im=='': image_seq.append(current_im) else: image_seq.append(next_im) action_seq.append(pred_action) print(t, current_im, pred_action, next_im) if not(next_im==''): # not collision case collision = 0 # check for goal path_dist = len(nx.shortest_path(test_data.graphs_dict[target_lbl][scene[0]], next_im, "goal")) - 2 if path_dist <= parIL.steps_from_goal: # GOAL! 
acc += 1 epi_length += t path_ratio += t/float(shortest_path_length) # ratio of estimated path towards shortest path done=1 break # get next state from mapNet batch_next, obsv_batch_next = test_data.get_step_data(next_ims=[next_im], scenes=scene, scales=scale) if parIL.use_p_gt: next_im_abs_pose = dh.get_image_poses(info, im_names_all, [next_im], scale[0]) abs_poses = np.concatenate((start_abs_pose, next_im_abs_pose), axis=0) rel_poses = dh.relative_poses(poses=abs_poses) next_im_rel_pose = np.expand_dims(rel_poses[1,:], axis=0) p_gt = dh.build_p_gt(parMapNet, pose_gt_batch=np.expand_dims(next_im_rel_pose, axis=1)).squeeze(1) p_next, map_next = mapNet.forward_single_step(local_info=batch_next, t=t, input_flags=parMapNet.input_flags, map_previous=state[0], p_given=p_gt, update_type=parMapNet.update_type) else: p_next, map_next = mapNet.forward_single_step(local_info=batch_next, t=t, input_flags=parMapNet.input_flags, map_previous=state[0], update_type=parMapNet.update_type) if parIL.use_ego_obsv: enc_in = torch.cat(obsv_batch_next, 1) ego_obsv_feat = ego_encoder(enc_in) # b x 512 x 1 x 1 state = (map_next, p_next, tvec, torch.tensor([collision], dtype=torch.float32).cuda(), ego_obsv_feat) else: state = (map_next, p_next, tvec, torch.tensor([collision], dtype=torch.float32).cuda()) current_im = next_im else: # collision case collision = 1 if parIL.stop_on_collision: break if parIL.use_ego_obsv: state = (state[0], state[1], state[2], torch.tensor([collision], dtype=torch.float32).cuda(), state[4]) else: state = (state[0], state[1], state[2], torch.tensor([collision], dtype=torch.float32).cuda()) episode_results[episode_count] = (image_seq, action_seq, parIL.lbl_to_cat[target_lbl], done) episode_count+=1 # store the episodes episode_results_path = parIL.model_dir+'episode_results_eval_'+str(test_iter)+'.pkl' with open(episode_results_path, 'wb') as f: pickle.dump(episode_results, f) success_rate = acc / float(len(test_ids)) if acc > 0: mean_epi_length = epi_length / float(acc) avg_path_length_ratio = path_ratio / float(acc) else: mean_epi_length = 0 avg_path_length_ratio = 0 print("Test iter:", test_iter, "Success rate:", success_rate) print("Mean epi length:", mean_epi_length, "Avg path length ratio:", avg_path_length_ratio)
import json

import pandas

import helper

model_r = helper.load_model("data/model_target_r")
model_g = helper.load_model("data/model_target_g")
model_b = helper.load_model("data/model_target_b")


def do(features):
    """
    Run inference with the loaded models on the specified features.

    Parameters
    ----------
    features : list[dict[str, str]]
        Features to process: a list of dicts with feature names as keys and
        abstracted feature values as values.

    Returns
    -------
    A list of dicts. Every dict must contain all three R, G and B targets.
    If one is missing, it will be considered as `null`.

    Notes
    -----
    The order must be kept for a response to be considered valid by the
    datacrunch arena. If more or fewer lines are returned, everything will be
    considered invalid.