def vis_latent_walk(params):
    agent = agent_lookup(params)
    restore_model(agent, params['restore'], params['use_cuda'])
    if params['use_cuda']:
        agent.cuda()
    agent.eval()

    if os.path.exists('./out/latents/beta1000/walk/'):
        shutil.rmtree('./out/latents/beta1000/walk/')
    for i in range(agent.rep_size):
        os.makedirs('./out/latents/beta1000/walk/{0}'.format(i + 1))

    z, r = agent.sample_latent()
    r = r.numpy()[0]
    img = get_img_state(np.split(r, 4))
    img.save(open('./out/latents/beta1000/walk/orig.png', 'wb'))

    deltas = np.linspace(-5, 5, 100)
    for dim in range(agent.rep_size):
        print 'Walking latent space along dimension {0}'.format(dim + 1)
        for i, delta in enumerate(deltas):
            cz = z.clone()
            cz[:, dim] += delta
            img = agent.decode_latent(cz).numpy()[0]
            img = get_img_state(np.split(img, 4))
            img.save(
                open(
                    './out/latents/beta1000/walk/{0}/{1}.png'.format(dim + 1, i),
                    'wb'))
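The PyTorch snippets here (and the agent evaluation/training loops further down) assume a `restore_model(agent, restore_path, use_cuda)` helper. A minimal sketch of what such a helper could look like, assuming the checkpoint is a plain state_dict file; the signature and file format are assumptions, not the original implementation:

import torch


def restore_model(agent, restore_path, use_cuda):
    # Assumed helper: load a saved state_dict and copy it into the agent,
    # mapping tensors to CPU when CUDA is not used.
    map_location = None if use_cuda else 'cpu'
    state_dict = torch.load(restore_path, map_location=map_location)
    agent.load_state_dict(state_dict)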
def detect_pores(imgs):
    with tf.Graph().as_default():
        # placeholder for image
        image_pl, _ = utils.placeholder_inputs()

        # build detection net
        print('Building detection net graph...')
        det_net = detection.Net(image_pl, training=False)
        print('Done')

        with tf.Session() as sess:
            print('Restoring detection model in {}...'.format(
                FLAGS.det_model_dir))
            utils.restore_model(sess, FLAGS.det_model_dir)
            print('Done')

            # capture detection arguments in function
            def detect_pores(image):
                return utils.detect_pores(image, image_pl, det_net.predictions,
                                          FLAGS.det_patch_size,
                                          FLAGS.det_prob_thr,
                                          FLAGS.nms_inter_thr, sess)

            # detect pores
            dets = [detect_pores(img) for img in imgs]

    return dets
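Most of the TensorFlow snippets call `utils.restore_model(sess, model_dir)`. A plausible sketch under the assumption that models are saved with standard `tf.train.Saver` checkpoints (the original helper may differ):

import tensorflow as tf


def restore_model(sess, model_dir):
    # Assumed helper: restore the latest checkpoint found in model_dir
    # into the variables of the current default graph.
    ckpt_path = tf.train.latest_checkpoint(model_dir)
    if ckpt_path is None:
        raise IOError('No checkpoint found in {}'.format(model_dir))
    saver = tf.train.Saver()
    saver.restore(sess, ckpt_path)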
def describe_detections(imgs, dets):
    with tf.Graph().as_default():
        # placeholder for image
        image_pl, _ = utils.placeholder_inputs()

        # build description net
        print('Building description net graph...')
        desc_net = description.Net(image_pl, training=False)
        print('Done')

        with tf.Session() as sess:
            print('Restoring description model in {}...'.format(
                FLAGS.desc_model_dir))
            utils.restore_model(sess, FLAGS.desc_model_dir)
            print('Done')

            # capture description arguments in function
            def compute_descriptors(image, dets):
                return utils.trained_descriptors(image, dets,
                                                 FLAGS.desc_patch_size, sess,
                                                 image_pl, desc_net.descriptors)

            # compute descriptors
            descs = [
                compute_descriptors(img, img_dets)
                for img, img_dets in zip(imgs, dets)
            ]

    return descs
def main():
    half_patch_size = FLAGS.patch_size // 2

    with tf.Graph().as_default():
        image_pl, _ = utils.placeholder_inputs()

        print('Building graph...')
        net = models.FCN(image_pl, training=False)
        print('Done')

        with tf.Session() as sess:
            print('Restoring model in {}...'.format(FLAGS.model_dir_path))
            utils.restore_model(sess, FLAGS.model_dir_path)
            print('Done')

            print('Loading image...')
            image = utils.load_image(FLAGS.image_path)
            print('Done')

            print('Detecting pores...')
            detections = utils.detect_pores(image, image_pl, net.predictions,
                                            half_patch_size, FLAGS.prob_thr,
                                            FLAGS.inter_thr, sess)
            print('Done')

            print('Saving detections to {}...'.format(FLAGS.save_path))
            utils.save_dets_txt(detections, FLAGS.save_path)
            print('Done')
def main(_):
    assert FLAGS.checkpoint_dir, "--checkpoint_dir is required."
    assert FLAGS.source_test_path, "--source_test_path is required."
    assert FLAGS.target_test_path, "--target_test_path is required."
    assert FLAGS.reference_test_path, "--reference_test_path is required."
    assert FLAGS.source_vocab_path, "--source_vocab_path is required."
    assert FLAGS.target_vocab_path, "--target_vocab_path is required."

    # Read vocabularies.
    source_vocab, _ = utils.initialize_vocabulary(FLAGS.source_vocab_path)
    target_vocab, _ = utils.initialize_vocabulary(FLAGS.target_vocab_path)

    # Read test set.
    source_sentences, target_sentences, references = utils.read_data_with_ref(
        FLAGS.source_test_path, FLAGS.target_test_path,
        FLAGS.reference_test_path)

    # Convert sentences to token ids sequences.
    source_sentences_ids = [
        utils.sentence_to_token_ids(sent, source_vocab, FLAGS.max_seq_length)
        for sent in source_sentences
    ]
    target_sentences_ids = [
        utils.sentence_to_token_ids(sent, target_vocab, FLAGS.max_seq_length)
        for sent in target_sentences
    ]

    utils.reset_graph()

    with tf.Session() as sess:
        # Restore saved model.
        utils.restore_model(sess, FLAGS.checkpoint_dir)

        # Recover placeholders and ops for evaluation.
        x_source = sess.graph.get_tensor_by_name("x_source:0")
        source_seq_length = sess.graph.get_tensor_by_name("source_seq_length:0")
        x_target = sess.graph.get_tensor_by_name("x_target:0")
        target_seq_length = sess.graph.get_tensor_by_name("target_seq_length:0")
        labels = sess.graph.get_tensor_by_name("labels:0")
        placeholders = [
            x_source, source_seq_length, x_target, target_seq_length, labels
        ]
        probs = sess.graph.get_tensor_by_name("feed_forward/output/probs:0")

        # Run evaluation.
        evaluate(sess, source_sentences, target_sentences, references,
                 source_sentences_ids, target_sentences_ids, probs,
                 placeholders)
def vis_rand_latents(params):
    agent = agent_lookup(params)
    restore_model(agent, params['restore'], params['use_cuda'])
    if params['use_cuda']:
        agent.cuda()
    agent.eval()

    for i in range(100):
        _, r = agent.sample_latent()
        r = r.numpy()[0]
        img = get_img_state(np.split(r, 4))
        img.save(open('./out/latents/beta2/{0}.png'.format(i), 'wb'))
def main(arg0):
    # Load Parameters of the model from save_dir/config.pkl
    print "Load parameters of the model..."
    with open(os.path.join(arg0.save_dir, 'config.pkl'), 'rb') as f:
        args = cPickle.load(f)
    arg = vars(args)

    # Program Parameters
    dataset = arg['dataset']
    print 'dataset : {}'.format(dataset)
    loadfrom = os.path.join(arg0.save_dir, arg['model_file'])

    # Topology model
    conv_type = arg['conv_type']
    filter_shape = arg['filter_shape']
    kernel_size = arg['kernel_size']
    kernel_pool_size = arg['kernel_pool_size']
    print 'conv_type : {}'.format(conv_type)
    print 'filter_shape : {}'.format(filter_shape)
    if conv_type == 'double':
        print 'kernel_size : {}'.format(kernel_size)
        print 'kernel_pool_size : {}'.format(kernel_pool_size)

    # Model data
    batch_size = arg['batch_size']

    # Data
    [_, _, test, num_class, image_shape] = utils.load_normalize_data(dataset)
    (test_x, test_y, test_num) = test

    # Topology
    model = Model(image_shape, filter_shape, num_class, conv_type, kernel_size,
                  kernel_pool_size)

    with tf.Session() as sess:
        print "Loading model..."
        saver = tf.train.Saver()
        utils.restore_model(saver, sess, loadfrom)

        print "Compute error on test set..."
        n_test_batches = test_num / batch_size
        _, err = utils.fwd_eval(sess, model, test_x, test_y, batch_size,
                                n_test_batches)
        print 'The error on test set of {} is {:.4f}'.format(dataset, err)
def main():
    half_patch_size = FLAGS.patch_size // 2

    with tf.Graph().as_default():
        image_pl, _ = utils.placeholder_inputs()

        print('Building graph...')
        net = detection.Net(image_pl, training=False)
        print('Done')

        with tf.Session() as sess:
            print('Restoring model in {}...'.format(FLAGS.model_dir_path))
            utils.restore_model(sess, FLAGS.model_dir_path)
            print('Done')

            # capture arguments in function
            def detect_pores(image):
                return utils.detect_pores(image, image_pl, net.predictions,
                                          half_patch_size, FLAGS.prob_thr,
                                          FLAGS.inter_thr, sess)

            # batch detect in dbi training
            print('Detecting pores in PolyU-HRF DBI Training images...')
            load_path = os.path.join(FLAGS.polyu_dir_path, 'DBI', 'Training')
            save_path = os.path.join(FLAGS.results_dir_path, 'DBI', 'Training')
            batch_detect(load_path, save_path, detect_pores)
            print('Done')

            # batch detect in dbi test
            print('Detecting pores in PolyU-HRF DBI Test images...')
            load_path = os.path.join(FLAGS.polyu_dir_path, 'DBI', 'Test')
            save_path = os.path.join(FLAGS.results_dir_path, 'DBI', 'Test')
            batch_detect(load_path, save_path, detect_pores)
            print('Done')

            # batch detect in dbii
            print('Detecting pores in PolyU-HRF DBII images...')
            load_path = os.path.join(FLAGS.polyu_dir_path, 'DBII')
            save_path = os.path.join(FLAGS.results_dir_path, 'DBII')
            batch_detect(load_path, save_path, detect_pores)
            print('Done')
def main(arg):
    # Program Parameters
    dataset = arg['dataset']
    save_dir = arg['save_dir']
    load_model = arg['load']
    model_filename = arg['model_file']

    # Topology model
    conv_type = arg['conv_type']
    filter_shape = arg['filter_shape']
    kernel_size = arg['kernel_size']
    kernel_pool_size = arg['kernel_pool_size']

    # Data
    [train, valid, _, num_class, image_shape] = utils.load_normalize_data(dataset)

    # Save/load
    saveto = os.path.join(
        save_dir, model_filename) if model_filename is not None else None
    loadfrom = saveto if load_model else None

    # Topology
    model = Model(image_shape, filter_shape, num_class, conv_type, kernel_size,
                  kernel_pool_size)

    with tf.Session() as sess:
        saver = tf.train.Saver()
        save = utils.store_model(saver, sess, saveto)

        # Load the variables of the model if wanted
        if load_model:
            print "Loading model..."
            utils.restore_model(saver, sess, loadfrom)
        else:
            sess.run(tf.global_variables_initializer())

        training(sess, model, arg, train, valid, save)
def main(_):
    assert FLAGS.checkpoint_dir, "--checkpoint_dir is required."
    assert FLAGS.extract_dir, "--extract_dir is required."
    assert FLAGS.source_vocab_path, "--source_vocab_path is required."
    assert FLAGS.target_vocab_path, "--target_vocab_path is required."
    assert FLAGS.source_output_path, "--source_output_path is required."
    assert FLAGS.target_output_path, "--target_output_path is required."
    assert FLAGS.score_output_path, "--score_output_path is required."
    assert FLAGS.source_language, "--source_language is required."
    assert FLAGS.target_language, "--target_language is required."

    # Read vocabularies.
    source_vocab, _ = utils.initialize_vocabulary(FLAGS.source_vocab_path)
    target_vocab, _ = utils.initialize_vocabulary(FLAGS.target_vocab_path)
    source_vocab_words = read_vocabulary(FLAGS.source_vocab_path)
    target_vocab_words = read_vocabulary(FLAGS.target_vocab_path)

    # Read source and target paths for sentence extraction.
    source_paths = []
    target_paths = []
    for file in os.listdir(FLAGS.extract_dir):
        if file.endswith(FLAGS.source_language):
            source_paths.append(os.path.join(FLAGS.extract_dir, file))
        elif file.endswith(FLAGS.target_language):
            target_paths.append(os.path.join(FLAGS.extract_dir, file))
    source_paths.sort()
    target_paths.sort()

    utils.reset_graph()

    with tf.Session() as sess:
        # Restore saved model.
        utils.restore_model(sess, FLAGS.checkpoint_dir)

        # Recover placeholders and ops for extraction.
        x_source = sess.graph.get_tensor_by_name("x_source:0")
        source_seq_length = sess.graph.get_tensor_by_name("source_seq_length:0")
        x_target = sess.graph.get_tensor_by_name("x_target:0")
        target_seq_length = sess.graph.get_tensor_by_name("target_seq_length:0")
        labels = sess.graph.get_tensor_by_name("labels:0")
        placeholders = [
            x_source, source_seq_length, x_target, target_seq_length, labels
        ]
        probs = sess.graph.get_tensor_by_name("feed_forward/output/probs:0")

        with open(FLAGS.source_output_path, mode="w", encoding="utf-8") as source_output_file, \
             open(FLAGS.target_output_path, mode="w", encoding="utf-8") as target_output_file, \
             open(FLAGS.score_output_path, mode="w", encoding="utf-8") as score_output_file:

            source_docs, target_docs = read_docs(FLAGS.extract_dir,
                                                 source_vocab, target_vocab)

            # for source_path, target_path in zip(source_paths, target_paths):
            for source_path, target_path in itertools.product(
                    source_paths, target_paths):
                # print("paths", source_path, target_path)
                # Read sentences from articles.
                source_sentences, target_sentences = read_articles(
                    source_path, target_path)

                # Convert sentences to token ids sequences.
                source_sentences_ids = [
                    utils.sentence_to_token_ids(sent, source_vocab,
                                                FLAGS.max_seq_length)
                    for sent in source_sentences
                ]
                target_sentences_ids = [
                    utils.sentence_to_token_ids(sent, target_vocab,
                                                FLAGS.max_seq_length)
                    for sent in target_sentences
                ]

                # Extract sentence pairs.
                pairs = extract_pairs(sess, source_sentences, target_sentences,
                                      source_sentences_ids,
                                      target_sentences_ids, probs, placeholders)

                if not pairs:
                    continue

                for source_sentence, target_sentence, score in pairs:
                    source_output_file.write(source_sentence)
                    target_output_file.write(target_sentence)
                    score_output_file.write(str(score) + "\n")
train_op = tf.train.AdamOptimizer(args.learning_rate).minimize(loss)

correct_prediction = tf.equal(pred_thresh, y_)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

saver = tf.train.Saver(tf.global_variables())
init_step = 0

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    if args.expert:
        sess.run(tf.assign(w, diff.reshape(784, 1)))
    elif args.restore:
        restore_model(sess, saver, args.restore)

    if args.train:
        batches_per_epoch = np.floor(train_x.shape[0] / args.batch_size) - 1

        for step in range(args.max_steps):
            s = int(step * args.batch_size % batches_per_epoch)
            e = int(s + args.batch_size)

            start_time = time.time()

            # adversarial training
            if args.adv:
                batch_adv = sess.run(x_adv,
                                     feed_dict={
                                         x: train_x[s:e],
                                         y_: train_labels[s:e]
                                     })
def main():
    # parse descriptor and adjust accordingly
    compute_descriptors = None
    if FLAGS.descriptors == 'sift':
        compute_descriptors = utils.sift_descriptors
    elif FLAGS.descriptors == 'dp':
        if FLAGS.patch_size is None:
            raise TypeError('Patch size is required when using dp descriptor')

        compute_descriptors = lambda img, pts: utils.dp_descriptors(
            img, pts, FLAGS.patch_size)
    else:
        if FLAGS.model_dir_path is None:
            raise TypeError(
                'Trained model path is required when using trained descriptor')
        if FLAGS.patch_size is None:
            raise TypeError(
                'Patch size is required when using trained descriptor')

        # create net graph and restore saved model
        from models import description

        img_pl, _ = utils.placeholder_inputs()
        net = description.Net(img_pl, training=False)
        sess = tf.Session()
        print('Restoring model in {}...'.format(FLAGS.model_dir_path))
        utils.restore_model(sess, FLAGS.model_dir_path)
        print('Done')

        compute_descriptors = lambda img, pts: utils.trained_descriptors(
            img, pts, FLAGS.patch_size, sess, img_pl, net.descriptors)

    # parse matching mode and adjust accordingly
    if FLAGS.mode == 'basic':
        match = matching.basic
    else:
        match = matching.spatial

    # make dir path be full appropriate dir path
    imgs_dir_path = None
    pts_dir_path = None
    subject_ids = None
    register_ids = None
    session_ids = None
    if FLAGS.fold == 'DBI-train':
        # adjust paths for appropriate fold
        imgs_dir_path = os.path.join(FLAGS.polyu_dir_path, 'DBI', 'Training')
        pts_dir_path = os.path.join(FLAGS.pts_dir_path, 'DBI', 'Training')

        # adjust ids for appropriate fold
        subject_ids = [
            6, 9, 11, 13, 16, 18, 34, 41, 42, 47, 62, 67, 118, 186, 187, 188,
            196, 198, 202, 207, 223, 225, 226, 228, 242, 271, 272, 278, 287,
            293, 297, 307, 311, 321, 323
        ]
        register_ids = [1, 2, 3]
        session_ids = [1, 2]
    else:
        # adjust paths for appropriate fold
        if FLAGS.fold == 'DBI-test':
            imgs_dir_path = os.path.join(FLAGS.polyu_dir_path, 'DBI', 'Test')
            pts_dir_path = os.path.join(FLAGS.pts_dir_path, 'DBI', 'Test')
        else:
            imgs_dir_path = os.path.join(FLAGS.polyu_dir_path, 'DBII')
            pts_dir_path = os.path.join(FLAGS.pts_dir_path, 'DBII')

        # adjust ids for appropriate fold
        subject_ids = [
            1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
            20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
            37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
            54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
            71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87,
            88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 105, 106, 107,
            108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120,
            125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137,
            138, 139, 140, 141, 142, 143, 144, 157, 158, 159, 160, 161, 162,
            163, 164, 165, 166, 167, 168
        ]
        register_ids = [1, 2, 3, 4, 5]
        session_ids = [1, 2]

    # load images, points, compute descriptors and make indices correspondences
    print('Loading images and detections, and computing descriptors...')
    all_descs, all_pts, id2index = load_dataset(imgs_dir_path, pts_dir_path,
                                                subject_ids, session_ids,
                                                register_ids,
                                                compute_descriptors)
    print('Done')

    print('Matching...')
    pos, neg = polyu_match(all_descs, all_pts, subject_ids, register_ids,
                           id2index, match, thr=FLAGS.thr)
    print('Done')

    # print equal error rate
    print('EER = {}'.format(utils.eer(pos, neg)))

    # save results to file
    if FLAGS.results_path is not None:
        print('Saving results to file {}...'.format(FLAGS.results_path))

        # create directory tree, if non-existing
        dirname = os.path.dirname(FLAGS.results_path)
        dirname = os.path.abspath(dirname)
        if not os.path.exists(dirname):
            os.makedirs(dirname)

        # save comparisons
        with open(FLAGS.results_path, 'w') as f:
            # save same subject scores
            for score in pos:
                print(1, score, file=f)

            # save different subject scores
            for score in neg:
                print(0, score, file=f)

        # save invoking command string
        with open(FLAGS.results_path + '.cmd', 'w') as f:
            print(*sys.argv, file=f)

        print('Done')
def main(_):
    assert FLAGS.checkpoint_dir, "--checkpoint_dir is required."
    assert FLAGS.extract_dir, "--extract_dir is required."
    assert FLAGS.source_vocab_path, "--source_vocab_path is required."
    assert FLAGS.target_vocab_path, "--target_vocab_path is required."
    assert FLAGS.source_output_path, "--source_output_path is required."
    assert FLAGS.target_output_path, "--target_output_path is required."
    assert FLAGS.score_output_path, "--score_output_path is required."
    assert FLAGS.source_language, "--source_language is required."
    assert FLAGS.target_language, "--target_language is required."

    # Read vocabularies.
    source_vocab, _ = utils.initialize_vocabulary(FLAGS.source_vocab_path)
    target_vocab, _ = utils.initialize_vocabulary(FLAGS.target_vocab_path)

    # Read source and target paths for sentence extraction.
    source_paths = []
    target_paths = []
    for file in os.listdir(FLAGS.extract_dir):
        if file.endswith(FLAGS.source_language):
            source_paths.append(os.path.join(FLAGS.extract_dir, file))
        elif file.endswith(FLAGS.target_language):
            target_paths.append(os.path.join(FLAGS.extract_dir, file))
    source_paths.sort()
    target_paths.sort()

    utils.reset_graph()

    with tf.Session() as sess:
        # Restore saved model.
        utils.restore_model(sess, FLAGS.checkpoint_dir)

        # Recover placeholders and ops for extraction.
        x_source = sess.graph.get_tensor_by_name("x_source:0")
        source_seq_length = sess.graph.get_tensor_by_name("source_seq_length:0")
        x_target = sess.graph.get_tensor_by_name("x_target:0")
        target_seq_length = sess.graph.get_tensor_by_name("target_seq_length:0")
        labels = sess.graph.get_tensor_by_name("labels:0")
        placeholders = [
            x_source, source_seq_length, x_target, target_seq_length, labels
        ]
        probs = sess.graph.get_tensor_by_name("feed_forward/output/probs:0")
        source_final_state_ph = sess.graph.get_tensor_by_name(
            "birnn/source_final_state_ph:0")

        with open(FLAGS.source_output_path, mode="w", encoding="utf-8") as source_output_file, \
             open(FLAGS.target_output_path, mode="w", encoding="utf-8") as target_output_file, \
             open(FLAGS.score_output_path, mode="w", encoding="utf-8") as score_output_file:

            for source_path, target_path in zip(source_paths, target_paths):
                # Read sentences from articles.
                source_sentences, target_sentences = read_articles(
                    source_path, target_path)

                # Convert sentences to token ids sequences.
                source_sentences_ids = [
                    utils.sentence_to_token_ids(sent, source_vocab,
                                                FLAGS.max_seq_length)
                    for sent in source_sentences
                ]
                target_sentences_ids = [
                    utils.sentence_to_token_ids(sent, target_vocab,
                                                FLAGS.max_seq_length)
                    for sent in target_sentences
                ]

                # Extract sentence pairs.
                pairs = extract_pairs(sess, source_sentences, target_sentences,
                                      source_sentences_ids,
                                      target_sentences_ids, probs,
                                      placeholders, source_final_state_ph)

                if not pairs:
                    continue

                for source_sentence, target_sentence, score in pairs:
                    source_output_file.write(source_sentence)
                    target_output_file.write(target_sentence)
                    score_output_file.write(str(score) + "\n")
def weather_prediction(feats, labels, parameters, masks):
    '''
    weather prediction wrapper for solar forecast
    '''
    # create results dictionary under each weather
    sunny_results = {}
    cloudy_results = {}
    partly_cloudy_results = {}

    # get parameters of SVR model under each weather types
    if parameters['SUNNY']['para_path'] is not None:
        logging.info('Loading given model on disk ...')
        sunny_model = utl.restore_model(parameters['SUNNY']['para_path'])
    else:
        logging.info('Restore model on disk ...')
        sunny_model = utl.restore_model(
            parameters['SUNNY']['name'] + '-grid-search')
    logging.info("Sunny model loaded is {}".format(sunny_model))

    if parameters['CLOUDY']['para_path'] is not None:
        logging.info('Loading given model on disk ...')
        cloudy_model = utl.restore_model(parameters['CLOUDY']['para_path'])
    else:
        logging.info('Restore model on disk ...')
        cloudy_model = utl.restore_model(
            parameters['CLOUDY']['name'] + '-grid-search')
    logging.info("Cloudy model loaded is {}".format(cloudy_model))

    if parameters['PARTLY CLOUDY']['para_path'] is not None:
        logging.info('Loading given model on disk ...')
        partly_cloudy_model = utl.restore_model(
            parameters['PARTLY CLOUDY']['para_path'])
    else:
        logging.info('Restore model on disk ...')
        partly_cloudy_model = utl.restore_model(
            parameters['PARTLY CLOUDY']['name'] + '-grid-search')
    logging.info("Partly cloudy model loaded is {}".format(partly_cloudy_model))

    # perform prediction based on weather types
    # sunny day
    logging.info("Start performing predictions on sunny days ...")
    sunny_pred = predict(feats['sunny'], sunny_model)
    sunny_results['prediction'] = sunny_pred
    logging.info("Weather prediction on sunny days is complete.")

    # cloudy day
    logging.info("Start performing predictions on cloudy days ...")
    cloudy_pred = predict(feats['cloudy'], cloudy_model)
    cloudy_results['prediction'] = cloudy_pred
    logging.info("Weather prediction on cloudy days is complete.")

    # partly cloudy day
    logging.info("Start performing predictions on partly cloudy days ...")
    partly_cloudy_pred = predict(feats['partly_cloudy'], partly_cloudy_model)
    partly_cloudy_results['prediction'] = partly_cloudy_pred
    logging.info("Weather prediction on partly cloudy days is complete.")

    if labels is not None:
        # computing errors
        sunny_errors = utl.compute_error(sunny_pred, labels['sunny'])
        sunny_results['rmse_errors'] = sunny_errors
        logging.info("RMSE errors of sunny days: {}".format(sunny_errors))

        cloudy_errors = utl.compute_error(cloudy_pred, labels['cloudy'])
        cloudy_results['rmse_errors'] = cloudy_errors
        logging.info("RMSE errors of cloudy days: {}".format(cloudy_errors))

        partly_cloudy_errors = utl.compute_error(partly_cloudy_pred,
                                                 labels['partly_cloudy'])
        partly_cloudy_results['rmse_errors'] = partly_cloudy_errors
        logging.info("RMSE errors of partly cloudy days: {}".format(
            partly_cloudy_errors))

        # comparison between prediction and true labels
        sunny_fig = utl.compare_pred_results(sunny_pred, labels['sunny'],
                                             'sunny', style='k.')
        cloudy_fig = utl.compare_pred_results(cloudy_pred, labels['cloudy'],
                                              'cloudy', style='k.')
        partly_cloudy_fig = utl.compare_pred_results(partly_cloudy_pred,
                                                     labels['partly_cloudy'],
                                                     'partly cloudy',
                                                     style='k.')

        preds = {'sunny': sunny_pred, 'cloudy': cloudy_pred,
                 'partly_cloudy': partly_cloudy_pred}

        # plot predictions and measurements
        fig_pred_meas, preds_total = utl.compare_preds_labels(preds, labels,
                                                              masks)

        # save fig
        fig_path = parameters['SUNNY']['fig_path'] + '.png'
        sunny_fig.savefig(fig_path)
        fig_path = parameters['CLOUDY']['fig_path'] + '.png'
        cloudy_fig.savefig(fig_path)
        fig_path = parameters['PARTLY CLOUDY']['fig_path'] + '.png'
        partly_cloudy_fig.savefig(fig_path)
        fig_path = parameters['fig_folder'] + 'pred_vs_meas' + '.png'
        fig_pred_meas.savefig(fig_path)

        if parameters['FLAG']['show_figs']:
            plt.show()

        return preds_total
def weather_classification(feats, MODE, labels=None):
    logging.info("Start classifying weather types ...")

    '''
    Classify weather types in a hard way:
    '''
    # SUNNY_THRESHOLD = 0.05
    # CLOUDY_THRESHOLD = 0.95
    # sunny_mask = feats[:,6] <= SUNNY_THRESHOLD
    # cloudy_mask = feats[:,6] >= CLOUDY_THRESHOLD
    # partly_cloudy_mask = ~ (sunny_mask | cloudy_mask)

    '''
    Use K-means algorithm to classify different weather types automatically:
    '''
    if (MODE == 'grid search' or MODE == 'holdout training'):
        kmeans = KMeans(n_clusters=3,
                        random_state=0).fit(feats[:, [0, 1, 2, 3, 4, 6, 7]])
        cluster_center = kmeans.cluster_centers_

        ''' sort according to cloud fraction '''
        sort_ind = cluster_center[:, 5].argsort()
        print 'clustering centers are:\n', cluster_center
        utl.save_model(kmeans, 'weather-classification_k-means')

        sunny_mask = kmeans.labels_ == sort_ind[0]
        partly_cloudy_mask = kmeans.labels_ == sort_ind[1]
        cloudy_mask = kmeans.labels_ == sort_ind[2]
    elif (MODE == 'weather prediction'):
        kmeans = utl.restore_model('weather-classification_k-means')
        cluster_center = kmeans.cluster_centers_
        predicted_label = kmeans.predict(feats[:, [0, 1, 2, 3, 4, 6, 7]])
        sort_ind = cluster_center[:, 5].argsort()
        print 'sort ind is:', sort_ind

        # dictionary = dict(zip(keys, values))
        sunny_mask = predicted_label == sort_ind[0]
        partly_cloudy_mask = predicted_label == sort_ind[1]
        cloudy_mask = predicted_label == sort_ind[2]

    mask_dict = {
        'sunny': sunny_mask,
        'cloudy': cloudy_mask,
        'partly_cloudy': partly_cloudy_mask
    }
    feats_dict = {
        'sunny': feats[sunny_mask, :],
        'cloudy': feats[cloudy_mask, :],
        'partly_cloudy': feats[partly_cloudy_mask, :]
    }
    for key, val in feats_dict.items():
        logging.debug('{} feature has shape: {}'.format(key, val.shape))

    if labels is not None:
        labels_dict = {
            'sunny': labels[sunny_mask],
            'cloudy': labels[cloudy_mask],
            'partly_cloudy': labels[partly_cloudy_mask]
        }
        for key, val in labels_dict.items():
            logging.debug('{} label has shape: {}'.format(key, val.shape))
    else:
        labels_dict = None

    logging.info("Weather types classified.")
    return feats_dict, labels_dict, mask_dict
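The weather snippets persist scikit-learn models with `utl.save_model` / `utl.restore_model`, addressed either by a model name (e.g. 'weather-classification_k-means') or by an explicit path. A minimal sketch of such a pair using joblib; the directory, file extension, and path handling are assumptions:

import os

import joblib

MODEL_DIR = 'saved_models'  # assumed default location


def save_model(model, name):
    # Assumed helper: serialize a fitted estimator to MODEL_DIR/<name>.pkl.
    if not os.path.exists(MODEL_DIR):
        os.makedirs(MODEL_DIR)
    joblib.dump(model, os.path.join(MODEL_DIR, name + '.pkl'))


def restore_model(name_or_path):
    # Assumed helper: accept either a bare model name or a full path
    # to a serialized model file.
    path = name_or_path if os.path.exists(name_or_path) \
        else os.path.join(MODEL_DIR, name_or_path + '.pkl')
    return joblib.load(path)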
                          'PoreGroundTruth')
dataset = polyu.Dataset(
    os.path.join(polyu_path, 'PoreGroundTruthSampleimage'),
    os.path.join(polyu_path, 'PoreGroundTruthMarked'),
    split=(15, 5, 10),
    patch_size=flags.patch_size)
print('Loaded')

# gets placeholders for patches and labels
patches_pl, labels_pl = utils.placeholder_inputs()

with tf.Session() as sess:
    # build graph and restore model
    print('Restoring model...')
    net = models.FCN(patches_pl, training=False)
    utils.restore_model(sess, flags.model_dir_path)
    print('Done')

    # compute statistics
    f_score = None
    tdr = None
    fdr = None
    if flags.post == 'traditional':
        print('Generating proposals for test set...')
        pores, proposals = generate_proposals(sess, net.predictions,
                                              patches_pl, dataset.test)
        print('Done')

        print('Post-processing...')
        detections = []
        for proposal in proposals:
def eval_agent_parallel(envs, params):
    preprocessors = []
    for _ in range(params['num_envs']):
        if params['use_preproc']:
            preprocessor = Preprocessor(params['state_dim'], params['history'],
                                        params['use_luminance'],
                                        params['resize_shape'])
            params['state_dim'] = preprocessor.state_shape
        else:
            preprocessor = None
        preprocessors.append(preprocessor)

    agent = agent_lookup(params)

    restore_model(agent, params['restore'], params['use_cuda'])
    if params['use_cuda']:
        agent.cuda()
    agent.eval()

    episode_rewards = []
    start = time.time()

    for episode in xrange(1, params['num_episodes'] + 1):
        env_states = [env.reset() for env in envs]
        states = [
            preprocessors[i].process_state(env_states[i])
            if preprocessors[i] else env_states[i] for i in range(len(envs))
        ]
        env_status = [False for _ in envs]
        episode_reward = [0.0 for _ in envs]

        for t in xrange(1, params['max_steps'] + 1):
            if reduce(lambda x, y: x and y, env_status):
                break

            for i, env in enumerate(envs):
                if params['env_render']:
                    env.render()
                if env_status[i]:
                    continue

                var_state = createVariable(states[i],
                                           use_cuda=params['use_cuda'])
                action, state_val = agent.sample_action_eval(var_state)

                reward = 0.0
                for _ in range(1):
                    env_states[i], r, terminal, _ = env.step(action)
                    reward += r
                    if terminal:
                        env_status[i] = True
                        break

                episode_reward[i] += reward
                states[i] = preprocessors[i].process_state(env_states[i]) \
                    if preprocessors[i] else env_states[i]

        for p in preprocessors:
            p.reset()

        episode_rewards.extend(episode_reward)

        if episode % params['print_every'] == 0:
            print 'Episode {0} | Total Reward {1} | Mean Reward {2} | Total Time {3} ' \
                .format(episode, episode_reward,
                        sum(episode_rewards[-100:]) / 100,
                        timeSince(start, episode / params['num_episodes']))
def restore_description():
    # create network graph
    inputs, _ = utils.placeholder_inputs()
    net = description.Net(inputs)

    # save random weights and keep them
    # in program's memory for comparison
    vars_ = []
    saver = tf.train.Saver()
    with tf.Session() as sess:
        # initialize variables
        sess.run(tf.global_variables_initializer())

        # assign random values to variables
        # and save those values for comparison
        for var in sorted(tf.global_variables(), key=lambda x: x.name):
            # create random values for variable
            var_val = np.random.random(var.shape)

            # save for later comparison
            vars_.append(var_val)

            # assign it to tf var
            assign = tf.assign(var, var_val)
            sess.run(assign)

        # save initialized model
        saver.save(sess, '/tmp/description/model.ckpt', global_step=0)

    # create new session to restore saved weights
    with tf.Session() as sess:
        # make new initialization of weights
        sess.run(tf.global_variables_initializer())

        # assert weights are different
        i = 0
        for var in sorted(tf.global_variables(), key=lambda x: x.name):
            # get new var val
            var_val = sess.run(var)

            # compare with old one
            assert not np.isclose(np.sum(np.abs(var_val - vars_[i])), 0)

            i += 1

        # restore model
        utils.restore_model(sess, '/tmp/description')

        # check if weights are equal
        i = 0
        for var in sorted(tf.global_variables(), key=lambda x: x.name):
            # get new var val
            var_val = sess.run(var)

            # compare with old one
            if not np.allclose(var_val, vars_[i]):
                print(np.isclose(var_val, vars_[i]))
                print('Failed to load variable "{}"'.format(var.name))
                return False

            i += 1

    return True
def train_agent_parallel(envs, params):
    preprocessors = []
    for _ in range(params['num_envs']):
        if params['use_preproc']:
            preprocessor = Preprocessor(params['state_dim'], params['history'],
                                        params['use_luminance'],
                                        params['resize_shape'])
            params['state_dim'] = preprocessor.state_shape
        else:
            preprocessor = None
        preprocessors.append(preprocessor)

    agent = agent_lookup(params)

    if params['optim'] == 'rms':
        optimizer = torch.optim.RMSprop(agent.parameters(),
                                        lr=params['learning_rate'])
    elif params['optim'] == 'adam':
        optimizer = torch.optim.Adam(agent.parameters(),
                                     lr=params['learning_rate'])
    else:
        print 'Unknown optimizer specified!'
        sys.exit(0)

    if params['restore'] is not None:
        restore_model(agent, params['restore'], params['use_cuda'])

    if params['use_cuda']:
        agent = agent.cuda()
    agent.train()

    if params['arch'] == 'DBAgentAE':
        agent.eval()

    episode_rewards = []
    start = time.time()
    total_steps = 0

    for episode in xrange(1, params['num_episodes'] + 1):
        env_states = [env.reset() for env in envs]
        states = [
            preprocessors[i].process_state(env_states[i])
            if preprocessors[i] else env_states[i] for i in range(len(envs))
        ]
        env_status = [False for _ in envs]
        episode_reward = [0.0 for _ in envs]
        loss_dict = defaultdict(float)
        num_updates = 0

        for t in xrange(1, params['max_steps'] + 1):
            if reduce(lambda x, y: x and y, env_status):
                break

            for i, env in enumerate(envs):
                if params['env_render']:
                    env.render()
                if env_status[i]:
                    continue

                var_state = createVariable(states[i],
                                           use_cuda=params['use_cuda'])
                action, state_val = agent.sample_action(var_state, i=i)

                reward = 0.0
                for _ in range(1):
                    env_states[i], r, terminal, _ = env.step(action)
                    reward += r
                    if terminal:
                        env_status[i] = True
                        break

                episode_reward[i] += reward
                states[i] = preprocessors[i].process_state(env_states[i]) \
                    if preprocessors[i] else env_states[i]

            if t % params['update_freq'] == 0:
                l_dict = train_step_parallel(agent, optimizer, params)
                loss_dict = merge_loss_dicts(loss_dict, l_dict)
                num_updates += 1

        for i, env in enumerate(envs):
            agent.rewards[i].append(0.0)

        l_dict = train_step_parallel(agent, optimizer, params)
        loss_dict = merge_loss_dicts(loss_dict, l_dict)
        num_updates += 1

        for p in preprocessors:
            p.reset()

        episode_rewards.extend(episode_reward)

        # Might need this later
        visit = 0

        total_steps += t

        if episode % params['print_every'] == 0:
            print 'Episode {0} | Total Reward {1} | Total Steps {6} | Mean Reward {2} | Losses {3} | Total Time {4} | SA {5} ' \
                .format(episode, episode_reward,
                        sum(episode_rewards[-100:]) / 100,
                        {k: v / num_updates for k, v in loss_dict.iteritems()},
                        timeSince(start, episode / params['num_episodes']),
                        visit, total_steps)

        if episode % params['save_every'] == 0:
            torch.save(
                agent.state_dict(),
                './agents/{0}_{1}_{2}_{3}'.format(params['arch'],
                                                  params['env_name'],
                                                  int(params['beta']),
                                                  params['seed']))
        data_dict['y_pdb'].extend(pdb)
        data_dict['y_true'].extend(y_true.data)
        data_dict['y_pred'].extend(output.data)

    return data_dict


if __name__ == '__main__':
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    logger.addHandler(logging.StreamHandler(sys.stdout))

    args = get_arguments()
    cli_args = {key: value for key, value in vars(args).items() if value}

    # Command-line arguments get higher priority over default configuration values
    test_config = ChainMap(cli_args, DEFAULT_TEST_CONFIG)

    restore_file = os.path.join(test_config['model_dir'], 'model.pth.tar')
    checkpoint = torch.load(restore_file)
    model = CNNModel()
    model.load_state_dict(checkpoint['state_dict'])

    model = utils.restore_model(test_config['model_dir'], 'model.pth.tar')

    result_dict = test(model, test_loader)

    saved_csv = os.path.join(test_config['out_csv_dir'], 'predictions.csv')
    utils.save_dict_to_csv(saved_csv, result_dict)
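utils.restore_model(test_config['model_dir'], 'model.pth.tar') presumably wraps the same checkpoint loading done inline just above it. A hedged sketch, assuming the checkpoint stores weights under 'state_dict' and that CNNModel is importable from the project's model module:

import os

import torch


def restore_model(model_dir, filename):
    # Assumed helper: load the checkpoint dict and return a CNNModel
    # carrying the saved weights, in eval mode.
    checkpoint = torch.load(os.path.join(model_dir, filename),
                            map_location='cpu')
    model = CNNModel()
    model.load_state_dict(checkpoint['state_dict'])
    model.eval()
    return model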
                                                  vocab, maxlen,
                                                  error_rate=error_rate,
                                                  shuffle=False)

tokens = tokenize(test_sentence)
tokens = list(filter(None, tokens))
nb_tokens = len(tokens)

misspelled_tokens, _, target_tokens = transform(
    tokens, maxlen, error_rate=error_rate, shuffle=False)

input_chars = set(' '.join(train_encoder))
target_chars = set(' '.join(train_decoder))
input_ctable = CharacterTable(input_chars)
target_ctable = CharacterTable(target_chars)

encoder_model, decoder_model = restore_model(model_path, hidden_size)

input_tokens, target_tokens, decoded_tokens = decode_sequences(
    misspelled_tokens, target_tokens, input_ctable, target_ctable,
    maxlen, reverse, encoder_model, decoder_model, nb_tokens,
    sample_mode=sample_mode, random=False)

print('-')