def predict(inp, target, params, restore_from, config=None,
            model_dir='./ie590_project/experiments/ex1',
            model_save_dir='./ie590_project/experiments/ex1/model_save/1'):
    """Predict target values given input file paths.

    Args:
        inp: (list) a string list of image file paths; 2D -> [sample_size, number_of_channels]
        target: (list) a float list of target values
        params: (Params or str) Params object or path to params.json
        restore_from: (str) ckpt or directory name where ckpts are located for restoring
        config: (tf.compat.v1.ConfigProto) optional session configuration
        model_dir: (str) experiment directory containing params.json and logs
        model_save_dir: (str) directory where checkpoints are saved
    Return:
        out: (np.ndarray) predicted target values; has exactly the same dimension as target
    """
    assert len(inp) == len(target)

    if type(params) is str:
        assert os.path.isfile(params), "params.json does not exist at {}".format(params)
        params = Params(params)
        params.load(params.update)  # load parameters
    params.inp_size = len(inp)

    set_logger(os.path.join(model_dir, 'train.log'))

    logging.info("Creating the dataset...")
    inputs = input_fn(False, inp, target, params)

    logging.info("Creating the model...")
    model_spec = model_fn(False, inputs, params)

    # Unpack the Ops needed for evaluation; model_spec must be built by model_fn above
    iterator_init_op = model_spec['iterator_init_op']
    update_metrics_op = model_spec['update_metrics_op']
    metrics = model_spec['metrics']
    metrics_init_op = model_spec['metrics_init_op']
    predictions = model_spec['predictions']

    saver = tf.compat.v1.train.Saver()  # create after the graph is built

    logging.info("Calculating predictions...")
    with tf.compat.v1.Session(config=config) as sess:
        sess.run(model_spec['variable_init_op'])

        save_path = os.path.join(model_save_dir, restore_from)
        if os.path.isdir(save_path):
            # If restore_from is a directory, get the latest ckpt
            save_path = tf.train.latest_checkpoint(save_path)
        saver.restore(sess, save_path)

        # ceil(inp_size / batch_size) steps to cover every sample exactly once
        num_steps = (params.inp_size + params.batch_size - 1) // params.batch_size

        sess.run([iterator_init_op, metrics_init_op])

        # Allocate the output buffer; promote a 1D target list to a column vector
        if len(np.shape(target)) == 1:
            out = np.empty(np.shape(target))[:, np.newaxis]
        else:
            out = np.empty(np.shape(target))

        for i in tqdm(range(num_steps)):
            _, predictions_eval = sess.run([update_metrics_op, predictions])
            if i < num_steps - 1:
                out[i * params.batch_size:(i + 1) * params.batch_size, :] = predictions_eval
            else:
                out[i * params.batch_size:, :] = predictions_eval  # last (possibly partial) batch

    return out
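
# Usage sketch (illustrative only): 'image_paths', 'true_vals', and the
# checkpoint name 'best_weights' are hypothetical placeholders, assuming a
# params.json and a trained checkpoint already exist under model_save_dir.
#
#   image_paths = [['img0_ch0.png', 'img0_ch1.png'],
#                  ['img1_ch0.png', 'img1_ch1.png']]
#   true_vals = [0.7, 1.3]
#   preds = predict(image_paths, true_vals, 'params.json',
#                   restore_from='best_weights')
#   assert preds.shape[0] == len(true_vals)
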
if __name__ == '__main__':
    start_time = time.time()

    # for reproducibility
    tf.compat.v1.set_random_seed(123)

    args = parser.parse_args()  # parser is the module-level argparse.ArgumentParser

    params_path = os.path.join(args.model_dir, 'params.json')
    assert os.path.isfile(params_path), "params.json does not exist at {}".format(params_path)
    params = Params(params_path)
    params.load(params.update)

    # TODO: check and load if there are best weights saved so far
    # model_dir_has_best_weights = os.path.isdir(os.path.join(args.model_dir, "best_weights"))

    # set logger
    set_logger(os.path.join(args.model_dir, 'train.log'))

    # train/test split
    train_fpaths, test_fpaths, train_targets, test_targets = \
        get_train_test_split(args.json_path, args.data_dir, train_size=args.train_size)
    params.train_size = len(train_fpaths)
    params.test_size = len(test_fpaths)

    logging.info("Creating the dataset...")