def __init__(self, params, original_filename, vocabulary_path=None, only_eval=False, text_to_eval=None):
    Data.__init__(self, original_filename, vocabulary_path, params, text_to_eval)
    self.only_eval = only_eval
    # if the file does not have targets
    if text_to_eval is not None:
        self.only_eval = True
def load_and_test(model_dir):
    # Restore a trained model from model_dir and run it over the test images.
    a = Data()
    model = Model(data=a)
    model.load_model(log_model_dir=model_dir)
    model.test(test_image_id=50, save_dir=model.test_image_save_dir)
    model.test(test_image_id=350, save_dir=model.test_image_save_dir)
    for i in range(1, 500):
        model.test(i, save_dir=model.test_image_save_dir)
    model.end()
def train():
    # Train a model from scratch, save the checkpoint and test two sample images.
    a = Data()
    model = Model(data=a)
    model.log_config()
    model.train()
    model.save_model()
    # model.load_model(
    #     log_model_dir='/home/mars/ANN/dls/PatternRecognitionCourseFinalProject/log/6-11-17-21-16/model/model.ckpt-50000')
    # model.train()
    model.test(test_image_id=50, save_dir=model.test_image_save_dir)
    model.test(test_image_id=350, save_dir=model.test_image_save_dir)
    model.end()
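# Hypothetical entry point (not part of the original file) showing how the two helpers
# above might be invoked; the checkpoint directory is a placeholder path.
if __name__ == '__main__':
    train()
    # To evaluate a previously trained checkpoint instead:
    # load_and_test(model_dir='/path/to/log/<run>/model/model.ckpt-50000')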
def removing_objects(data):
    sets = [data.train_set, data.test_set]
    for i in sets:
        # Replace the string-valued columns with numeric codes
        # (0/1 for the binary features, 1-4 for the region).
        sex = {'male': 0, 'female': 1}
        i.sex = [sex[item] for item in i.sex]

        smoker = {'no': 0, 'yes': 1}
        i.smoker = [smoker[item] for item in i.smoker]

        region = {
            'northwest': 1,
            'southeast': 2,
            'northeast': 3,
            'southwest': 4
        }
        i.region = [region[item] for item in i.region]

    return Data(sets[0], sets[1])
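# For illustration only: the same categorical encoding expressed with pandas.Series.map
# on a small, made-up DataFrame. This is a self-contained sketch, not part of the
# project's pipeline; the real code above mutates data.train_set / data.test_set in place.
import pandas as pd

df = pd.DataFrame({
    'sex': ['male', 'female'],
    'smoker': ['yes', 'no'],
    'region': ['northwest', 'southeast'],
})
df['sex'] = df['sex'].map({'male': 0, 'female': 1})
df['smoker'] = df['smoker'].map({'no': 0, 'yes': 1})
df['region'] = df['region'].map({'northwest': 1, 'southeast': 2, 'northeast': 3, 'southwest': 4})
print(df)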
import argparse

import matplotlib.pyplot as plt
import numpy as np

from src.data.Data import Data
from src.utils.Config import Config

parser = argparse.ArgumentParser(description="")
parser.add_argument('config')
args = parser.parse_args()

config = Config.from_file(args.config)
data = Data(config.get_with_prefix("data"))
dataset = data.build_val_dataset()

for reference_images, reference_cam_poses, query_images, query_cam_poses, iou, room_ids, pose_transform, full_matching in dataset:
    # Show reference and query image side by side.
    fig = plt.figure()
    plt.imshow(np.concatenate((reference_images[0], query_images[0]), axis=1),
               extent=[0, data.image_size * 2, data.image_size, 0])
    lines = []

    def onclick(event):
        print('button=%d, x=%d, y=%d, xdata=%f, ydata=%f' %
              (event.button, event.x, event.y, event.xdata, event.ydata))
        x = int(event.xdata)
        y = int(event.ydata)
        # Only react to clicks inside the left (reference) image; 128 is presumably the image size.
        if 0 <= x < 128 and 0 <= y < 128:
            # Assumed handler body: remove previously drawn match lines before adding new ones.
            for line in lines:
                line.remove()
import argparse
from pathlib import Path

import matplotlib.pyplot as plt
import numpy as np
import imageio
import cv2

from src.data.Data import Data
from src.utils.Config import Config
from src.utils.Inference import Inference
# Assumed import path for the model class; adjust to where ExReNet is defined in the repo.
from src.model.ExReNet import ExReNet

parser = argparse.ArgumentParser(description="")
parser.add_argument('config')
parser.add_argument('model_dir')
parser.add_argument('image1')
parser.add_argument('image2')
args = parser.parse_args()

config = Config.from_file(args.config)
data = Data(config.get_with_prefix("data"))

# Build the model and restore the trained weights.
model = ExReNet(config.get_with_prefix("model"), data)
model.load_weights(str(Path(args.model_dir) / "model.h5"))

# Load both input images and resize them to the network's input resolution.
image1 = imageio.imread(args.image1)
image1 = cv2.resize(image1, (data.image_size, data.image_size))
image2 = imageio.imread(args.image2)
image2 = cv2.resize(image2, (data.image_size, data.image_size))

# Predict the relative camera pose and the matching between the two images.
cam_pose, matched_coordinates, all_dots, matching = model(image1[None] / 255.0, image2[None] / 255.0, training=False)

print("Click on the left image to see the matched point in the other image.")
full_matching = np.zeros((32, 32, 2))
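# Hedged sketch (not from the original script): one way to wire up the interactive view
# announced above, mirroring the validation-set viewer: show both images side by side and
# register a click handler with matplotlib. The handler body is an assumption; how the
# clicked pixel maps into `matching` / `matched_coordinates` depends on their exact shapes.
fig = plt.figure()
plt.imshow(np.concatenate((image1, image2), axis=1),
           extent=[0, data.image_size * 2, data.image_size, 0])


def on_click(event):
    # Only react to clicks on the left image half.
    if event.xdata is not None and 0 <= event.xdata < data.image_size:
        print("clicked at (%d, %d)" % (int(event.xdata), int(event.ydata)))


fig.canvas.mpl_connect('button_press_event', on_click)
plt.show()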
import pandas as pd


def load_drop_empty(train_set_path, test_set_path):
    # Load the train and test CSV files and drop rows with missing values.
    train = pd.read_csv(train_set_path)
    test = pd.read_csv(test_set_path)
    return Data(train.dropna(), test.dropna())
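# Hedged usage sketch tying the two preprocessing helpers together: load the CSVs, drop
# rows with missing values, then encode the categorical columns. The file names below are
# placeholders, not paths from the original project.
if __name__ == '__main__':
    data = load_drop_empty('train.csv', 'test.csv')
    data = removing_objects(data)
    print(data.train_set.head())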