Example #2
    def __prepare_training_data__(self, split, cache_root):
        """
        准备特征文件和标签文件
        特征文件包含一个numpy浮点型二维矩阵,N x L,N为样本总数,L为特征长度
        标签文件包含一个numpy二值型二维矩阵,N x C,N为样本总数,C为关系类别数
        :param split: train or val
        :param cache_root: save root
        :return: features, labels
        """
        feature_path = os.path.join(cache_root, '%s_features.bin' % split)
        label_path = os.path.join(cache_root, '%s_labels.bin' % split)
        if not os.path.exists(feature_path) or not os.path.exists(label_path):
            print('Extracting features for %s set ...' % split)
            time.sleep(2)

            imgs_path = os.path.join(self.dataset_root, '%s_images' % split)
            ano_path = os.path.join(self.dataset_root, 'annotations_%s.json' % split)
            features_builder = []
            gt_labels_builder = []
            feature_extractor = FeatureExtractor()
            with open(ano_path, 'r') as f:
                annotation_all = json.load(f)
            file_list = dict()
            for root, dirs, files in os.walk(imgs_path):
                for file in files:
                    file_list[file] = os.path.join(root, file)
            for file in tqdm(file_list):
                ano = annotation_all[file]
                samples_info = []
                labels = []
                for sample in ano:
                    gt_predicates = sample['predicate']
                    gt_object_id = sample['object']['category']
                    gt_object_loc = sample['object']['bbox']
                    gt_subject_id = sample['subject']['category']
                    gt_subject_loc = sample['subject']['bbox']
                    samples_info.append(gt_subject_loc + [gt_subject_id] + gt_object_loc + [gt_object_id])
                    predicates = np.zeros(self.pre_category_num())
                    for p in gt_predicates:
                        predicates[p] = 1
                    labels.append(predicates.tolist())
                feature = feature_extractor.extract_features(cv2.imread(file_list[file]), samples_info)
                features_builder = features_builder + feature.tolist()
                gt_labels_builder = gt_labels_builder + labels
            features = np.array(features_builder)
            gt_labels = np.array(gt_labels_builder)
            with open(feature_path, 'wb') as fw:
                pickle.dump(features, fw)
            with open(label_path, 'wb') as fw:
                pickle.dump(gt_labels, fw)
        else:
            print('Loading data ...')
            with open(feature_path, 'rb') as f:
                features = pickle.load(f)
            with open(label_path, 'rb') as f:
                gt_labels = pickle.load(f)

        return features, gt_labels
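A minimal usage sketch of the method above (hypothetical: `model` is an instance of the class that defines __prepare_training_data__ and pre_category_num):

features, labels = model.__prepare_training_data__('train', cache_root='cache')
# features: N x L float matrix, labels: N x C binary matrix, as the docstring describes
assert features.shape[0] == labels.shape[0]
assert labels.shape[1] == model.pre_category_num()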
Example #3
class VehicleFeatureClassifier(object):
    def __init__(self, clf=None):
        if clf is None:
            self.clf = LinearSVC()
        else:
            self.clf = clf
        self.fe = FeatureExtractor()
        self.scaler = None
        self.valid_acc = 0

    def load_features_and_labels(self, car_path, noncar_path):
        '''Load images, extract features, and build the training dataset.'''
        car_feat = self.fe.extract_features_from_imgs(car_path)
        noncar_feat = self.fe.extract_features_from_imgs(noncar_path)
        features = np.vstack((car_feat, noncar_feat)).astype(np.float64)
        # Normalize feature data
        self.scaler = StandardScaler().fit(features)
        self.features = self.scaler.transform(features)
        self.labels = np.hstack(
            [np.ones(len(car_feat)),
             np.zeros(len(noncar_feat))])

    def train_and_valid(self):
        '''
        Train on the loaded dataset using a scikit-learn classification method.
        The loaded dataset is split into train and validation parts.
        '''
        # Split dataset
        X_train, X_valid, y_train, y_valid = train_test_split(self.features,
                                                              self.labels,
                                                              test_size=0.2)
        self.clf.fit(X_train, y_train)
        self.valid_acc = self.clf.score(X_valid, y_valid)

    def predict(self, imgs):
        '''Predict labels for the given images. Input should be a list of image arrays.'''
        features = self.fe.extract_features_from_imgs(imgs, path=False)
        features = self.scaler.transform(features)
        prediction = self.clf.predict(features)
        return prediction

    def save_fit(self, fname='saved_fit.p'):
        '''Save fitted classifier and scaler to a pickle file.'''
        saved_dict = {}
        saved_dict['clf'] = self.clf
        saved_dict['scaler'] = self.scaler
        with open(fname, 'wb') as f:
            pickle.dump(saved_dict, f)
        print('Fitted clf and scaler saved to {}.'.format(fname))

    def load_fit(self, fname='saved_fit.p'):
        '''Load a fitted classifier and scaler from a pickle file.'''
        with open(fname, 'rb') as f:
            saved_dict = pickle.load(f)
        self.clf = saved_dict['clf']
        self.scaler = saved_dict['scaler']
        print('Fitted clf and scaler loaded from {}.'.format(fname))
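A minimal end-to-end sketch of the class above (the image path lists are hypothetical placeholders):

clf = VehicleFeatureClassifier()
clf.load_features_and_labels(car_path=car_image_paths, noncar_path=noncar_image_paths)
clf.train_and_valid()
print('Validation accuracy: {:.3f}'.format(clf.valid_acc))
clf.save_fit('saved_fit.p')  # persist both the classifier and the fitted scaler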
Example #4
    def __init__(self,
                 double_dqn=True,
                 dueling_dqn=False,
                 gamma=0.9,
                 replace_target_iter=10,
                 batch_size=32,
                 memory_size=1000,
                 prioritized_replay=True,
                 n_episodes=100,
                 n_backup_episodes=10,
                 resume=False,
                 demo_dir=None,
                 demo_pretrain_steps=50,
                 data_dir=None,
                 log_dir=None,
                 model_dir=None,
                 supervised_model=None,
                 **kwargs):
        self.logger = logging.getLogger("TestDQN")
        self.data_dir = data_dir
        if not os.path.exists(data_dir):
            os.makedirs(data_dir)
        self.model_dir = model_dir if model_dir else self.data_dir
        self.log_dir = log_dir if log_dir else self.model_dir

        self.replay_memory = ReplayMemory(
            memory_size=memory_size, prioritized_replay=prioritized_replay)
        self.demo_memory = ReplayMemory(memory_size=memory_size,
                                        prioritized_replay=prioritized_replay)

        self.fe = FeatureExtractor(**kwargs)
        self.et = ExploreStrategy(n_episodes=n_episodes,
                                  feature_extractor=self.fe,
                                  supervised_model=supervised_model,
                                  **kwargs)

        self.double_dqn = double_dqn
        self.dueling_dqn = dueling_dqn
        self.q_eval, self.q_next = self._build_net()

        self.gamma = gamma
        self.replace_target_iter = replace_target_iter
        self.batch_size = batch_size
        self.n_episodes = n_episodes
        self.n_backup_episodes = n_backup_episodes
        self.resume = resume
        self.demo_dir = demo_dir
        self.demo_pretrain_steps = demo_pretrain_steps
        self._n_learn_steps = 0
Example #5
    def __init__(self,
                 gamma=0.99,
                 lr=0.1,
                 no_sub_policy=False,
                 n_episodes=100,
                 n_backup_episodes=10,
                 resume=False,
                 supervised_model=None,
                 data_dir=None,
                 log_dir=None,
                 model_dir=None,
                 **kwargs):
        self.logger = logging.getLogger("TestQTable")
        self.data_dir = data_dir
        if not os.path.exists(data_dir):
            os.makedirs(data_dir)
        self.model_dir = model_dir if model_dir else self.data_dir
        self.log_dir = log_dir if log_dir else self.model_dir

        self.fe = FeatureExtractor(**kwargs)
        self.et = ExploreStrategy(n_episodes=n_episodes,
                                  feature_extractor=self.fe,
                                  supervised_model=supervised_model,
                                  **kwargs)
        self.gamma = gamma
        self.lr = lr
        self.no_sub_policy = no_sub_policy
        self.n_episodes = n_episodes
        self.n_backup_episodes = n_backup_episodes
        self.resume = resume
        self.q_table = {}
        self.form_manager = FormManager()
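For context, a minimal tabular Q-learning update consistent with the `gamma`, `lr`, and `q_table` fields above might look like this (the state/action keys are assumptions, not the original code):

    def update_q(self, state, action, reward, next_state, next_actions):
        # Q(s,a) <- Q(s,a) + lr * (r + gamma * max_a' Q(s',a') - Q(s,a))
        q_sa = self.q_table.get((state, action), 0.0)
        q_next = max((self.q_table.get((next_state, a), 0.0) for a in next_actions), default=0.0)
        self.q_table[(state, action)] = q_sa + self.lr * (reward + self.gamma * q_next - q_sa)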
Example #6
    def __init__(self, form):
        self.form = form
        self.fe = FeatureExtractor(
            # state representation
            use_screenshot=False, use_dom_style=False, use_dom_type=False, use_dom_embed=True, use_dom_interacted=False,
            # query representation
            use_query_embed=True, use_query_score=True,
            # action representation
            use_action_type=False, use_action_query_sim=True,
            # query intersection representation
            use_sim_non_para=True, use_sim_para_val=False, use_sim_para_anno=True, merge_para_sim=False,
            # feature dimensions
            feature_width=100, feature_height=100, text_embed_dim=10, n_para_dim=10,
            # misc
            disable_text_embed=False, feature_cache_size=5000, work_dir=None
        )
        self.net = self._build_net()
Example #7
	def __init__(self, cap):
		self.frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH) // 2)
		self.frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT) // 2)
		self.total_frame = cap.get(cv2.CAP_PROP_FRAME_COUNT)
		print('WIDTH: {}, HEIGHT: {}, FRAME_COUNT: {}'.format(self.frame_width, self.frame_height, self.total_frame))

		self.feature_extractor = FeatureExtractor()

		self.frame_idx = 0

		# frame: keypoints, poses and 3D points
		self.frames = []
		# point: keypoints index, frame index
		self.points = []
		self.K = np.array([[645.2, 0, self.frame_width//2],
						   [0, 645.2, self.frame_height//2],
						   [0, 0, 1]])
Example #8
def main():
	stereo_camera_gt, stereo_images, stereo_image_files = loader.get_stereo_data()
	# pick two consecutive images
	K = stereo_camera_gt[stereo_image_files[0]]["K"]
	print("calibration matrix: {}".format(K))
	ind_1, ind_2 = 0, 1
	img1_key, img2_key = stereo_image_files[ind_1], stereo_image_files[ind_2]
	detector = FeatureExtractor()
	kp1, des1 = detector.feature_detecting(stereo_images[img1_key], mode='orb')
	kp2, des2 = detector.feature_detecting(stereo_images[img2_key], mode='orb')
	kp1, kp2 = np.array([item.pt for item in kp1]), np.array([item.pt for item in kp2])

	kp1_inliers, kp2_inliers = detector.feature_matching(kp1, des1, kp2, des2, K)
	assert len(kp1_inliers) == len(kp2_inliers)
	print("{} matches found.".format(len(kp1_inliers)))
	pts1, pts2 = kp1[kp1_inliers], kp2[kp2_inliers]
	pts1, pts2 = Homogenize(pts1.T), Homogenize(pts2.T)
	n = pts1.shape[1]
	norm_pts1, norm_pts2 = NormalizePoints(pts1, K), NormalizePoints(pts2, K)
	# estimate the fundamental matrix from the pixel coordinates, then lift it to the essential matrix
	F = DLT_F(pts1, pts2)
	E = K.T @ F @ K
	print('rank of essential: {}'.format(np.linalg.matrix_rank(E)))
	print("Essential Matrix: {}".format(E))

	# decompose the essential matrix into two projective matrices
	# P1 = [I | 0] -> pts1, P2 = [R | t]
	I, P2 = Decompose_Essential(E, norm_pts1, norm_pts2, mode='matrix')

	Rt1 = np.hstack((stereo_camera_gt[img1_key]["R"], stereo_camera_gt[img1_key]["t"]))
	Rt2 = np.hstack((stereo_camera_gt[img2_key]["R"], stereo_camera_gt[img2_key]["t"]))
	P1, P3 = Project_Essential(I, P2, Rt1)
	print("Rt2: {}".format(Rt2))
	print("P3: {}".format(P3))
	E_compose = Compose_Essential(Rt1, Rt2)
	print('rank of essential: {}'.format(np.linalg.matrix_rank(E_compose)))
	U, d, Vt = np.linalg.svd(E_compose)
	print(d)
	print("Ground Truth Essential: {}".format(E_compose))

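	# note: an essential matrix estimated from correspondences is only defined up to scale,
	# so this check assumes DLT_F and Compose_Essential yield consistently scaled matrices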
	assert np.allclose(E, E_compose)
	print("PASS")
Example #9
    def __prepare_testing_data__(self, cache_root):
        """
        准备特征文件
        特征文件包含一个numpy浮点型二维矩阵,N x L,N为样本总数,L为特征长度
        :param cache_root: save root
        :return: features, labels=None
        """
        feature_path = os.path.join(cache_root, 'test_features.bin')
        if not os.path.exists(feature_path):
            print('Extracting features for test set ...')
            time.sleep(2)

            imgs_path = os.path.join(self.dataset_root, 'test_images')
            ano_path = os.path.join(self.dataset_root, 'annotations_test_so.json')
            features_builder = []
            feature_extractor = FeatureExtractor()
            with open(ano_path, 'r') as f:
                annotation_all = json.load(f)
            file_list = dict()
            for root, dirs, files in os.walk(imgs_path):
                for file in files:
                    file_list[file] = os.path.join(root, file)
            for file in tqdm(file_list):
                ano = annotation_all[file]
                samples_info = []
                for sample in ano:
                    gt_object_id = sample['object']['category']
                    gt_object_loc = sample['object']['bbox']
                    gt_subject_id = sample['subject']['category']
                    gt_subject_loc = sample['subject']['bbox']
                    samples_info.append(gt_subject_loc + [gt_subject_id] + gt_object_loc + [gt_object_id])
                feature = feature_extractor.extract_features(cv2.imread(file_list[file]), samples_info)
                features_builder = features_builder + feature.tolist()
            features = np.array(features_builder)
            with open(feature_path, 'wb') as fw:
                pickle.dump(features, fw)
        else:
            print('Loading data ...')
            with open(feature_path, 'rb') as f:
                features = pickle.load(f)

        return features, None
Example #10
def main():
    configure_logs(logging.INFO)
    experiment = create_experiment()
    experiment.set_name(f'Test {datetime.datetime.now()}')

    # Parameters
    num_episode = 300
    batch_size = 5
    learning_rate = 0.02
    initial_factor = 1.1
    gamma = 0.99
    memory = Memory()
    feature_extractor = FeatureExtractor()

    policy_net = PolicyNet(
        real_dim=feature_extractor.REAL_FEATURES_NUM,
        cat_dims=feature_extractor.CAT_DIMS,
        emb_dim=10,
        global_memory=memory)
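The snippet ends before the training loop; a REINFORCE-style discounted-return computation matching the `gamma` above would typically look like this (a sketch, not the original code):

def discounted_returns(rewards, gamma=0.99):
    # G_t = r_t + gamma * G_{t+1}, accumulated backwards over one episode
    returns, g = [], 0.0
    for r in reversed(rewards):
        g = r + gamma * g
        returns.append(g)
    return list(reversed(returns))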
Example #11
    def __init__(self,
                 data_dir,
                 log_dir=None,
                 model_dir=None,
                 batch_size=64,
                 n_episodes=200,
                 n_backup_episodes=10,
                 resume=False,
                 **kwargs):
        self.logger = logging.getLogger("ExplorationModel")
        self.fe = FeatureExtractor(**kwargs)
        self.model = None
        self.batch_size = batch_size
        self.n_episodes = n_episodes
        self.n_backup_episodes = n_backup_episodes
        self.resume = resume

        self.data_dir = data_dir
        if not os.path.exists(data_dir):
            os.makedirs(data_dir)
        self.model_dir = model_dir if model_dir else self.data_dir
        self.log_dir = log_dir if log_dir else self.model_dir

        self._build_net()
Example #12
class FormActor:
    def __init__(self, form):
        self.form = form
        self.fe = FeatureExtractor(
            # state representation
            use_screenshot=False, use_dom_style=False, use_dom_type=False, use_dom_embed=True, use_dom_interacted=False,
            # query representation
            use_query_embed=True, use_query_score=True,
            # action representation
            use_action_type=False, use_action_query_sim=True,
            # query intersection representation
            use_sim_non_para=True, use_sim_para_val=False, use_sim_para_anno=True, merge_para_sim=False,
            # feature dimensions
            feature_width=100, feature_height=100, text_embed_dim=10, n_para_dim=10,
            # misc
            disable_text_embed=False, feature_cache_size=5000, work_dir=None
        )
        self.net = self._build_net()

    def _build_net(self):
        # input: form, actions
        action_loc = Input(shape=(self.fe.height, self.fe.width, 1))
        dom_sim = Input(shape=(self.fe.height, self.fe.width, 1))
        input_comb_vec = Input(shape=(len(self.form.get_input_comb_vec()),))
        dom_embed = Input(shape=(self.fe.height, self.fe.width, self.fe.text_embed_dim))
        para_value_embed = Input(shape=(self.fe.text_embed_dim,))
        para_anno_embed = Input(shape=(self.fe.text_embed_dim,))

        dom_conv = Sequential(layers=[
            Conv2D(1, (7, 7), padding="same", activation="relu"),
            Conv2D(1, (7, 7), padding="same", activation="relu"),
            Conv2D(1, (7, 7), padding="same", activation="sigmoid")
        ])

        action_conv = Sequential(layers=[
            Conv2D(1, (3, 3), padding="valid", activation="relu"),
            MaxPooling2D(),
            Conv2D(1, (3, 3), padding="valid", activation="relu"),
            MaxPooling2D(),
            Conv2D(1, (3, 3), padding="valid", activation="relu"),
            MaxPooling2D(),
            Conv2D(1, (3, 3), padding="valid", activation="relu"),
            MaxPooling2D(),
            Flatten()
        ])

        def compute_sim(x):
            channel_product = K.prod(K.concatenate(x, axis=-1), axis=-1)
            return K.mean(channel_product, axis=[1,2])

        def get_action_vec(x):
            return K.concatenate(x, axis=-1)

        dom_sim_loc = dom_conv(dom_sim)
        sim_q = Lambda(compute_sim)([action_loc, dom_sim_loc])

        action_loc_vec = action_conv(action_loc)
        action_vec = Lambda(get_action_vec)([action_loc_vec, para_anno_embed, para_value_embed])
        action_q = Dense(1, activation="sigmoid")(action_vec)

        input_comb_q = Dense(1, activation="sigmoid")(input_comb_vec)
        phi = 0.1

        # q = Lambda(lambda x: K.sum([K.expand_dims(x[0], 1), phi * x[1]], axis=0))([sim_q, action_q])
        # net = Model(inputs=[action_loc, dom_sim, para_value_embed, para_anno_embed], outputs=q)
        q = Lambda(lambda x: K.sum([K.expand_dims(x[0], 1), phi * x[1]], axis=0))([sim_q, input_comb_q])
        net = Model(inputs=[action_loc, dom_sim, input_comb_vec], outputs=q)

        optimizer = keras.optimizers.Adam(lr=0.01)
        net.compile(loss='mse', optimizer=optimizer, metrics=['accuracy'])
        # print(net.summary())
        return net

    def learn(self):
        best_input_comb = self.form.best_input_comb
        positive_samples = []
        negative_samples = []
        previous_actions = []
        for (action_type, action_ele) in best_input_comb:
            value_candidates = self.form.input_candidates[(action_type, action_ele)]
            best_value = best_input_comb[(action_type, action_ele)]
            best_action = None
            for value in value_candidates:
                action = Action(action_ele, action_type, value)
                encoding = self.encode(self.form, previous_actions, action)
                if value == best_value:
                    best_action = action  # remember the best action so it can extend previous_actions
                    positive_samples.append(encoding)
                else:
                    negative_samples.append(encoding)
            if best_action:
                previous_actions.append(best_action)
        # negative_samples = random.sample(negative_samples, len(positive_samples))
        samples = positive_samples + negative_samples
        if not samples:
            return None
        samples = self.zip_inputs(samples)
        labels = [1.0] * len(positive_samples) + [0.0] * len(negative_samples)
        history = self.net.fit(x=samples, y=np.array(labels), epochs=5, verbose=0)
        # i = random.randint(0, len(positive_samples) - 1)
        # self.fe.show_image("output/positive_action_loc.png", positive_samples[i][0])
        # self.fe.show_image("output/positive_dom_sim.png", positive_samples[i][1])
        return history.history["loss"][-1]

    def zip_inputs(self, inputs):
        return list([np.array(x) for x in zip(*inputs)])

    def encode(self, form, previous_actions, action):
        task = form.task
        assert isinstance(task, Task)
        action_loc = self.fe.get_action_feature_image(action)
        dom_sim = np.zeros([self.fe.height, self.fe.width, 1])
        value_text, value_annotation = "", ""
        if action.value is not None:
            value_text = action.value_text_parsed
            value_annotation = task.get_parameter_surrounding_word_parsed(value_text)
            dom_sim = self.get_dom_similarity_feature(task.state, value_annotation)
        # para_value_vec = Utils.vec(value_text)[:self.fe.text_embed_dim]
        # para_anno_vec = Utils.vec(value_annotation)[:self.fe.text_embed_dim]
        # return action_loc, dom_sim, para_value_vec, para_anno_vec
        input_comb_vec = self.form.get_input_comb_vec(Form.convert_actions_to_input_comb(previous_actions + [action]))
        return action_loc, dom_sim, input_comb_vec

    def evaluate(self, form, previous_actions, action):
        input = self.encode(form, previous_actions, action)
        q = self.net.predict(x=self.zip_inputs([input]))
        return float(q)

    def get_dom_similarity_feature(self, state, text_parsed):
        def compute_feature(element, text_parsed):
            if not element.own_text_parsed or not text_parsed:
                return None
            return Utils.text_similarity(text_parsed, element.own_text_parsed)

        def merge_features(old_ele_feature, new_ele_feature):
            return np.max([old_ele_feature, new_ele_feature], axis=0)

        feature = np.zeros([self.fe.height, self.fe.width, 1])
        self.fe._render_feature_recursively(feature, merge_features, compute_feature, state.root_element, text_parsed)
        return feature
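The compute_sim layer in _build_net above multiplies the action-location mask with the convolved DOM-similarity map channel-wise and averages over the spatial dimensions; the same operation in plain numpy, for reference (a sketch):

import numpy as np

def compute_sim_np(action_loc, dom_sim_loc):
    # both inputs: (batch, height, width, 1)
    channel_product = np.prod(np.concatenate([action_loc, dom_sim_loc], axis=-1), axis=-1)
    return channel_product.mean(axis=(1, 2))  # -> (batch,)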
Example #13
    def __prepare_data__(self, split, cache_root, mode):
        
        histogram_path = os.path.join(cache_root, '%s_histograms.bin' % split)
        dir_path = os.path.join(cache_root, '%s_dir.bin' % split)
        ratio_path = os.path.join(cache_root, '%s_ratio.bin' % split)
        sub_path = os.path.join(cache_root, '%s_subs.bin' % split)
        obj_path = os.path.join(cache_root, '%s_objs.bin' % split)

        filepath_list = [histogram_path, dir_path, ratio_path, sub_path, obj_path]
        
        if mode == 0:
            label_path = os.path.join(cache_root, '%s_labels.bin' % split)
            filepath_list = filepath_list + [label_path]
            
        file_not_exist = False
        for filepath in filepath_list:
            if not os.path.exists(filepath):
                file_not_exist = True

        if file_not_exist:
            print('Extracting features for %s set ...' % split)
            time.sleep(2)
            
            if split.find('checking') != -1:
                split = 'checking'
            imgs_path = os.path.join(self.dataset_root, '%s_images' % split)
            ano_path = os.path.join(self.dataset_root, 'annotations_%s.json' % split)
            
            # "여러개" 사진에 대한 여러 훈련 데이터 
            histogram_builder = []
            direction_builder = []
            ratio_builder = []
            sub_id_builder = []
            obj_id_builder = []
            if mode == 0:
                gt_labels_builder = []

            feature_extractor = FeatureExtractor()

            # read the annotations file and build annotation_all
            with open(ano_path, 'r') as f:
                annotation_all = json.load(f)

            file_list = dict()
            # file_list = {'file name': file path, ... }
            for root, dirs, files in os.walk(imgs_path):
                for file in files:
                    file_list[file] = os.path.join(root, file)

            # pick files one by one in sorted 'file name' order
            for file in tqdm(sorted(file_list.keys())):
                # ano =  [{
                #   "predicate": [3],
                #   "subject": {"category": 0, "bbox": [230, 72, 415, 661]},
                #   "object": {"category": 20, "bbox": [442, 292, 601, 433]}
                #   }, ...]
                ano = annotation_all[file]

                """
                * 이 list들 에다가 "한개" 사진에 대한 여러 훈련 데이터들을 append 할 예정임
                samples_infos 형태 : [[],  [],  ...] -> 각 [] 형태  = [sub_color_hist, sub_grad_hist, obj_color_hist, obj_grad_hist] (총 길이 : 2184)
                directions 형태    : [[],  [],  ...] -> 각 [] 형태  = [one_hot] (총 길이 : 16)
                ratios 형태        : [[],  [],  ...] -> 각 [] 형태  = [one_hot, float value] (총 길이 : 9)
                sub_ids 형태       : [int, int, ...]
                obj_ids 형태       : [int, int, ...]
                labels 형태        : [[],  [],  ...] -> 각 [] 형태  = [one_hot] (총 길이 : 70)
                """
                samples_infos = [] 
                directions = []    
                ratios = []        
                sub_ids = []       
                obj_ids = []
                if mode == 0:
                    labels = []        

                for sample in ano:
                    # "한개" 사진의 "한개" 훈련 데이터의 data parsing
                    gt_predicates = sample['predicate']
                    gt_object_id = sample['object']['category']     # int 형
                    gt_object_loc = sample['object']['bbox']        # list 형
                    gt_subject_id = sample['subject']['category']   # int 형
                    gt_subject_loc = sample['subject']['bbox']      # list 형

                    # "한개" 사진의 "한개" 훈련 데이터의 subject와 object의 방향 정보 one_hot 형태로 획득
                    sub_bbox_center = feature_extractor.cal_bbox_center(gt_subject_loc[0], gt_subject_loc[1], gt_subject_loc[2], gt_subject_loc[3])
                    obj_bbox_center = feature_extractor.cal_bbox_center(gt_object_loc[0],gt_object_loc[1], gt_object_loc[2], gt_object_loc[3])
                    dir_compass = feature_extractor.cal_sub2obj_direction(sub_bbox_center[0], obj_bbox_center[0], sub_bbox_center[1], obj_bbox_center[1])
                    dir_one_hot = feature_extractor.one_hot(dir_compass)

                    # "한개" 사진의 "한개" 훈련 데이터의 subject와 object bbox 비율 및 면적 정보 획득
                    bbox_ratio_with_Area = feature_extractor.cal_bbox_WnH(gt_subject_loc, gt_object_loc, add_area_ratio=1)


                    # append the collected info
                    samples_infos.append(gt_subject_loc + [gt_subject_id] + gt_object_loc + [gt_object_id]) # [sub_loc] + [sub_id] + [obj_loc] + [obj_id] ==> [sub_loc, sub_id, obj_loc, obj_id]
                    directions.append(dir_one_hot.tolist())
                    ratios.append(bbox_ratio_with_Area.tolist())
                    sub_ids.append([gt_subject_id])
                    obj_ids.append([gt_object_id])
                    if mode == 0:
                        predicates = np.zeros(self.pre_category_num()) # convert predicates to one-hot form
                        for p in gt_predicates:
                            predicates[p] = 1
                        labels.append(predicates.tolist())

                # one_pic_histograms: np.array([],[],[], ...)
                one_pic_histograms = feature_extractor.extract_features(cv2.imread(file_list[file]), samples_infos)
                # one_pic_histograms.tolist(): [[],[],[], ...] + [[],[],[], ...] = [[],[],[],[],[],[], ...]
                histogram_builder = histogram_builder + one_pic_histograms.tolist()
                direction_builder = direction_builder + directions
                ratio_builder = ratio_builder + ratios
                sub_id_builder = sub_id_builder + sub_ids
                obj_id_builder = obj_id_builder + obj_ids
                if mode == 0:
                    gt_labels_builder = gt_labels_builder + labels

            total_pic_histograms = np.array(histogram_builder)
            total_pic_directions = np.array(direction_builder)
            total_pic_ratios = np.array(ratio_builder)
            total_pic_sub_ids = np.array(sub_id_builder)
            total_pic_obj_ids = np.array(obj_id_builder)
            if mode == 0:
                total_pic_labels = np.array(gt_labels_builder)

            with open(histogram_path, 'wb') as fw:
                pickle.dump(total_pic_histograms, fw)
            with open(dir_path, 'wb') as fw:
                pickle.dump(total_pic_directions, fw)
            with open(ratio_path, 'wb') as fw:
                pickle.dump(total_pic_ratios, fw)
            with open(sub_path, 'wb') as fw:
                pickle.dump(total_pic_sub_ids, fw)
            with open(obj_path, 'wb') as fw:
                pickle.dump(total_pic_obj_ids, fw)
            if mode == 0:
                with open(label_path, 'wb') as fw:
                    pickle.dump(total_pic_labels, fw)

        else:
            print('Loading data ...')
            with open(histogram_path, 'rb') as f:
                total_pic_histograms = pickle.load(f)
            with open(dir_path, 'rb') as f:
                total_pic_directions = pickle.load(f)
            with open(ratio_path, 'rb') as f:
                total_pic_ratios = pickle.load(f)
            with open(sub_path, 'rb') as f:
                total_pic_sub_ids = pickle.load(f)
            with open(obj_path, 'rb') as f:
                total_pic_obj_ids = pickle.load(f)
            if mode == 0:
                with open(label_path, 'rb') as f:
                    total_pic_labels = pickle.load(f)
        
        if mode == 0:
            return np.concatenate((total_pic_histograms, total_pic_directions, total_pic_ratios, total_pic_sub_ids, total_pic_obj_ids), axis=1), total_pic_labels 
        elif mode == 1:
            return np.concatenate((total_pic_histograms, total_pic_directions, total_pic_ratios, total_pic_sub_ids, total_pic_obj_ids), axis=1)
        else:
            raise ValueError('ARGUMENT_ERROR: mode should be 0 or 1')
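Given the per-sample lengths noted in the comments above (2184 histogram + 16 direction + 9 ratio + 1 subject id + 1 object id), the concatenated matrix returned in mode 0 should be N x 2211; a quick sanity check (hypothetical `model` instance):

features, labels = model.__prepare_data__('train', cache_root='cache', mode=0)
assert features.shape[1] == 2184 + 16 + 9 + 1 + 1  # 2211 columns per sample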
Example #14
def show_ImgFeatureInfo(imgPath, sub_region, obj_region, sub_name, obj_name,
                        pred_name):
    feature = FeatureExtractor()
    img = cv2.imread(imgPath)

    sub_color = (0, 0, 255)
    obj_color = (255, 0, 0)
    pred_color = (219, 112, 147)
    attention_color = (0, 255, 0)
    '''
    Draw the subject info on the image
    '''
    # subject bbox
    cv2.rectangle(img, (sub_region[0], sub_region[3]),
                  (sub_region[2], sub_region[1]),
                  sub_color,
                  thickness=3)
    # subject label
    cv2.putText(img,
                "Subject : " + sub_name, (sub_region[0], sub_region[1] - 20),
                fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                fontScale=0.9,
                color=sub_color,
                thickness=2)
    '''
    Draw the object info on the image
    '''
    # object bbox
    cv2.rectangle(img, (obj_region[0], obj_region[3]),
                  (obj_region[2], obj_region[1]),
                  obj_color,
                  thickness=3)
    # object label
    cv2.putText(img,
                "Object : " + obj_name, (obj_region[0], obj_region[1] - 20),
                fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                fontScale=0.9,
                color=obj_color,
                thickness=2)
    '''
    Draw the predicate info on the image
    '''
    # subject center (x,y)
    sub_center = feature.cal_bbox_center(sub_region[0], sub_region[1],
                                         sub_region[2], sub_region[3])
    # object center (x,y)
    obj_center = feature.cal_bbox_center(obj_region[0], obj_region[1],
                                         obj_region[2], obj_region[3])
    # predicate arrowed line
    cv2.arrowedLine(img, sub_center, obj_center, color=pred_color, thickness=3)
    # predicate arrowed line direction
    direction = feature.cal_sub2obj_direction(sub_center[0],
                                              obj_center[0],
                                              sub_center[1],
                                              obj_center[1],
                                              print_mode=1)
    cv2.putText(img,
                "(" + direction + ")",
                (obj_center[0] + 10, obj_center[1] - 30),
                fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                fontScale=0.9,
                color=pred_color,
                thickness=3)
    # predicate label
    cv2.putText(img,
                pred_name, (22, 22),
                fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                fontScale=0.9,
                color=pred_color,
                thickness=2)

    attention_region = find_attention_window(sub_region, obj_region)
    cv2.rectangle(img, (attention_region[0], attention_region[3]),
                  (attention_region[2], attention_region[1]),
                  attention_color,
                  thickness=1)
    '''
    Show the image with all the info drawn
    '''
    cv2.imshow("Image with Infos", img)
Example #15
        prob_dist = ProbDist()

        for label in self.label_prob.keys():
            prob_dist.set(label, self.label_prob.logprob(label))
            for feat, val in features_copy.items():
                if (label, feat) not in self.label_feat_prob:
                    print("ERROR")
                p_dist = self.label_feat_prob[label, feat]
                prob_dist.inc(label, p_dist.logprob(val))
        return prob_dist


if __name__ == '__main__':

    reader = TokenReader()
    feature_extractor = FeatureExtractor({'suf':'3'})
    sentences = reader.read_whitespaced_tokens("data/train_chunk.txt")
    sentences.extend(reader.read_whitespaced_tokens("data/test_chunk.txt"))
    naive_bayes = NaiveBayesClassifier()
    featureset = []
    correct_answers = []
    print "Extracting features."
    for sentence in sentences:
        for i, token in enumerate(sentence):
            features = feature_extractor.extract_features(sentence, i) 
            label = token.tags["POS"]
            featureset.append((label, features))
    print "Extracted for {0} tokens".format(len(featureset))
    size = int(len(featureset) * 0.1)
    train_set, test_set = featureset[size:], featureset[:size]
Example #16
class SLAMController:

	def __init__(self, cap):
		self.frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH) // 2)
		self.frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT) // 2)
		self.total_frame = cap.get(cv2.CAP_PROP_FRAME_COUNT)
		print('WIDTH: {}, HEIGHT: {}, FRAME_COUNT: {}'.format(self.frame_width, self.frame_height, self.total_frame))

		self.feature_extractor = FeatureExtractor()

		self.frame_idx = 0

		# frame: keypoints, poses and 3D points
		self.frames = []
		# point: keypoints index, frame index
		self.points = []
		self.K = np.array([[645.2, 0, self.frame_width//2],
						   [0, 645.2, self.frame_height//2],
						   [0, 0, 1]])

	def __str__(self):
		return "Controller: frames: width {} height {} total {}".format(self.frame_width, self.frame_height, self.total_frame)

	def process_frame(self, frame):
		'''
		main controller function that does basically everything
		'''
		# do nothing if it is the first frame
		
		image, curr_frame = self.preprocessing_frame(frame)
		self.frames.append(curr_frame)

		if self.frame_idx - 1 < 0:
			self.frame_idx += 1
			return image, None
		if self.frame_idx >= self.total_frame:
			# TODO: throw exceptions
			print("current frame out of bounds")
			return None, None

		prev_frame = self.frames[self.frame_idx - 1]
		matches = None
		# if we can find keypoints for both frames
		if prev_frame.kps is not None and curr_frame.kps is not None:
			
			# indices for matched keypoints
			curr_inliers, prev_inliers = self.feature_extractor.feature_matching(curr_frame.kps, curr_frame.des, prev_frame.kps, prev_frame.des, self.K)
			
			# update connection graph between the two frames
			prev_frame.rightInliers = prev_inliers
			curr_frame.leftInliers = curr_inliers
			
			if prev_frame.pose is None:
				# use matches to calculate fundamental matrix
				# perform triangulation with P = [I | 0] and P' = [M | v]

				self.TwoViewPoseEstimation(curr_frame, prev_frame, image=frame)
			else:
				# find the 3D points in the previous frame
				# EPnP for pose estimation to only update current frame's camera pose
				self.EssentialPoseEstimation(curr_frame, prev_frame, image=frame)

			# shape of matches: 2 x n x 2
			# post-processing the keypoints data
			kp1 = curr_frame.kps[curr_inliers]
			kp2 = prev_frame.kps[prev_inliers]
			matches = np.stack((kp1, kp2), axis=0)
		
			# clear keypoints and descriptors from the previous model after matching, memory efficiency
			# prev_model.clear()
		self.frame_idx += 1
		return image, matches

	# any preprocessing functionality here
	def preprocessing_frame(self, frame):
		frame_resize = cv2.resize(frame, dsize=(self.frame_width, self.frame_height))
		kps, des = self.feature_extractor.feature_detecting(frame_resize, mode='feat')
		kps = np.array([item.pt for item in kps])
		print('changing keypoints to np array')
		model = Frame(kps, des)
		return frame_resize, model

	def find_union_intersection(self, curr_frame, prev_frame):
		poseIdx, triIdx = [], []
		for i, item in enumerate(prev_frame.rightInliers):
			if item in prev_frame.leftInliers:
				poseIdx.append((item, curr_frame.leftInliers[i]))
			else:
				triIdx.append((item, curr_frame.leftInliers[i]))
		assert len(poseIdx) + len(triIdx) == len(curr_frame.leftInliers)
		return poseIdx, triIdx
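	# illustration (toy data): with prev_frame.rightInliers = [4, 7, 9], prev_frame.leftInliers = [2, 7],
	# and curr_frame.leftInliers = [0, 1, 2], this yields poseIdx = [(7, 1)] (keypoints that already carry
	# a 3D point, reused for pose estimation) and triIdx = [(4, 0), (9, 2)] (new points to triangulate)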

	# EPnP estimation for the projection matrix P,
	# given 3D points from previous frames and 2D points in the current frame
	def AbsolutePoseEstimation(self, curr_frame, prev_frame):
		assert(len(prev_frame.rightInliers) == len(curr_frame.leftInliers)) 
		poseIdx, triIdx = self.find_union_intersection(curr_frame, prev_frame)
		pts3D = prev_frame.get_3D_points([idx[0] for idx in poseIdx])
		X = np.hstack([item.get_data().reshape(-1,1) for item in pts3D])
		# find the 2d points that are related to 3d points
		pts2D = curr_frame.kps[[idx[1] for idx in poseIdx]]
		x = np.hstack([item.reshape(-1,1) for item in pts2D])
		x_hat = NormalizePoints(Homogenize(x), self.K)
		P = EPnP(x_hat, Dehomogenize(X))
		for i, item in enumerate(poseIdx):
			pts3D[i].add_observation(point=curr_frame.kps[item[1]].reshape(-1,1), frame_idx=self.frame_idx)
			curr_frame.add_3D_point(item[1], pts3D[i])
			
		curr_frame.pose = Pose(P[:, :3], P[:, -1])

		pts1, pts2 = prev_frame.kps[[idx[0] for idx in triIdx]], curr_frame.kps[[idx[1] for idx in triIdx]]
		pts1, pts2 = Homogenize(pts1.T), Homogenize(pts2.T)
		n = pts1.shape[1]
		norm_pts1, norm_pts2 = NormalizePoints(pts1, self.K), NormalizePoints(pts2, self.K)
		Xs = Triangulation(norm_pts1, norm_pts2, prev_frame.pose.P(), curr_frame.pose.P(), option='linear', verbose=False)
		assert Xs.shape[1] == len(triIdx)
		for i, item in enumerate(triIdx):
			p3d = Point3D(Xs[:, i].reshape(-1,1))
			# add new 3d point
			self.points.append(p3d)
			p3d.add_observation(point=prev_frame.kps[item[0]].reshape(-1,1), frame_idx=self.frame_idx-1)
			p3d.add_observation(point=curr_frame.kps[item[1]].reshape(-1,1), frame_idx=self.frame_idx)
			prev_frame.add_3D_point(item[0], p3d)
			curr_frame.add_3D_point(item[1], p3d)
		assert curr_frame.has_all(curr_frame.leftInliers)

	def EssentialPoseEstimation(self, curr_frame, prev_frame, image):
		pts1, pts2 = prev_frame.kps[prev_frame.rightInliers], curr_frame.kps[curr_frame.leftInliers]
		pts1, pts2 = Homogenize(pts1.T), Homogenize(pts2.T)
		n = pts1.shape[1]
		norm_pts1, norm_pts2 = NormalizePoints(pts1, self.K), NormalizePoints(pts2, self.K)
		E = DLT_E(norm_pts1, norm_pts2)
		I, P2 = Decompose_Essential(E, norm_pts1, norm_pts2)
		_, P3 = Project_Essential(I, P2, prev_frame.pose.P())
		assert(len(prev_frame.rightInliers) == len(curr_frame.leftInliers)) 
		poseIdx, triIdx = self.find_union_intersection(curr_frame, prev_frame)
		pts3D = prev_frame.get_3D_points([idx[0] for idx in poseIdx])
		for i, item in enumerate(poseIdx):
			pts3D[i].add_observation(point=curr_frame.kps[item[1]].reshape(-1,1), frame_idx=self.frame_idx)
			curr_frame.add_3D_point(item[1], pts3D[i])

		curr_frame.pose = Pose(P3[:, :3], P3[:, -1])
		pts1, pts2 = prev_frame.kps[[idx[0] for idx in triIdx]], curr_frame.kps[[idx[1] for idx in triIdx]]
		pts1, pts2 = Homogenize(pts1.T), Homogenize(pts2.T)
		n = pts1.shape[1]
		norm_pts1, norm_pts2 = NormalizePoints(pts1, self.K), NormalizePoints(pts2, self.K)
		Xs = Triangulation(norm_pts1, norm_pts2, prev_frame.pose.P(), curr_frame.pose.P(), option='linear', verbose=False)
		assert Xs.shape[1] == len(triIdx)
		for i, item in enumerate(triIdx):
			pt = curr_frame.kps[item[1]].astype(int)
			p3d = Point3D(Xs[:, i].reshape(-1,1), image[pt[0], pt[1], :].reshape(3,1))
			# add new 3d point
			self.points.append(p3d)
			p3d.add_observation(point=prev_frame.kps[item[0]].reshape(-1,1), frame_idx=self.frame_idx-1)
			p3d.add_observation(point=curr_frame.kps[item[1]].reshape(-1,1), frame_idx=self.frame_idx)
			prev_frame.add_3D_point(item[0], p3d)
			curr_frame.add_3D_point(item[1], p3d)
		assert curr_frame.has_all(curr_frame.leftInliers)

	def TwoViewPoseEstimation(self, curr_frame, prev_frame, image):
		# create the essential matrix and 3D points, assuming the first pose (prev_frame) is [I | 0] and the second pose (curr_frame) is [R | t]
		# save for testing
		pts1, pts2 = prev_frame.kps[prev_frame.rightInliers], curr_frame.kps[curr_frame.leftInliers]
		pts1, pts2 = Homogenize(pts1.T), Homogenize(pts2.T)
		n = pts1.shape[1]
		norm_pts1, norm_pts2 = NormalizePoints(pts1, self.K), NormalizePoints(pts2, self.K)
		# use the normalized points to estimate essential matrix
		E = DLT_E(norm_pts1, norm_pts2)
		P1, P2 = Decompose_Essential(E, norm_pts1, norm_pts2)
		print('First camera R: {} t: {}'.format(P1[:, :3], P1[:, -1]))
		print('Second camera R: {} t: {}'.format(P2[:, :3], P2[:, -1]))
		prev_frame.pose = Pose(P1[:, :3], P1[:, -1])
		curr_frame.pose = Pose(P2[:, :3], P2[:, -1])
		Xs = Triangulation(norm_pts1, norm_pts2, P1, P2, option='linear', verbose=False)
		assert Xs.shape[1] == n
		for i in range(n):
			pt = curr_frame.kps[curr_frame.leftInliers[i]].astype(int)
			p3d = Point3D(Xs[:, i].reshape(-1,1), image[pt[0], pt[1], :].reshape(3,1))
			# add new 3d point
			self.points.append(p3d)
			p3d.add_observation(point=prev_frame.kps[prev_frame.rightInliers[i]].reshape(-1,1), frame_idx=self.frame_idx-1)
			p3d.add_observation(point=curr_frame.kps[curr_frame.leftInliers[i]].reshape(-1,1), frame_idx=self.frame_idx)
			prev_frame.add_3D_point(prev_frame.rightInliers[i], p3d)
			curr_frame.add_3D_point(curr_frame.leftInliers[i], p3d)
		assert n == len(curr_frame.leftInliers)

	def Optimization(self):
		pass
Example #17
import time

import cv2
import numpy as np
from common import BOX_SIZE, LABEL_NORMAL
from feature import FeatureExtractor
from classifier import DeepClassification

dc = DeepClassification()
fe = FeatureExtractor()


class Box:
    def __init__(self, top_left_coord, top_right_coord, bottom_right_coord,
                 bottom_left_coord, pixel_array):
        self.__top_left_coord = top_left_coord
        self.__top_right_coord = top_right_coord
        self.__bottom_right_coord = bottom_right_coord
        self.__bottom_left_coord = bottom_left_coord
        self.__pixel_array = pixel_array
        self.__feature_array = None
        self.__labels = list()
        self.__confidences = list()
        self.__lung_pixels = list()

    def set_a_lung_pixel(self, x, y):
        self.__lung_pixels.append([x, y])

    def get_lung_pixels(self):
        return self.__lung_pixels
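A minimal usage sketch of the Box container above (coordinates and pixels are hypothetical):

box = Box((0, 0), (0, 31), (31, 31), (31, 0), pixel_array=np.zeros((32, 32)))
box.set_a_lung_pixel(5, 7)
print(box.get_lung_pixels())  # [[5, 7]]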
Example #18
class TestDQN:
    def __init__(self,
                 double_dqn=True,
                 dueling_dqn=False,
                 gamma=0.9,
                 replace_target_iter=10,
                 batch_size=32,
                 memory_size=1000,
                 prioritized_replay=True,
                 n_episodes=100,
                 n_backup_episodes=10,
                 resume=False,
                 demo_dir=None,
                 demo_pretrain_steps=50,
                 data_dir=None,
                 log_dir=None,
                 model_dir=None,
                 supervised_model=None,
                 **kwargs):
        self.logger = logging.getLogger("TestDQN")
        self.data_dir = data_dir
        if not os.path.exists(data_dir):
            os.makedirs(data_dir)
        self.model_dir = model_dir if model_dir else self.data_dir
        self.log_dir = log_dir if log_dir else self.model_dir

        self.replay_memory = ReplayMemory(
            memory_size=memory_size, prioritized_replay=prioritized_replay)
        self.demo_memory = ReplayMemory(memory_size=memory_size,
                                        prioritized_replay=prioritized_replay)

        self.fe = FeatureExtractor(**kwargs)
        self.et = ExploreStrategy(n_episodes=n_episodes,
                                  feature_extractor=self.fe,
                                  supervised_model=supervised_model,
                                  **kwargs)

        self.double_dqn = double_dqn
        self.dueling_dqn = dueling_dqn
        self.q_eval, self.q_next = self._build_net()

        self.gamma = gamma
        self.replace_target_iter = replace_target_iter
        self.batch_size = batch_size
        self.n_episodes = n_episodes
        self.n_backup_episodes = n_backup_episodes
        self.resume = resume
        self.demo_dir = demo_dir
        self.demo_pretrain_steps = demo_pretrain_steps
        self._n_learn_steps = 0

    def _build_net(self):
        def build_q_func_old():
            s, a = [
                Input(shape=shape)
                for shape in self.fe.get_feature_shape_old()
            ]
            # inputs = Input(shape=self.fe.get_feature_shape())
            if self.dueling_dqn:
                sa = keras.layers.concatenate([s, a], -1)
                v = build_cnn(self.fe.task_dim)(s)
                a = build_cnn(self.fe.action_dim + self.fe.task_dim)(sa)
                q = Lambda(
                    lambda va: K.expand_dims(va[0] +
                                             (va[1] - K.mean(va[1])), -1),
                    output_shape=(1, ))([v, a])
            else:
                sa = keras.layers.concatenate([s, a], -1)
                q = build_cnn(self.fe.task_dim + self.fe.action_dim)(sa)
            return Model(inputs=[s, a], outputs=q)

        def build_cnn_old(n_dims):
            model = Sequential()
            model.add(
                Conv2D(32, (3, 3),
                       input_shape=(self.fe.height, self.fe.width, n_dims)))
            model.add(Activation('relu'))
            model.add(MaxPooling2D(pool_size=(2, 2)))

            model.add(Conv2D(32, (3, 3)))
            model.add(Activation('relu'))
            model.add(MaxPooling2D(pool_size=(2, 2)))

            model.add(Conv2D(64, (3, 3)))
            model.add(Activation('relu'))
            model.add(MaxPooling2D(pool_size=(2, 2)))

            # this converts our 3D feature maps to 1D feature vectors
            model.add(Flatten())
            model.add(Dense(64))
            model.add(Activation('relu'))
            model.add(Dropout(0.5))
            model.add(Dense(1))
            return model

        def build_q_func():
            dom_img, action_img, query_vec, action_vec = [
                Input(shape=shape) for shape in self.fe.get_feature_shape()
            ]
            # inputs = Input(shape=self.fe.get_feature_shape())
            dom_action_img = keras.layers.concatenate([dom_img, action_img],
                                                      -1)
            dom_action_img_dims = self.fe.dom_feature_image_n_channel + self.fe.action_feature_image_n_channel
            dom_action_vec = build_cnn(dom_action_img_dims)(dom_action_img)
            query_action_vec = keras.layers.concatenate(
                [query_vec, action_vec], -1)
            query_action_vec = Dense(32)(query_action_vec)
            feature_vec = keras.layers.concatenate(
                [dom_action_vec, query_action_vec], -1)
            # feature_vec = Dense(32)(feature_vec)
            # feature_vec = Dense(32, activation='relu')(feature_vec)
            p = Dense(1)(feature_vec)
            model = Model(inputs=[dom_img, action_img, query_vec, action_vec],
                          outputs=p)
            return model

        def build_cnn(n_dims):
            model = Sequential()

            model.add(
                Conv2D(32, (3, 3),
                       input_shape=(self.fe.height, self.fe.width, n_dims)))
            model.add(Activation('relu'))
            model.add(MaxPooling2D(pool_size=(2, 2)))

            model.add(Conv2D(32, (3, 3)))
            model.add(Activation('relu'))
            model.add(MaxPooling2D(pool_size=(2, 2)))

            model.add(Conv2D(32, (3, 3)))
            model.add(Activation('relu'))
            model.add(MaxPooling2D(pool_size=(2, 2)))

            model.add(Conv2D(16, (3, 3)))
            model.add(Activation('relu'))
            model.add(MaxPooling2D(pool_size=(2, 2)))

            # model.add(Dropout(0.5))
            # this converts our 3D feature maps to 1D feature vectors
            model.add(Flatten())
            model.add(Dense(32))
            return model

        q_eval = build_q_func()
        q_next = build_q_func()
        q_eval.compile(loss='mean_squared_error',
                       optimizer='adam',
                       metrics=['accuracy'])
        print(q_eval.summary())
        return q_eval, q_next

    def save_model(self):
        if not self.model_dir:
            return
        model_path = os.path.join(self.model_dir, 'q.h5')
        if os.path.exists(model_path):
            os.remove(model_path)
        self.q_eval.save(model_path, overwrite=True)

    def load_model(self):
        model_path = os.path.join(self.model_dir, 'q.h5')
        if not self.model_dir or not os.path.exists(model_path):
            self.logger.warning("Failed to load model.")
            return
        self.q_eval = keras.models.load_model(model_path)
        self.q_next.set_weights(self.q_eval.get_weights())

    def choose_action_with_model(self, task, q_func=None):
        actions = task.get_preferred_actions()
        if not actions:
            actions = task.state.possible_actions
        tasks = [task] + [task] * len(actions)
        actions = [task.state.finish_action] + actions
        qs = self.predict(tasks, actions, q_func)
        i = int(np.argmax(qs))
        return actions[i], qs[i]

    def predict(self, tasks, actions, q_func):
        if q_func is None:
            q_func = self.q_eval
        features = self.fe.get_feature(tasks, actions)
        return q_func.predict(x=features).squeeze(-1)

    def _learn(self, memory_source=None):
        """
        Fit Q function
        :param memory_source the memory buffer to learn from. Could be `replay`, `demo`, or `hybrid`
        :return max_q and q_error
        """
        # sample batch memory from all memory
        if memory_source == "replay":
            batch_memory = self.replay_memory.sample(self.batch_size)
        elif memory_source == "demo":
            batch_memory = self.demo_memory.sample(self.batch_size)
        else:
            demo_samples = self.demo_memory.sample(self.batch_size // 3)
            replay_samples = self.replay_memory.sample(self.batch_size -
                                                       len(demo_samples))
            batch_memory = demo_samples + replay_samples

        # check to replace target parameters
        self._n_learn_steps += 1
        if self._n_learn_steps % self.replace_target_iter == 0:
            self.q_next.set_weights(self.q_eval.get_weights())

        tasks = []
        actions = []
        q_targets = []
        for transition in batch_memory:
            task, action, task_ = transition.task, transition.action, transition.task_
            if task_.done:
                q_target = task_.reward
            else:
                if self.double_dqn:
                    action_, q_ = self.choose_action_with_model(
                        task_, q_func=self.q_eval)
                    q_ = self.predict([task_], [action_], self.q_next)[0]
                else:
                    action_, q_ = self.choose_action_with_model(
                        task_, q_func=self.q_next)
                q_target = task_.reward + self.gamma * q_
            tasks.append(task)
            actions.append(action)
            q_targets.append(q_target)

        self.q_eval.fit(x=self.fe.get_feature(tasks, actions),
                        y=np.array(q_targets),
                        epochs=1,
                        verbose=0)

        q_predicts = self.predict(tasks, actions, q_func=self.q_eval)
        errors = np.abs(q_predicts - q_targets)

        if self.replay_memory.prioritized_replay:
            for i in range(len(batch_memory)):
                batch_memory[i].error = errors[i]
        for t, a, q_t, q_p in zip(tasks, actions, list(q_targets),
                                  list(q_predicts)):
            self.logger.debug(
                u"Q_predict=%.3f, Q_target=%.3f, State:%s, Action:%s" %
                (q_p, q_t, t.state.state_str, a))
        return float(np.max(q_predicts)), float(np.mean(errors))

    def train(self, tasks, browser):
        env = WebBotEnv(tasks=tasks, browser=browser)
        stats = []

        def save_progress(save_stats=True,
                          save_fig=True,
                          save_model=False,
                          save_memory=False):
            try:
                if save_stats:
                    stats_path = os.path.join(self.model_dir,
                                              "training_stats.json")
                    json.dump(stats, open(stats_path, "w"), indent=2)
                if save_fig:
                    stats_png_path = os.path.join(self.log_dir,
                                                  "training_stats.png")
                    self._plot_training_stats(stats,
                                              self.et.n_explore_episodes,
                                              stats_png_path)
                if save_model:
                    self.save_model()
                if save_memory:
                    self.replay_memory.save(self.model_dir)
            except Exception as e:
                self.logger.warning(e)

        def resume_progress():
            # resume model
            self.load_model()
            # resume memory
            self.replay_memory.load(self.model_dir)
            # resume stats
            stats_path = os.path.join(self.model_dir, "training_stats.json")
            if os.path.exists(stats_path):
                stats.extend(json.load(open(stats_path)))

        if self.resume:
            resume_progress()

        if self.demo_dir:
            self.demo_memory.load(self.demo_dir)
            for task in tasks:
                self.demo_memory.update_rewards(task)
            for i in range(self.demo_pretrain_steps):
                self._learn(memory_source="demo")
            self.logger.info("Done pre-training on demos.")

        found_tasklets = {}
        for episode in range(1, self.n_episodes + 1):
            # initial observation
            env.reset()
            task = env.current_task.snapshot()
            self.logger.info("Episode %d/%d, task: %s" %
                             (episode, self.n_episodes, task.task_str))

            max_reward = 0
            while True:
                # break while loop when end of this episode
                if task.done or task.reward < -10:
                    break
                env.render()
                epsilon = self.et.get_epsilon(episode, task)

                # RL choose action based on current task snapshot
                if np.random.uniform() < epsilon:
                    action_type = "Explore"
                    action = self.et.choose_action_to_explore(task)
                else:
                    action_type = "Exploit"
                    action, q = self.choose_action_with_model(
                        task, q_func=self.q_eval)
                env.step(action)

                # self.fe.plot_feature(task, action)
                task_ = env.current_task.snapshot()
                self.replay_memory.store_transition(
                    Transition(task=task, action=action, task_=task_))
                # swap observation
                task = task_
                self.logger.info(
                    "\t%s, epsilon:%.3f, action:%s, %s" %
                    (action_type, epsilon, action, task.get_reward_str()))

                tasklet = task.get_tasklet()
                if tasklet not in found_tasklets:
                    found_tasklets[tasklet] = (task.total_reward, episode,
                                               task.state.screenshot)
                if task.total_reward > max_reward:
                    max_reward = task.total_reward

            if episode > self.et.n_explore_episodes:
                max_q, q_error = self._learn()
            else:
                max_q, q_error = None, None
            epsilon = self.et.get_epsilon(episode=episode)
            stats.append([episode, epsilon, max_reward, max_q, q_error])
            self.logger.info(
                "Episode %d/%d, epsilon %.3f, max_reward %.2f, max_q %.3f, q_error %.3f"
                % (episode, self.n_episodes, epsilon, max_reward,
                   np.nan if max_q is None else max_q,
                   np.nan if q_error is None else q_error))
            if episode % self.n_backup_episodes == 0:
                save_progress(save_fig=True,
                              save_model=False,
                              save_memory=False)
        save_progress(save_fig=True, save_model=True, save_memory=False)
        env.destroy()
        return found_tasklets

    def _plot_training_stats(self, stats, n_explore_episodes, stats_file_path):
        from mpl_toolkits.axes_grid1 import host_subplot
        import mpl_toolkits.axisartist as AA
        import matplotlib.pyplot as plt
        from matplotlib.ticker import MaxNLocator

        episodes, epsilons, rewards, max_qs, errors = zip(*stats)
        if len(stats) <= n_explore_episodes + 1:
            y_values = np.array(rewards)
        else:
            y_values = np.concatenate([
                rewards, max_qs[n_explore_episodes + 1:],
                errors[n_explore_episodes + 1:]
            ])
        y_range = int(np.min(y_values)) - 1, int(np.max(y_values)) + 1

        # plt.rcParams["figure.figsize"] = (10, 8)
        par0 = host_subplot(111, axes_class=AA.Axes)
        par1 = par0.twinx()
        par2 = par0.twinx()
        par3 = par0.twinx()
        plt.subplots_adjust(right=0.85)

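        # host_subplot gives a host axis whose twinx() calls create parasite
        # axes sharing the x-axis; the first parasite gets its own visible
        # right-hand axis, while par2 and par3 simply reuse the same y-range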
        new_fixed_axis = par1.get_grid_helper().new_fixed_axis
        par1.axis["right"] = new_fixed_axis(loc="right",
                                            axes=par1,
                                            offset=(0, 0))
        par1.axis["right"].toggle(all=True)

        par0.set_xlabel("Episode")
        par0.set_ylabel("Epsilon")
        par1.set_ylabel("Reward, Q max, Q error")

        par0.plot(episodes, epsilons, label="Epsilon")
        par1.plot(episodes,
                  rewards,
                  label="Reward",
                  marker='o',
                  linewidth=0,
                  markersize=2)
        par2.plot(episodes,
                  max_qs,
                  label="Q max",
                  marker='.',
                  linewidth=1,
                  markersize=1)
        par3.plot(episodes,
                  errors,
                  label="Q error",
                  marker='.',
                  linewidth=1,
                  markersize=1)

        par0.set_ylim([0, 1])
        par0.xaxis.set_major_locator(MaxNLocator(integer=True))
        par1.set_ylim(y_range)
        par2.set_ylim(y_range)
        par3.set_ylim(y_range)
        par0.legend(loc="upper left")

        plt.draw()
        # plt.show(block=False)
        if stats_file_path:
            plt.savefig(stats_file_path)
        else:
            plt.show()
        plt.gcf().clear()

    def execute(self, tasks, browser):
        env = WebBotEnv(tasks=tasks, browser=browser)
        for task in tasks:
            # initial observation
            env.reset(new_task=task)
            task = env.current_task.snapshot()
            self.logger.info("Executing task: %s" % task.task_str)
            while True:
                if task.done:
                    break
                env.render()
                action, q = self.choose_action_with_model(task,
                                                          q_func=self.q_eval)
                env.step(action)
                # self.fe.plot_feature(task, action)
                task = env.current_task.snapshot()
                self.logger.info("\tExploit, action:%s, reward:%.2f, done:%s" %
                                 (action, task.reward, task.done))
            self.logger.info("Got total_reward %.2f in task: %s" %
                             (task.total_reward, task.task_str))
        self.logger.info("Done executing tasks.")
Example #19
import dataset
import pandas as pd
import numpy as np
from feature import FeatureExtractor, get_feature
import joblib  # sklearn.externals.joblib was removed in scikit-learn 0.23+
########## test: load data ##########
data = dataset.load_simple_data()

########## test: FeatureExtractor ##########
my_extractor = FeatureExtractor(data[0:10])
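# (smoke test: the extractor is instantiated on only the first 10 samples)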


########## train pre-models ##########
#from prepare_models import n_grame_train, positive_train, word_train
#words = pd.read_csv('../datas/words.csv', names=['word'], header=None, dtype={'word': str}, encoding='utf-8')  # np.str was removed in NumPy 1.20
#words = words.applymap(lambda x: str(x).strip().lower())
#words = words.dropna()
#words = words.drop_duplicates()
#word_train(words['word'].tolist())
#
#positive = pd.read_csv('../datas/aleax100k.csv', names=['domain'], header=None, dtype={'domain': str}, encoding='utf-8')  # dtype key fixed to match the 'domain' column
#positive = positive.dropna()
#positive = positive.drop_duplicates()
#positive_train(positive['domain'].tolist())

########## AEIOU vowel counts ##########
#aeiou_corr_arr = my_extractor.count_aeiou()
#print(aeiou_corr_arr)
Example #20
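# NOTE: this excerpt assumes imports not shown here, e.g. numpy as np, a
# project-local `loader` module, FeatureExtractor, and the multi-view geometry
# helpers (Homogenize, NormalizePoints, DLT_E, Compose_Essential,
# Decompose_Essential, Project_Essential, Triangulation).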
def main():
    # visualization
    # view_2d, view_3d = SLAMView2D(640, 480), SLAMView3D()
    # view_3d = SLAMView3D()
    # view_2d = SLAMView2D()
    stereo_camera_gt, stereo_images, stereo_image_files = loader.get_stereo_data()
    # pick two consecutive images
    K = stereo_camera_gt[stereo_image_files[0]]["K"]
    print("calibration matrix: {}".format(K))
    ind_1, ind_2 = 9, 10
    img1_key, img2_key = stereo_image_files[ind_1], stereo_image_files[ind_2]

    # test visualization

    # cv2.imshow("images", np.concatenate((stereo_images[img1_key], stereo_images[img2_key]), axis=0))
    # cv2.waitKey(0)

    detector = FeatureExtractor()
    kp1, des1 = detector.feature_detecting(stereo_images[img1_key],
                                           mode='feat')
    kp2, des2 = detector.feature_detecting(stereo_images[img2_key],
                                           mode='feat')
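    # convert cv2.KeyPoint objects to plain (x, y) pixel coordinates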
    kp1 = np.array([item.pt for item in kp1])
    kp2 = np.array([item.pt for item in kp2])

    kp1_inliers, kp2_inliers = detector.feature_matching(
        kp1, des1, kp2, des2, K)

    matches = np.stack((kp1[kp1_inliers], kp2[kp2_inliers]), axis=0)
    disp = stereo_images[img2_key]
    # view_2d.draw_2d_matches(disp, matches)

    assert len(kp1_inliers) == len(kp2_inliers)
    print("{} matches found.".format(len(kp1_inliers)))
    pts1, pts2 = kp1[kp1_inliers], kp2[kp2_inliers]

    pts1, pts2 = Homogenize(pts1.T), Homogenize(pts2.T)
    n = pts1.shape[1]
    norm_pts1, norm_pts2 = NormalizePoints(pts1, K), NormalizePoints(pts2, K)
    # use the normalized points to estimate essential matrix
    E = DLT_E(norm_pts1, norm_pts2)
    # E = K.T @ F @ K

    Rt1 = np.hstack(
        (stereo_camera_gt[img1_key]["R"], stereo_camera_gt[img1_key]["t"]))
    Rt2 = np.hstack(
        (stereo_camera_gt[img2_key]["R"], stereo_camera_gt[img2_key]["t"]))
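    # compose a reference essential matrix from the two ground-truth camera
    # poses to sanity-check the DLT estimate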
    E_compose = Compose_Essential(Rt1, Rt2)

    print('rank of essential: {}'.format(np.linalg.matrix_rank(E)))
    print("Essential Matrix: {}".format(E))
    print("Ground Truth Essential: {}".format(E_compose))

    # decompose the essential matrix into two projective matrices
    # P1 = [I | 0] -> pts1, P2 = [R | t]
    I, P2 = Decompose_Essential(E, norm_pts1, norm_pts2, mode='note')

    P1, P3 = Project_Essential(I, P2, Rt1)
    cameras = [P1, P3]
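    # triangulate once with the ground-truth poses (Xs) and once with the
    # estimated camera pair (Xs1) so the reconstructions can be compared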
    Xs = Triangulation(norm_pts1, norm_pts2, Rt1, Rt2, verbose=False)
    Xs1 = Triangulation(norm_pts1, norm_pts2, I, P2, verbose=True)
    points = [Xs[:, i].reshape(-1, 1) for i in range(Xs.shape[1])]
    colors = [np.ones((3, 1)) for i in range(len(points))]
    # view_3d.draw_cameras_points(cameras, points, colors)

    # cv2.imshow("matches", disp)
    # cv2.waitKey(0)

    print("Rt2: {}".format(Rt2))
    print("P3: {}".format(P3))
    print('rank of essential: {}'.format(np.linalg.matrix_rank(E_compose)))
    U, d, Vt = np.linalg.svd(E_compose)
    print(d)
    print("Ground Truth Essential: {}".format(E_compose))

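    # NOTE: an essential matrix is only defined up to scale, so this strict
    # comparison assumes DLT_E and Compose_Essential use the same normalization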
    assert np.allclose(E, E_compose)
    print("PASS")