Example #1
    def test(self, data):
        """Test routine"""

        # Check if model exists
        if not os.path.exists(self.save_file_best + ".index"):
            print("Model File {} does not exist! Quiting".format(
                self.save_file_best))
            exit(1)

        # Restore model
        print("Restoring from {}...".format(self.save_file_best))
        self.saver_best.restore(self.sess, self.save_file_best)

        # Run Test
        cur_global_step = 0  # dummy
        if self.config.vis_dump:
            test_mode_list = ["test"]
        else:
            # test_mode_list = ["valid", "test"]
            test_mode_list = ["test"]  # Only run testing
        for test_mode in test_mode_list:
            test_process(test_mode, self.sess, cur_global_step,
                         self.summary_op,
                         getattr(self, "summary_" + test_mode[:2]), self.x_in,
                         self.y_in, self.R_in, self.t_in, self.is_training,
                         None, None, None, self.logits, self.e_hat, self.loss,
                         data[test_mode],
                         getattr(self,
                                 "res_dir_" + test_mode[:2]), self.config)
Example #2
    def test(self, data):
        """Test routine"""

        # Check if model exists
        if not os.path.exists(self.save_file_best + ".index"):
            print("Model File {} does not exist! Quiting".format(
                self.save_file_best))
            exit(1)

        # Restore model
        print("Restoring from {}...".format(self.save_file_best))
        self.saver_best.restore(self.sess, self.save_file_best)
        # Run Test
        cur_global_step = 0  # dummy

        test_mode_list = ["test"]
        for test_mode in test_mode_list:
            te_res = test_process(test_mode,
                                  self.sess,
                                  cur_global_step,
                                  self.summary_va,
                                  self.x_in,
                                  self.R_in,
                                  self.t_in,
                                  self.is_training,
                                  data[test_mode],
                                  self.res_dir_va,
                                  self.config,
                                  False,
                                  w=self.w,
                                  delta=None)
Example #3
    def val(self, data_loader, epoch):
        """Validation routine."""

        va_res = 0
        cur_global_step = self.global_step
        va_res, summary_t = test_process(self.net, "valid", data_loader,
                                         self.res_dir_va, self.config, True)
        for entry in summary_t:
            self.tensorboardX.add_scalars(entry['tag'], {'val': entry['val']},
                                          self.global_step)
        # Higher is better
        if va_res > self.best_va_res:
            print("Saving best model with va_res = {}".format(va_res))
            self.best_va_res = va_res
            # Save best validation result
            with open(self.va_res_file, "w") as ofp:
                ofp.write("{:e}\n".format(self.best_va_res))
            # Save best model
            cprint('Saving best model at epoch %s' % epoch, 'yellow')
            self._save_checkpoint(self.net,
                                  self.optimizer,
                                  os.path.join(self.config.EXP_DIR,
                                               'best_checkpoint_G.pth.tar'),
                                  do_cleaning=False,
                                  epoch=epoch)
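
Example #3 logs each validation entry through tensorboardX's add_scalars. A minimal sketch of that call, assuming entries shaped like the {'tag': ..., 'val': ...} dicts iterated above and an illustrative log directory:

from tensorboardX import SummaryWriter

writer = SummaryWriter("runs/val_demo")      # illustrative log directory
summary_t = [{"tag": "err_q", "val": 0.42}]  # assumed entry format, as iterated above
global_step = 1000

for entry in summary_t:
    # One chart per tag, with a curve named "val", mirroring the loop above.
    writer.add_scalars(entry["tag"], {"val": entry["val"]}, global_step)
writer.close()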
Example #4
    def test(self, data):
        """Test routine"""

        # Check if model exists
        if not os.path.exists(self.save_file_best + ".index"):
            print("Model File {} does not exist! Quiting".format(
                self.save_file_best))
            exit(1)

        # Restore model
        print("Restoring from {}...".format(self.save_file_best))
        self.saver_best.restore(self.sess, self.save_file_best)
        # Run Test
        cur_global_step = 0  # dummy
        test_mode_list = ["test"]  # Only evaluate on test set
        for test_mode in test_mode_list:
            score = self.last_logit
            if self.bool_use_weight_for_score:
                print("score is from weights!")
                score = self.last_weights
            if self.config.weight_opt == "sigmoid_softmax":
                score = [
                    self.last_logit, self.logit_softmax, self.last_weights
                ]

            test_process_ins = [
                self.x_in, self.y_in, self.R_in, self.t_in, self.is_training
            ]

            if self.config.use_fundamental > 0:
                test_process_ins += [
                    self.T1_in, self.T2_in, self.K1_in, self.K2_in
                ]

            test_process(test_mode, self.sess, cur_global_step,
                         self.summary_op,
                         getattr(self,
                                 "summary_" + test_mode[:2]), test_process_ins,
                         None, None, None, self.logits, self.e_hat, self.loss,
                         self.precision, self.recall, self.last_e_hat, score,
                         self.last_x_in, data[test_mode],
                         getattr(self,
                                 "res_dir_" + test_mode[:2]), self.config)
Example #5
    def train(self, data):
        """Training function.

        Parameters
        ----------
        data_tr : tuple
            Training data.

        data_va : tuple
            Validation data.

        x_va : ndarray
            Validation data.

        y_va : ndarray
            Validation labels.

        """

        print("Initializing...")
        self.sess.run(tf.global_variables_initializer())

        # ----------------------------------------
        # Resume from a previous checkpoint if one exists
        latest_checkpoint = tf.train.latest_checkpoint(self.res_dir_tr)
        b_resume = latest_checkpoint is not None
        if b_resume:
            # Restore network
            print("Restoring from {}...".format(self.res_dir_tr))
            self.saver_cur.restore(self.sess, latest_checkpoint)
            # restore number of steps so far
            step = self.sess.run(self.global_step)
            # restore best validation result
            if os.path.exists(self.va_res_file):
                with open(self.va_res_file, "r") as ifp:
                    dump_res = ifp.read()
                dump_res = parse("{best_va_res:e}\n", dump_res)
                best_va_res = dump_res["best_va_res"]
        else:
            print("Starting from scratch...")
            step = 0
            best_va_res = -1

        # ----------------------------------------
        # Unpack the training split for convenience
        xs_tr = data["train"]["xs"]
        ys_tr = data["train"]["ys"]
        Rs_tr = data["train"]["Rs"]
        ts_tr = data["train"]["ts"]

        # ----------------------------------------
        # The training loop
        batch_size = self.config.train_batch_size
        max_iter = self.config.train_iter
        for step in trange(step, max_iter, ncols=self.config.tqdm_width):

            # ----------------------------------------
            # Batch construction

            # Get a random training batch
            ind_cur = np.random.choice(len(xs_tr), batch_size, replace=False)
            # Use minimum kp in batch to construct the batch
            numkps = np.array([xs_tr[_i].shape[1] for _i in ind_cur])
            cur_num_kp = numkps.min()
            # Actual construction of the batch
            xs_b = np.array([xs_tr[_i][:, :cur_num_kp, :] for _i in ind_cur
                             ]).reshape(batch_size, 1, cur_num_kp, 4)
            ys_b = np.array([ys_tr[_i][:cur_num_kp, :] for _i in ind_cur
                             ]).reshape(batch_size, cur_num_kp, 2)
            Rs_b = np.array([Rs_tr[_i]
                             for _i in ind_cur]).reshape(batch_size, 9)
            ts_b = np.array([ts_tr[_i]
                             for _i in ind_cur]).reshape(batch_size, 3)

            # ----------------------------------------
            # Train

            # Feed Dict
            feed_dict = {
                self.x_in: xs_b,
                self.y_in: ys_b,
                self.R_in: Rs_b,
                self.t_in: ts_b,
                self.is_training: True,
            }
            # Fetch
            fetch = {
                "optim": self.optim,
            }
            # Check if we want to write summary and check validation
            b_write_summary = ((step + 1) % self.config.report_intv) == 0
            b_validate = ((step + 1) % self.config.val_intv) == 0
            if b_write_summary or b_validate:
                fetch["summary"] = self.summary_op
                fetch["global_step"] = self.global_step
            # Run optimization
            try:
                res = self.sess.run(fetch, feed_dict=feed_dict)
            except (ValueError, tf.errors.InvalidArgumentError):
                print("Backward pass had numerical errors. "
                      "This training batch is skipped!")
                continue
            # Write summary and save current model
            if b_write_summary:
                self.summary_tr.add_summary(res["summary"],
                                            global_step=res["global_step"])
                self.saver_cur.save(self.sess,
                                    self.save_file_cur,
                                    global_step=self.global_step,
                                    write_meta_graph=False)

            # ----------------------------------------
            # Validation
            if b_validate:
                va_res = 0
                cur_global_step = res["global_step"]
                va_res = test_process("valid", self.sess, cur_global_step,
                                      self.summary_op, self.summary_va,
                                      self.x_in, self.y_in, self.R_in,
                                      self.t_in, self.is_training, None, None,
                                      None, self.logits, self.e_hat, self.loss,
                                      data["valid"], self.res_dir_va,
                                      self.config, True)
                # Higher is better
                if va_res > best_va_res:
                    print("Saving best model with va_res = {}".format(va_res))
                    best_va_res = va_res
                    # Save best validation result
                    with open(self.va_res_file, "w") as ofp:
                        ofp.write("{:e}\n".format(best_va_res))
                    # Save best model
                    self.saver_best.save(
                        self.sess,
                        self.save_file_best,
                        write_meta_graph=False,
                    )
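
The batch construction above crops every selected sample to the smallest keypoint count in the batch so the ragged samples stack into one dense tensor. A self-contained numpy sketch of that idea, with made-up shapes and batch size:

import numpy as np

# Hypothetical training set: samples with different numbers of keypoints,
# each shaped (1, num_kp, 4) like the "xs" entries above.
xs_tr = [np.random.rand(1, n, 4) for n in (1200, 900, 1500, 1000)]

batch_size = 2
ind_cur = np.random.choice(len(xs_tr), batch_size, replace=False)

# Crop to the minimum keypoint count so np.array() can stack the samples.
cur_num_kp = min(xs_tr[_i].shape[1] for _i in ind_cur)
xs_b = np.array([xs_tr[_i][:, :cur_num_kp, :] for _i in ind_cur])
xs_b = xs_b.reshape(batch_size, 1, cur_num_kp, 4)
print(xs_b.shape)  # (2, 1, cur_num_kp, 4)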
Example #6
    def train(self, data):
        """Training function.

        Parameters
        ----------
        data_tr : tuple
            Training data.

        data_va : tuple
            Validation data.

        x_va : ndarray
            Validation data.

        y_va : ndarray
            Validation labels.

        """

        print("Initializing...")
        self.sess.run(tf.global_variables_initializer())

        # ----------------------------------------
        # Resume from a previous checkpoint if one exists
        latest_checkpoint = tf.train.latest_checkpoint(self.res_dir_tr)
        b_resume = latest_checkpoint is not None
        if b_resume:
            # Restore network
            print("Restoring from {}...".format(self.res_dir_tr))
            self.saver_cur.restore(self.sess, latest_checkpoint)
            # restore number of steps so far
            step = self.sess.run(self.global_step)
            # restore best validation result
            if os.path.exists(self.va_res_file):
                with open(self.va_res_file, "r") as ifp:
                    dump_res = ifp.read()
                dump_res = parse("{best_va_res:e}\n", dump_res)
                best_va_res = dump_res["best_va_res"]
            if os.path.exists(self.va_res_file_ours_ransac):
                with open(self.va_res_file_ours_ransac, "r") as ifp:
                    dump_res = ifp.read()
                dump_res = parse("{best_va_res:e}\n", dump_res)
                best_va_res_ours_ransac = dump_res["best_va_res"]
        else:
            print("Starting from scratch...")
            step = 0
            best_va_res = -1
            best_va_res_ours_ransac = -1

        # ----------------------------------------
        if self.config.data_name.startswith("oan"):
            data_loader = iter(data["train"])
        else:
            # Unpack the training split for convenience
            xs_tr = data["train"]["xs"]
            ys_tr = data["train"]["ys"]
            Rs_tr = data["train"]["Rs"]
            ts_tr = data["train"]["ts"]
            T1s_tr = data["train"]["T1s"]
            T2s_tr = data["train"]["T2s"]
            K1s_tr = data["train"]["K1s"]
            K2s_tr = data["train"]["K2s"]

        # ----------------------------------------
        # The training loop
        batch_size = self.config.train_batch_size
        max_iter = self.config.train_iter

        for step in trange(step, max_iter, ncols=self.config.tqdm_width):
            # ----------------------------------------
            # Batch construction

            # Get a random training batch
            if self.config.data_name.startswith("oan"):
                try:
                    data_dict = next(data_loader)
                except StopIteration:
                    data_loader = iter(data["train"])
                    data_dict = next(data_loader)

                xs_b = data_dict["xs"]
                ys_b = data_dict["ys"]
                Rs_b = data_dict["Rs"].reshape(-1, 9)
                ts_b = data_dict["ts"].reshape(-1, 3)
                T1s_b = data_dict["T1s"]
                T2s_b = data_dict["T2s"]
                K1s_b = data_dict["K1s"]
                K2s_b = data_dict["K2s"]
            else:
                ind_cur = np.random.choice(len(xs_tr),
                                           batch_size,
                                           replace=False)
                # Use minimum kp in batch to construct the batch
                numkps = np.array([xs_tr[_i].shape[1] for _i in ind_cur])
                cur_num_kp = numkps.min()
                # Actual construction of the batch
                xs_b = np.array([
                    xs_tr[_i][:, :cur_num_kp, :] for _i in ind_cur
                ]).reshape(batch_size, 1, cur_num_kp, 4)
                ys_b = np.array([ys_tr[_i][:cur_num_kp, :] for _i in ind_cur
                                 ]).reshape(batch_size, cur_num_kp, 2)
                Rs_b = np.array([Rs_tr[_i]
                                 for _i in ind_cur]).reshape(batch_size, 9)
                ts_b = np.array([ts_tr[_i]
                                 for _i in ind_cur]).reshape(batch_size, 3)
                if self.config.use_fundamental > 0:
                    T1s_b = np.array([T1s_tr[_i] for _i in ind_cur])
                    T2s_b = np.array([T2s_tr[_i] for _i in ind_cur])
                    K1s_b = np.array([K1s_tr[_i] for _i in ind_cur])
                    K2s_b = np.array([K2s_tr[_i] for _i in ind_cur])
            # ----------------------------------------
            # Train

            # Feed Dict
            feed_dict = {
                self.x_in: xs_b,
                self.y_in: ys_b,
                self.R_in: Rs_b,
                self.t_in: ts_b,
                self.is_training: True,
            }

            # Add fundamental-matrix inputs when enabled
            if self.config.use_fundamental > 0:
                feed_dict[self.T1_in] = T1s_b
                feed_dict[self.T2_in] = T2s_b
                feed_dict[self.K1_in] = K1s_b
                feed_dict[self.K2_in] = K2s_b

            # Fetch
            fetch = {
                "optim": self.optim,
                "loss": self.loss,
                "precision": self.precision,
                "recall": self.recall,
            }
            # Check if we want to write summary and check validation
            b_write_summary = ((step + 1) % self.config.report_intv) == 0
            b_validate = ((step + 1) % self.config.val_intv) == 0
            if b_write_summary or b_validate:
                fetch["summary"] = self.summary_op
                fetch["global_step"] = self.global_step
            # Run optimization
            try:
                res = self.sess.run(fetch, feed_dict=feed_dict)
            except (ValueError, tf.errors.InvalidArgumentError):
                print("Backward pass had numerical errors. "
                      "This training batch is skipped!")
                continue
            # Write summary and save current model
            if b_write_summary:
                self.summary_tr.add_summary(res["summary"],
                                            global_step=res["global_step"])
                self.saver_cur.save(self.sess,
                                    self.save_file_cur,
                                    global_step=self.global_step,
                                    write_meta_graph=False)

            # ----------------------------------------
            # Validation
            if b_validate:
                va_res = 0
                cur_global_step = res["global_step"]
                score = self.last_logit  # default score: local attention
                if self.config.weight_opt == "sigmoid_softmax":
                    score = [
                        self.last_logit, self.logit_softmax, self.last_weights
                    ]

                test_process_ins = [
                    self.x_in, self.y_in, self.R_in, self.t_in,
                    self.is_training
                ]

                if self.config.use_fundamental > 0:
                    test_process_ins += [
                        self.T1_in, self.T2_in, self.K1_in, self.K2_in
                    ]

                va_res, va_res_ours_ransac = test_process(
                    "valid", self.sess, cur_global_step, self.summary_op,
                    self.summary_va, test_process_ins, None, None, None,
                    self.logits, self.e_hat, self.loss, self.precision,
                    self.recall, self.last_e_hat, score, self.last_x_in,
                    data["valid"], self.res_dir_va, self.config, True)
                # Higher is better
                if va_res > best_va_res:
                    print("Saving best model with va_res = {}".format(va_res))
                    best_va_res = va_res
                    # Save best validation result
                    with open(self.va_res_file, "w") as ofp:
                        ofp.write("{:e}\n".format(best_va_res))
                    # Save best model
                    self.saver_best.save(
                        self.sess,
                        self.save_file_best,
                        write_meta_graph=False,
                    )
                if va_res_ours_ransac > best_va_res_ours_ransac:
                    print("Saving best model with va_res_ours_ransac = {}".
                          format(va_res_ours_ransac))
                    best_va_res_ours_ransac = va_res_ours_ransac
                    # Save best validation result
                    with open(self.va_res_file_ours_ransac, "w") as ofp:
                        ofp.write("{:e}\n".format(best_va_res_ours_ransac))
                    # Save best model
                    self.saver_best.save(
                        self.sess,
                        self.save_file_best_ours_ransac,
                        write_meta_graph=False,
                    )
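
Both training examples persist the best validation score as a single exponent-formatted line and read it back when resuming, apparently with the parse package. A small stand-alone sketch of that round trip, with an illustrative file name:

from parse import parse  # the "parse" package, presumably what the examples above use

va_res_file = "best_va_res.txt"  # illustrative path
best_va_res = 0.1234

# Save: one exponent-formatted line, matching the "{:e}\n" writes above.
with open(va_res_file, "w") as ofp:
    ofp.write("{:e}\n".format(best_va_res))

# Resume: recover the float with the same format string.
with open(va_res_file, "r") as ifp:
    dump_res = ifp.read()
best_va_res = parse("{best_va_res:e}\n", dump_res)["best_va_res"]
print(best_va_res)  # 0.1234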