Example 1
    def f_info(self, w):
        train_loss = None
        val_loss = None

        s = self.net.get_status_info()
        if len(s) > 0: s += ', '

        w_0 = self.net.get_param_vec()
        self.net.set_noiseless_param_from_vec(w)
        self.setup_batch_normalization_mean_std()
        self.net.load_target(self.t_train)
        y = self.net.forward_prop(self.x_train,
                                  add_noise=False,
                                  compute_loss=True,
                                  is_test=True)
        train_loss = self.net.get_loss() / self.x_train.shape[0]
        train_acc = self._compute_accuracy(self.t_train, y.argmax(axis=1))

        if self.use_validation:
            self.net.load_target(self.t_val)
            y = self.net.forward_prop(self.x_val,
                                      add_noise=False,
                                      compute_loss=True,
                                      is_test=True)
            val_loss = self.net.get_loss() / self.x_val.shape[0]
            val_acc = self._compute_accuracy(self.t_val, y.argmax(axis=1))
            self.net.load_target(self.t_train)

            s += 'train loss %.4f, acc %.4f, val loss %.4f, acc ' % (
                train_loss, train_acc, val_loss)
            if self.best_obj is None or val_acc > self.best_obj:
                self.best_obj = val_acc
                self.best_w = w.copy()
                s += co.good_colored_str('%.4f' % val_acc)
            else:
                s += '%.4f' % val_acc
        else:
            s += 'train loss %.4f, acc ' % train_loss
            if self.best_obj is None or train_acc > self.best_obj:
                self.best_obj = train_acc
                self.best_w = w.copy()
                s += co.good_colored_str('%.4f' % train_acc)
            else:
                s += '%.4f' % train_acc

        self.net.set_param_from_vec(w_0)
        return s
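
The _compute_accuracy helper used above is not shown in any of these examples. A minimal standalone sketch of what such a helper might do, assuming the targets are integer class labels aligned with the argmax predictions (if the targets were one-hot encoded, they would need an argmax of their own first):

import numpy as np

def compute_accuracy(t, prediction):
    # Hypothetical stand-in for the _compute_accuracy method above:
    # the fraction of predictions that match the target labels.
    t = np.asarray(t)
    prediction = np.asarray(prediction)
    return float((t == prediction).mean())

# e.g. compute_accuracy([0, 1, 2, 2], [0, 1, 1, 2]) evaluates to 0.75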
Example 2
    def f_info(self, w):
        """
        This is a reference implementation of this function, but it can be
        customized for other learners as well.
        """
        train_loss = None
        val_loss = None

        w_0 = self.net.get_param_vec()

        self.net.set_noiseless_param_from_vec(w)
        #self.net.load_target(self.t_train)
        #self.net.forward_prop(self.x_train, add_noise=False, compute_loss=True)
        #train_loss = self.net.get_loss() / self.x_train.shape[0]
        train_loss = self.evaluate_loss_large_set(self.x_train, self.t_train)

        if self.use_validation:
            #self.net.load_target(self.t_val)
            #self.net.forward_prop(self.x_val, add_noise=False, compute_loss=True)
            #val_loss = self.net.get_loss() / self.x_val.shape[0]
            val_loss = self.evaluate_loss_large_set(self.x_val, self.t_val)

            s = 'train loss %.4f, val loss ' % train_loss
            if self.best_obj is None or val_loss < self.best_obj:
                self.best_obj = val_loss
                self.best_w = w.copy()
                s += co.good_colored_str('%.4f' % val_loss)
            else:
                s += '%.4f' % val_loss
        else:
            s = 'train loss '
            if self.best_obj is None or train_loss < self.best_obj:
                self.best_obj = train_loss
                self.best_w = w.copy()
                s += co.good_colored_str('%.4f' % train_loss)
            else:
                s += '%.4f' % train_loss

        self.net.load_target(self.t_train)
        self.net.set_param_from_vec(w_0)
        
        net_status = self.net.get_status_info()
        if len(net_status) > 0:
            s += ', ' + net_status
        return s
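
evaluate_loss_large_set is called but not defined in these snippets. Judging from the commented-out single-pass code above, it presumably computes the same summed loss divided by the number of examples, only in mini-batches so that large sets do not have to be propagated at once. A hedged sketch under that assumption, written as a free function taking the net explicitly (the batch_size value is illustrative, not from the source):

def evaluate_loss_large_set(net, x, t, batch_size=1000):
    # Hypothetical batched version of the commented-out code above.
    # Assumes net.get_loss() returns the summed loss of the most recent
    # forward pass, so dividing by the total number of examples gives
    # the same per-example loss as the single-pass variant.
    total_loss = 0.0
    n = x.shape[0]
    for start in range(0, n, batch_size):
        end = min(start + batch_size, n)
        net.load_target(t[start:end])
        net.forward_prop(x[start:end], add_noise=False, compute_loss=True)
        total_loss += net.get_loss()
    return total_loss / n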
Example 3
    def f_info(self, w):
        """
        This is a reference implementation of this function, but it can be
        customized for other learners as well.
        """
        train_loss = None
        val_loss = None

        w_0 = self.net.get_param_vec()

        self.net.set_noiseless_param_from_vec(w)
        self.setup_batch_normalization_mean_std()

        train_loss = self.evaluate_loss_large_set(self.x_train, self.t_train)

        s = self.net.get_status_info()
        if len(s) > 0:
            s += ', '

        if self.use_validation:
            val_loss = self.evaluate_loss_large_set(self.x_val, self.t_val)

            s += 'train loss %.4f, val loss ' % train_loss
            if self.best_obj is None or val_loss < self.best_obj:
                self.best_obj = val_loss
                self.best_w = w.copy()
                s += co.good_colored_str('%.4f' % val_loss)
            else:
                s += '%.4f' % val_loss
        else:
            s += 'train loss '
            if self.best_obj is None or train_loss < self.best_obj:
                self.best_obj = train_loss
                self.best_w = w.copy()
                s += co.good_colored_str('%.4f' % train_loss)
            else:
                s += '%.4f' % train_loss

        # self.net.load_target(self.t_train)
        self.net.set_param_from_vec(w_0)
        
        return s
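
All of the variants above follow the same pattern around the candidate weights: save the current parameter vector, load w noiselessly, evaluate the losses, and restore the original parameters before returning the status string. As a design note, that save/evaluate/restore dance could also be packaged as a context manager; a sketch assuming only the net interface already used in the examples (this helper is not part of the source):

from contextlib import contextmanager

@contextmanager
def temporary_params(net, w):
    # Temporarily install the candidate weights w, then restore the
    # previous parameters even if the evaluation raises.
    w_0 = net.get_param_vec()
    net.set_noiseless_param_from_vec(w)
    try:
        yield net
    finally:
        net.set_param_from_vec(w_0)

With such a helper, the body of f_info would reduce to a with temporary_params(self.net, w): block around the loss evaluation and status-string construction.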
Example 4
    def f_info(self, w):
        train_loss = None
        val_loss = None

        s = self.net.get_status_info()
        if len(s) > 0: s += ', '

        w_0 = self.net.get_param_vec()
        self.net.set_noiseless_param_from_vec(w)
        self.setup_batch_normalization_mean_std()
        self.net.load_target(self.t_train)
        y = self.net.forward_prop(self.x_train, add_noise=False, compute_loss=True, is_test=True)
        train_loss = self.net.get_loss() / self.x_train.shape[0]
        train_acc = self._compute_accuracy(self.t_train, y.argmax(axis=1))

        if self.use_validation:
            self.net.load_target(self.t_val)
            y = self.net.forward_prop(self.x_val, add_noise=False, compute_loss=True, is_test=True)
            val_loss = self.net.get_loss() / self.x_val.shape[0]
            val_acc = self._compute_accuracy(self.t_val, y.argmax(axis=1))
            self.net.load_target(self.t_train)

            s += 'train loss %.4f, acc %.4f, val loss %.4f, acc ' % (train_loss, train_acc, val_loss)
            if self.best_obj is None or val_acc > self.best_obj:
                self.best_obj = val_acc 
                self.best_w = w.copy()
                s += co.good_colored_str('%.4f' % val_acc)
            else:
                s += '%.4f' % val_acc
        else:
            s += 'train loss %.4f, acc ' % train_loss
            if self.best_obj is None or train_acc > self.best_obj:
                self.best_obj = train_acc
                self.best_w = w.copy()
                s += co.good_colored_str('%.4f' % train_acc)
            else:
                s += '%.4f' % train_acc

        self.net.set_param_from_vec(w_0)
        return s