Example #1
 def _get_prediction(self, x, name=None, verbose=None):
     if verbose is None:
         verbose = self.verbose
     fc_shape = np.prod(x.shape[1:])  # type: int
     single_batch = int(NNConfig.BATCH_SIZE / fc_shape)
     if not single_batch:
         single_batch = 1
     if single_batch >= len(x):
         return self._sess.run(self._y_pred, {self._tfx: x})
     epoch = int(len(x) / single_batch)
     if len(x) % single_batch:  # leftover samples need one extra batch
         epoch += 1
     name = "Prediction" if name is None else "Prediction ({})".format(name)
     sub_bar = ProgressBar(max_value=epoch, name=name, start=False)
     if verbose >= NNVerbose.METRICS:
         sub_bar.start()
     rs, count = [], 0
     while count < len(x):
         count += single_batch
         if count >= len(x):
             rs.append(self._sess.run(self._y_pred, {self._tfx: x[count - single_batch:]}))
         else:
             rs.append(self._sess.run(self._y_pred, {self._tfx: x[count - single_batch:count]}))
         if verbose >= NNVerbose.METRICS:
             sub_bar.update()
     return np.vstack(rs)
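
A minimal standalone sketch of the same batch-splitting idea, with the remainder check in its corrected form (NumPy only; `batched_predict` and `predict_fn` are illustrative names, not part of the original class):

    import numpy as np

    def batched_predict(predict_fn, x, budget=1e6):
        """Split x so each batch feeds at most ~budget scalars to predict_fn."""
        fc_shape = int(np.prod(x.shape[1:]))
        single_batch = max(1, int(budget / fc_shape))
        if single_batch >= len(x):
            return predict_fn(x)
        n_batches = len(x) // single_batch
        if len(x) % single_batch:  # leftover samples need one extra batch
            n_batches += 1
        return np.vstack([predict_fn(x[i * single_batch:(i + 1) * single_batch])
                          for i in range(n_batches)])

With len(x) == 10 and single_batch == 3 this produces 4 batches of sizes 3, 3, 3, 1.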
Example #2
 def fit(self, x, y, sample_weight=None, clf=None, epoch=None, eps=None, **kwargs):
     if sample_weight is None:
         sample_weight = self._params["sample_weight"]
     if clf is None:
         clf = self._params["clf"]
     if epoch is None:
         epoch = self._params["epoch"]
     if eps is None:
         eps = self._params["eps"]
     x, y = np.atleast_2d(x), np.asarray(y)
     if clf is None:
         clf = "Cart"
         kwargs = {"max_depth": 1}
     self._kwarg_cache = kwargs
     self._clf = clf
     if sample_weight is None:
         sample_weight = np.ones(len(y)) / len(y)
     else:
         sample_weight = np.asarray(sample_weight)
     bar = ProgressBar(max_value=epoch, name="AdaBoost")
     for _ in range(epoch):
         tmp_clf = AdaBoost._weak_clf[clf](**kwargs)
         tmp_clf.fit(x, y, sample_weight=sample_weight)
         y_pred = tmp_clf.predict(x)
         # weighted error rate of the current weak learner, clipped to (eps, 1 - eps)
         em = min(max((y_pred != y).astype(np.int8).dot(sample_weight[..., None])[0], eps), 1 - eps)
         # learner weight: am = 0.5 * ln((1 - em) / em)
         am = 0.5 * log(1 / em - 1)
         sample_weight *= np.exp(-am * y * y_pred)
         sample_weight /= np.sum(sample_weight)
         self._clfs.append(deepcopy(tmp_clf))
         self._clfs_weights.append(am)
         bar.update()
     self._clfs_weights = np.array(self._clfs_weights, dtype=np.float32)
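
The per-round update above is the textbook AdaBoost recipe: the weighted error e_m is clipped away from 0 and 1, the learner weight is a_m = 0.5 * ln((1 - e_m) / e_m), and the sample weights are re-scaled toward the mistakes. A self-contained NumPy sketch on toy data (values are illustrative):

    import numpy as np

    y      = np.array([1, -1, 1, 1])        # true labels in {-1, +1}
    y_pred = np.array([1,  1, 1, 1])        # the weak learner misclassifies sample 1
    w      = np.full(len(y), 0.25)          # uniform initial sample weights
    eps    = 1e-12

    em = np.clip(w[y_pred != y].sum(), eps, 1 - eps)  # weighted error: 0.25
    am = 0.5 * np.log(1 / em - 1)                     # learner weight: ~0.549
    w *= np.exp(-am * y * y_pred)                     # up-weight the mistake
    w /= w.sum()                                      # renormalize to a distribution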
Example #3
 def fit(self, x, n_clusters=None, epoch=None, norm=None, animation_params=None):
     if n_clusters is None:
         n_clusters = self._params["n_clusters"]
     if epoch is None:
         epoch = self._params["epoch"]
     if norm is not None:
         self._params["norm"] = norm
     *animation_properties, animation_params = self._get_animation_params(animation_params)
     x = np.atleast_2d(x)
     arange = np.arange(n_clusters)[..., None]
     x_high_dim, labels_cache, counter = x[:, None, ...], None, 0
     self._centers = x[np.random.permutation(len(x))[:n_clusters]]
     bar = ProgressBar(max_value=epoch, name="KMeans")
     ims = []
     for i in range(epoch):
         labels = self.predict(x_high_dim, high_dim=True)
         if labels_cache is None:
             labels_cache = labels
         else:
             if np.all(labels_cache == labels):
                 bar.update(epoch)
                 break
             else:
                 labels_cache = labels
         for j, indices in enumerate(labels == arange):
             self._centers[j] = np.average(x[indices], axis=0)
         counter += 1
         animation_params["extra"] = self._centers
         self._handle_animation(i, x, labels, ims, animation_params, *animation_properties)
         bar.update()
     self._counter = counter
     self._handle_mp4(ims, animation_properties)
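
Each iteration above is one Lloyd step: assign every point to its nearest center, then move each center to the mean of its members, stopping once the labels no longer change. A standalone sketch (NumPy only), with an empty-cluster guard that the method above omits:

    import numpy as np

    def kmeans_step(x, centers):
        """One Lloyd iteration over data x (n, d) and centers (k, d)."""
        dists = np.linalg.norm(x[:, None, :] - centers[None, :, :], axis=2)
        labels = dists.argmin(axis=1)
        for j in range(len(centers)):
            members = x[labels == j]
            if len(members):               # skip empty clusters instead of averaging nothing
                centers[j] = members.mean(axis=0)
        return labels, centers

    rng = np.random.RandomState(0)
    x = rng.randn(100, 2)
    centers = x[rng.permutation(len(x))[:3]].copy()
    for _ in range(10):
        labels, centers = kmeans_step(x, centers)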
Example #4
 def _get_prediction(self, x, name=None, batch_size=1e6, verbose=None):
     if verbose is None:
         verbose = self.verbose
     fc_shape = np.prod(x.shape[1:])  # type: int
     single_batch = int(batch_size / fc_shape)
     if not single_batch:
         single_batch = 1
     if single_batch >= len(x):
         return self._get_activations(x, predict=True).pop()
     epoch = int(len(x) / single_batch)
     if len(x) % single_batch:  # leftover samples need one extra batch
         epoch += 1
     name = "Prediction" if name is None else "Prediction ({})".format(name)
     sub_bar = ProgressBar(max_value=epoch, name=name, start=False)
     if verbose >= NNVerbose.METRICS:
         sub_bar.start()
     rs, count = [self._get_activations(x[:single_batch], predict=True).pop()], single_batch
     if verbose >= NNVerbose.METRICS:
         sub_bar.update()
     while count < len(x):
         count += single_batch
         if count >= len(x):
             rs.append(self._get_activations(x[count-single_batch:], predict=True).pop())
         else:
             rs.append(self._get_activations(x[count-single_batch:count], predict=True).pop())
         if verbose >= NNVerbose.METRICS:
             sub_bar.update()
     return np.vstack(rs)
Example #5
    def fit(self, x, y, sample_weight=None, lr=None, epoch=None, animation_params=None):
        if sample_weight is None:
            sample_weight = self._params["sample_weight"]
        if lr is None:
            lr = self._params["lr"]
        if epoch is None:
            epoch = self._params["epoch"]
        *animation_properties, animation_params = self._get_animation_params(animation_params)

        x, y = np.atleast_2d(x), np.asarray(y)
        if sample_weight is None:
            sample_weight = np.ones(len(y))
        else:
            sample_weight = np.asarray(sample_weight) * len(y)

        self._w = np.random.random(x.shape[1])
        self._b = 0.
        ims = []
        bar = ProgressBar(max_value=epoch, name="Perceptron")
        for i in range(epoch):
            y_pred = self.predict(x, True)
            err = -y * y_pred * sample_weight
            idx = np.argmax(err)
            if err[idx] < 0:
                bar.terminate()
                break
            w_norm = np.linalg.norm(self._w)
            delta = lr * y[idx] * sample_weight[idx] / w_norm
            self._w += delta * (x[idx] - y_pred[idx] * self._w / w_norm ** 2)
            self._b += delta
            self._handle_animation(i, x, y, ims, animation_params, *animation_properties)
            bar.update()
        self._handle_mp4(ims, animation_properties)
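
The method above is a weighted, norm-scaled variant of the perceptron; the classic rule it builds on simply nudges the hyperplane toward any misclassified sample. A minimal sketch, assuming labels in {-1, +1}:

    import numpy as np

    def perceptron_fit(x, y, lr=1.0, epoch=100):
        """Classic perceptron: w += lr * y_i * x_i for a misclassified sample i."""
        w, b = np.zeros(x.shape[1]), 0.0
        for _ in range(epoch):
            wrong = np.flatnonzero(y * (x.dot(w) + b) <= 0)
            if len(wrong) == 0:            # converged: no misclassified samples left
                break
            i = wrong[0]
            w += lr * y[i] * x[i]
            b += lr * y[i]
        return w, b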
Example #6
def run(clf):
    acc_records, y_records = [], []
    bar = ProgressBar(max_value=10, name="Main")
    for _ in range(10):
        if clf == "Naive Bayes":
            _clf = SKMultinomialNB(alpha=0.1)
        elif clf == "Non-linear SVM":
            _clf = SKSVM()
        else:
            _clf = SKLinearSVM()
        rs = main(_clf)
        acc_records.append(rs[0])
        y_records += rs[1]
        bar.update()
    acc_records = np.array(acc_records) * 100

    plt.figure()
    plt.boxplot(acc_records, vert=False, showmeans=True)
    plt.show()

    from Util.DataToolkit import DataToolkit
    idx = np.argmax(acc_records)  # type: int
    print(metrics.classification_report(y_records[idx][0], y_records[idx][1], target_names=np.load(os.path.join(
        "_Data", "LABEL_DIC.npy"
    ))))
    toolkit = DataToolkit(acc_records[np.argmax(np.average(acc_records, axis=1))])
    print("Acc Mean     : {:8.6}".format(toolkit.mean))
    print("Acc Variance : {:8.6}".format(toolkit.variance))
    print("Done")
Example #7
 def opt(self, epoch=None, eps=None):
     """
     Main procedure of opt
     :param epoch : Maximum iteration ; default: 1000
     :param eps   : Tolerance         ; default: 1e-8
     :return      : x*, f*, n_iter, feva
     """
     if epoch is None:
         epoch = self._params["epoch"]
     if eps is None:
         eps = self._params["eps"]
     self._func.refresh_cache(self._x)
     self._loss_cache, self._grad_cache = self.func(0), self.func(1)
     bar = ProgressBar(max_value=epoch, name="Opt")
     for _ in range(epoch):
         self.iter += 1
         with warnings.catch_warnings():
             warnings.filterwarnings("error")
             try:
                 if self._core(eps):
                     break
                 self.log.append(self._loss_cache)
             except RuntimeWarning as err:
                 print("\n", err, "\n")
                 break
             except np.linalg.LinAlgError as err:
                 print("\n", err, "\n")
                 break
         bar.update()
     bar.update()
     bar.terminate()
     return self._x, self._loss_cache, self.iter, self.feva
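
The catch_warnings block above promotes NumPy runtime warnings (overflow, division by zero) to exceptions, so the optimizer stops cleanly instead of iterating on NaNs. A self-contained illustration of the mechanism:

    import warnings
    import numpy as np

    with warnings.catch_warnings():
        warnings.filterwarnings("error")       # turn warnings into exceptions
        try:
            np.exp(np.float64(1000.0))         # overflow -> RuntimeWarning -> raised
        except RuntimeWarning as err:
            print("stopped on numerical issue:", err)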
Example #8
def main(clf):
    dat_path = os.path.join("_Data", "dataset.dat")
    gen_dataset(dat_path)
    with open(dat_path, "rb") as _file:
        x, y = pickle.load(_file)
    x = [" ".join(sentence) for sentence in x]
    _indices = np.random.permutation(len(x))
    x = list(np.array(x)[_indices])
    y = list(np.array(y)[_indices])
    data_len = len(x)
    batch_size = math.ceil(data_len * 0.1)
    acc_lst, y_results = [], []
    bar = ProgressBar(max_value=10, name=str(clf))
    for i in range(10):
        _next = (i + 1) * batch_size if i != 9 else data_len
        x_train = x[:i * batch_size] + x[(i + 1) * batch_size:]
        y_train = y[:i * batch_size] + y[(i + 1) * batch_size:]
        x_test, y_test = x[i * batch_size:_next], y[i * batch_size:_next]
        count_vec = CountVectorizer()
        counts_train = count_vec.fit_transform(x_train)
        counts_test = count_vec.transform(x_test)
        tfidf_transformer = TfidfTransformer()
        x_train = tfidf_transformer.fit_transform(counts_train)
        x_test = tfidf_transformer.transform(counts_test)  # tf-idf must also be applied to the test fold
        clf.fit(x_train, y_train)
        y_pred = clf.predict(x_test)
        acc_lst.append(clf.acc(y_test, y_pred))
        y_results.append([y_test, y_pred])
        del x_train, y_train, x_test, y_test, y_pred
        bar.update()
    return acc_lst, y_results
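
Note that both vectorizers are fit on the training folds only and merely applied to the test fold, which is what keeps the cross-validation honest. A scikit-learn Pipeline packages the same steps so this cannot go wrong; a self-contained sketch (toy data; MultinomialNB stands in for whatever classifier `clf` wraps):

    from sklearn.pipeline import Pipeline
    from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
    from sklearn.naive_bayes import MultinomialNB

    x_train = ["good movie", "bad movie", "great film", "awful film"]
    y_train = [1, 0, 1, 0]
    x_test = ["good film"]

    text_clf = Pipeline([
        ("counts", CountVectorizer()),
        ("tfidf", TfidfTransformer()),
        ("clf", MultinomialNB(alpha=0.1)),
    ])
    text_clf.fit(x_train, y_train)     # vectorizers are fit on the training fold only
    y_pred = text_clf.predict(x_test)  # the test fold is only ever transform()-ed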
Example #9
 def _get_prediction(self, x, name=None, batch_size=1e6, verbose=None, out_of_sess=False):
     if verbose is None:
         verbose = self.verbose
     fc_shape = np.prod(x.shape[1:])  # type: int
     single_batch = int(batch_size / fc_shape)
     if not single_batch:
         single_batch = 1
     if single_batch >= len(x):
         if not out_of_sess:
             return self._y_pred.eval(feed_dict={self._tfx: x})
         with self._sess.as_default():
             x = x.astype(np.float32)
             return self.get_rs(x).eval(feed_dict={self._tfx: x})
     epoch = int(len(x) / single_batch)
     if len(x) % single_batch:  # leftover samples need one extra batch
         epoch += 1
     name = "Prediction" if name is None else "Prediction ({})".format(name)
     sub_bar = ProgressBar(max_value=epoch, name=name, start=False)
     if verbose >= NNVerbose.METRICS:
         sub_bar.start()
     if not out_of_sess:
         rs = [self._y_pred.eval(feed_dict={self._tfx: x[:single_batch]})]
     else:
         rs = [self.get_rs(x[:single_batch])]
     count = single_batch
     if verbose >= NNVerbose.METRICS:
         sub_bar.update()
     while count < len(x):
         count += single_batch
         if count >= len(x):
             if not out_of_sess:
                 rs.append(self._y_pred.eval(feed_dict={self._tfx: x[count - single_batch:]}))
             else:
                 rs.append(self.get_rs(x[count - single_batch:]))
         else:
             if not out_of_sess:
                 rs.append(self._y_pred.eval(feed_dict={self._tfx: x[count - single_batch:count]}))
             else:
                 rs.append(self.get_rs(x[count - single_batch:count]))
         if verbose >= NNVerbose.METRICS:
             sub_bar.update()
     if out_of_sess:
         with self._sess.as_default():
             rs = [_rs.eval() for _rs in rs]
     return np.vstack(rs)
Example #10
    def fit(self, x, y, sample_weight=None, c=None, lr=None, optimizer=None,
            batch_size=None, epoch=None, tol=None, animation_params=None):
        if sample_weight is None:
            sample_weight = self._params["sample_weight"]
        if c is None:
            c = self._params["c"]
        if lr is None:
            lr = self._params["lr"]
        if batch_size is None:
            batch_size = self._params["batch_size"]
        if epoch is None:
            epoch = self._params["epoch"]
        if tol is None:
            tol = self._params["tol"]
        if optimizer is None:
            optimizer = self._params["optimizer"]
        *animation_properties, animation_params = self._get_animation_params(animation_params)
        x, y = np.atleast_2d(x), np.asarray(y, dtype=np.float32)
        if sample_weight is None:
            sample_weight = np.ones(len(y))
        else:
            sample_weight = np.asarray(sample_weight) * len(y)

        self._w = np.zeros(x.shape[1], dtype=np.float32)
        self._b = np.zeros(1, dtype=np.float32)
        self._model_parameters = [self._w, self._b]
        self._optimizer = OptFactory().get_optimizer_by_name(
            optimizer, self._model_parameters, lr, epoch
        )

        bar = ProgressBar(max_value=epoch, name="LinearSVM")
        ims = []
        train_repeat = self._get_train_repeat(x, batch_size)
        for i in range(epoch):
            self._optimizer.update()
            l = self._batch_training(
                x, y, batch_size, train_repeat, sample_weight, c
            )
            if l < tol:
                bar.terminate()
                break
            self._handle_animation(i, x, y, ims, animation_params, *animation_properties)
            bar.update()
        self._handle_mp4(ims, animation_properties)
Example #11
 def predict(self, x, get_raw_results=False, **kwargs):
     x = NNDist._transfer_x(np.asarray(x))
     rs = []
     batch_size = floor(1e6 / np.prod(x.shape[1:]))
     epoch = int(ceil(len(x) / batch_size))
     output = self._sess.graph.get_tensor_by_name(self._output)
     bar = ProgressBar(max_value=epoch, name="Predict")
     for i in range(epoch):
         if i == epoch - 1:
             rs.append(self._sess.run(output, {
                 self._entry: x[i * batch_size:]
             }))
         else:
             rs.append(self._sess.run(output, {
                 self._entry: x[i * batch_size:(i + 1) * batch_size]
             }))
         bar.update()
     y_pred = np.vstack(rs).astype(np.float32)
     return y_pred if get_raw_results else np.argmax(y_pred, axis=1)
Example #12
        def fit(self, x, y, c=None, lr=None, batch_size=None, epoch=None, tol=None,
                optimizer=None, animation_params=None):
            if c is None:
                c = self._params["c"]
            if lr is None:
                lr = self._params["lr"]
            if batch_size is None:
                batch_size = self._params["batch_size"]
            if epoch is None:
                epoch = self._params["epoch"]
            if tol is None:
                tol = self._params["tol"]
            if optimizer is None:
                optimizer = self._params["optimizer"]
            *animation_properties, animation_params = self._get_animation_params(animation_params)
            x, y = np.atleast_2d(x), np.asarray(y, dtype=np.float32)
            y_2d = y[..., None]

            self._w = Variable(torch.rand([x.shape[1], 1]), requires_grad=True)
            self._b = Variable(torch.Tensor([0.]), requires_grad=True)
            self._model_parameters = [self._w, self._b]
            self._optimizer = PyTorchOptFac().get_optimizer_by_name(
                optimizer, self._model_parameters, lr, epoch
            )

            x, y, y_2d = self._arr_to_variable(False, x, y, y_2d)
            loss_function = lambda _y, _y_pred: self._loss(_y, _y_pred, c)

            bar = ProgressBar(max_value=epoch, name="TorchLinearSVM")
            ims = []
            train_repeat = self._get_train_repeat(x, batch_size)
            for i in range(epoch):
                self._optimizer.update()
                l = self.batch_training(
                    x, y_2d, batch_size, train_repeat, loss_function
                )
                if l < tol:
                    bar.terminate()
                    break
                self._handle_animation(i, x, y, ims, animation_params, *animation_properties)
                bar.update()
            self._handle_mp4(ims, animation_properties)
Example #13
 def predict(self, x):
     self._create_graph()
     x, rs = np.atleast_2d(x).astype(np.float32), []
     with tf.Session() as sess:
         flattened_tensor = sess.graph.get_tensor_by_name(self._output)
         print("Predicting...")
         batch_size = math.floor(1e6 / np.prod(x.shape[1:]))
         epoch = math.ceil(len(x) / batch_size)  # type: int
         bar = ProgressBar(max_value=epoch, name="Predict")
         for i in range(epoch):
             if i == epoch - 1:
                 rs.append(sess.run(flattened_tensor, {
                     self._entry: x[i*batch_size:]
                 }))
             else:
                 rs.append(sess.run(flattened_tensor, {
                     self._entry: x[i*batch_size:(i+1)*batch_size]
                 }))
             bar.update()
         return np.vstack(rs).astype(np.float32)
Example #14
    def fit(self, x, y, c=None, lr=None, batch_size=None, epoch=None, tol=None,
            optimizer=None, animation_params=None):
        if c is None:
            c = self._params["c"]
        if lr is None:
            lr = self._params["lr"]
        if batch_size is None:
            batch_size = self._params["batch_size"]
        if epoch is None:
            epoch = self._params["epoch"]
        if tol is None:
            tol = self._params["tol"]
        if optimizer is None:
            optimizer = self._params["optimizer"]
        *animation_properties, animation_params = self._get_animation_params(animation_params)
        x, y = np.atleast_2d(x), np.asarray(y)
        y_2d = y[..., None]

        self._w = tf.Variable(np.zeros([x.shape[1], 1]), dtype=tf.float32, name="w")
        self._b = tf.Variable(0., dtype=tf.float32, name="b")
        self._tfx = tf.placeholder(tf.float32, [None, x.shape[1]])
        self._tfy = tf.placeholder(tf.float32, [None, 1])
        self._y_pred_raw = tf.matmul(self._tfx, self._w) + self._b
        self._y_pred = tf.sign(self._y_pred_raw)
        loss = tf.reduce_sum(
            tf.nn.relu(1 - self._tfy * self._y_pred_raw)
        ) + c * tf.nn.l2_loss(self._w)
        train_step = TFOptFac().get_optimizer_by_name(optimizer, lr).minimize(loss)
        self._sess.run(tf.global_variables_initializer())
        bar = ProgressBar(max_value=epoch, name="TFLinearSVM")
        ims = []
        train_repeat = self._get_train_repeat(x, batch_size)
        for i in range(epoch):
            l = self._batch_training(x, y_2d, batch_size, train_repeat, loss, train_step)
            if l < tol:
                bar.terminate()
                break
            self._handle_animation(i, x, y, ims, animation_params, *animation_properties)
            bar.update()
        self._handle_mp4(ims, animation_properties)
Example #15
    def fit(self, x, y, sample_weight=None, c=None, lr=None, epoch=None, tol=None, animation_params=None):
        if sample_weight is None:
            sample_weight = self._params["sample_weight"]
        if c is None:
            c = self._params["c"]
        if lr is None:
            lr = self._params["lr"]
        if epoch is None:
            epoch = self._params["epoch"]
        if tol is None:
            tol = self._params["tol"]
        *animation_properties, animation_params = self._get_animation_params(animation_params)
        x, y = np.atleast_2d(x), np.asarray(y)
        if sample_weight is None:
            sample_weight = np.ones(len(y))
        else:
            sample_weight = np.asarray(sample_weight) * len(y)

        self._w = np.zeros(x.shape[1])
        self._b = 0
        ims = []
        bar = ProgressBar(max_value=epoch, name="LinearSVM")
        for i in range(epoch):
            err = (1 - self.predict(x, get_raw_results=True) * y) * sample_weight
            indices = np.random.permutation(len(y))
            idx = indices[np.argmax(err[indices])]
            if err[idx] <= tol:
                bar.update(epoch)
                break
            delta = lr * c * y[idx] * sample_weight[idx]
            self._w *= 1 - lr
            self._w += delta * x[idx]
            self._b += delta
            self._handle_animation(i, x, y, ims, animation_params, *animation_properties)
            bar.update()
        self._handle_mp4(ims, animation_properties)
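
The update above is a subgradient step on the L2-regularized hinge loss, applied to the single worst margin violator per epoch. For reference, a full-batch version of the same subgradient (NumPy only, illustrative names):

    import numpy as np

    def linear_svm_step(w, b, x, y, c=1.0, lr=0.01):
        """One full-batch step on 0.5 * ||w||^2 + c * sum(max(0, 1 - y * (x.w + b)))."""
        viol = y * (x.dot(w) + b) < 1                       # samples inside the margin
        grad_w = w - c * (y[viol, None] * x[viol]).sum(axis=0)
        grad_b = -c * y[viol].sum()
        return w - lr * grad_w, b - lr * grad_b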
Example #16
 def fit(self, x, y, sample_weight=None, tree=None, epoch=None, feature_bound=None, **kwargs):
     if sample_weight is None:
         sample_weight = self._params["sample_weight"]
     if tree is None:
         tree = self._params["tree"]
     if epoch is None:
         epoch = self._params["epoch"]
     if feature_bound is None:
         feature_bound = self._params["feature_bound"]
     x, y = np.atleast_2d(x), np.asarray(y)
     n_sample = len(y)
     self._tree = tree
     bar = ProgressBar(max_value=epoch, name="RF")
     for _ in range(epoch):
         tmp_tree = RandomForest.cvd_trees[tree](**kwargs)
         indices = np.random.randint(n_sample, size=n_sample)
         if sample_weight is None:
             local_weight = None
         else:
             local_weight = sample_weight[indices]
             local_weight /= local_weight.sum()
         tmp_tree.fit(x[indices], y[indices], sample_weight=local_weight, feature_bound=feature_bound)
         self._trees.append(deepcopy(tmp_tree))
         bar.update()
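
Each tree is trained on a bootstrap sample: n draws with replacement, so on average about 63% of the distinct rows reach any given tree. The resampling step in isolation:

    import numpy as np

    rng = np.random.RandomState(0)
    n_sample = 1000
    indices = rng.randint(n_sample, size=n_sample)   # bootstrap: draw n rows with replacement
    print(len(np.unique(indices)) / n_sample)        # ~0.632 on average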
Example #17
    def fit(self,
            x=None,
            y=None,
            x_test=None,
            y_test=None,
            lr=0.01,
            lb=0.01,
            epoch=20,
            weight_scale=1,
            batch_size=256,
            record_period=1,
            train_only=False,
            optimizer=None,
            show_loss=True,
            metrics=None,
            do_log=False,
            verbose=None,
            visualize=False,
            visualize_setting=None,
            draw_detailed_network=False,
            weight_average=None):

        x, y = self._feed_data(x, y)
        self._lr = lr
        self._init_optimizer(optimizer)
        print("Optimizer: ", self._optimizer.name)
        print("-" * 30)

        if not self._layers:
            raise BuildNetworkError(
                "Please provide layers before fitting data")

        if y.shape[1] != self._current_dimension:
            raise BuildNetworkError(
                "Output layer's shape should be {}, {} found".format(
                    self._current_dimension, y.shape[1]))

        (x_train, x_test), (y_train, y_test) = self.split_data(
            x, y, x_test, y_test, train_only)
        train_len = len(x_train)
        batch_size = min(batch_size, train_len)
        do_random_batch = train_len >= batch_size
        train_repeat = int(train_len / batch_size) + 1
        self._feed_data(x_train, y_train)

        self._tfx = tf.placeholder(tf.float32, shape=[None, *x.shape[1:]])
        self._tfy = tf.placeholder(tf.float32, shape=[None, y.shape[1]])

        self._metrics = ["acc"] if metrics is None else metrics
        for i, metric in enumerate(self._metrics):
            if isinstance(metric, str):
                if metric not in self._available_metrics:
                    raise BuildNetworkError(
                        "Metric '{}' is not implemented".format(metric))
                self._metrics[i] = self._available_metrics[metric]
        self._metric_names = [_m.__name__ for _m in self._metrics]

        self._logs = {
            name: [[] for _ in range(len(self._metrics) + 1)]
            for name in ("train", "test")
        }
        if verbose is not None:
            self.verbose = verbose

        bar = ProgressBar(min_value=0,
                          max_value=max(1, epoch // record_period),
                          name="Epoch")
        if self.verbose >= NNVerbose.EPOCH:
            bar.start()
        img = None

        with self._sess.as_default() as sess:

            # Session
            self._y_pred = self.get_rs(self._tfx)
            self._l2_loss = self._get_l2_loss(lb)
            self._loss = self.get_rs(self._tfx, self._tfy) + self._l2_loss
            self._activations = self._get_activations(self._tfx)
            self._init_train_step(sess)
            for i, weight in enumerate(self._tf_weights):
                self._tf_weights[i] = weight * weight_scale  # rebind; a bare `weight *= scale` would discard the scaled tensor

            # Log: define the scalar summaries before merge_all() so they are included
            with tf.name_scope("Global_Summaries"):
                tf.summary.scalar("l2 loss", self._l2_loss)
                tf.summary.scalar("loss", self._loss)
                tf.summary.scalar("lr", self._lr)
            merge_op = tf.summary.merge_all()
            writer = tf.summary.FileWriter("Logs", sess.graph)

            sub_bar = ProgressBar(min_value=0,
                                  max_value=train_repeat * record_period - 1,
                                  name="Iteration")
            for counter in range(epoch):
                if self.verbose >= NNVerbose.EPOCH and counter % record_period == 0:
                    sub_bar.start()
                for i in range(train_repeat):
                    if do_random_batch:
                        batch = np.random.choice(train_len, batch_size)
                        x_batch, y_batch = x_train[batch], y_train[batch]
                    else:
                        x_batch, y_batch = x_train, y_train
                    feed_dict = {self._tfx: x_batch, self._tfy: y_batch}
                    self._train_step.run(feed_dict=feed_dict)

                    summary = sess.run(merge_op, feed_dict=feed_dict)
                    writer.add_summary(summary, counter * train_repeat + i)

                    if self.verbose >= NNVerbose.EPOCH:
                        if sub_bar.update() and self.verbose >= NNVerbose.METRICS_DETAIL:
                            self._append_log(x, y, "train", get_loss=show_loss)
                            self._append_log(x_test,
                                             y_test,
                                             "test",
                                             get_loss=show_loss)
                            self._print_metric_logs(show_loss, "train")
                            self._print_metric_logs(show_loss, "test")
                if self.verbose >= NNVerbose.EPOCH:
                    sub_bar.update()

                if (counter + 1) % record_period == 0:
                    if do_log:
                        self._append_log(x, y, "train", get_loss=show_loss)
                        self._append_log(x_test,
                                         y_test,
                                         "test",
                                         get_loss=show_loss)
                        if self.verbose >= NNVerbose.METRICS:
                            self._print_metric_logs(show_loss, "train")
                            self._print_metric_logs(show_loss, "test")
                    if visualize:
                        if visualize_setting is None:
                            self.visualize2d(x_test, y_test)
                        else:
                            self.visualize2d(x_test, y_test,
                                             *visualize_setting)
                    if x_test.shape[1] == 2:
                        if draw_detailed_network:
                            img = self._draw_detailed_network(
                                weight_average=weight_average)
                    if self.verbose >= NNVerbose.EPOCH:
                        bar.update(counter // record_period + 1)
                        sub_bar = ProgressBar(
                            min_value=0,
                            max_value=train_repeat * record_period - 1,
                            name="Iteration")

        if img is not None:
            cv2.waitKey(0)
            cv2.destroyAllWindows()

        return self._logs
Example #18
        _prob_lst = prob_lists[i]
        x_test, y_test = test_sets[i]
        y_pred = np.array(
            [pick_best(sentence, _prob_lst) for sentence in x_test])
        y_test = np.array(y_test)
        acc_lst.append(100 * np.sum(y_pred == y_test) / len(y_pred))
    return acc_lst


if __name__ == '__main__':
    _rs, epoch = [], 10
    bar = ProgressBar(max_value=epoch, name="_NB")
    bar.start()
    for _ in range(epoch):
        _rs.append(test(*train()))
        bar.update()
    _rs = np.array(_rs).T
    # x_base = np.arange(len(_rs[0])) + 1
    # plt.figure()
    # for _acc_lst in _rs:
    #     plt.plot(x_base, _acc_lst)
    # plt.plot(x_base, np.average(_rs, axis=0), linewidth=4, label="Average")
    # plt.xlim(1, epoch)
    # plt.ylim(np.min(_rs), np.max(_rs)+2)
    # plt.legend(loc="lower right")
    # plt.show()
    plt.figure()
    plt.boxplot(_rs.T, vert=False, showmeans=True)
    plt.show()
    _rs = np.array(_rs).ravel()
    print("Acc Mean     : {:8.6}".format(np.average(_rs)))
Example #19
    def fit(self, x=None, y=None, lr=0.01, epoch=10, batch_size=128, train_rate=None,
            verbose=0, metrics=None, record_period=100):

        # Initialize
        self.verbose = verbose
        self._add_cost_layer()
        self._init_optimizers(lr)
        layer_width = len(self._layers)

        if train_rate is not None:
            train_rate = float(train_rate)
            train_len = int(len(x) * train_rate)
            shuffle_suffix = np.random.permutation(int(len(x)))
            x, y = x[shuffle_suffix], y[shuffle_suffix]
            x_train, y_train = x[:train_len], y[:train_len]
            x_test, y_test = x[train_len:], y[train_len:]
        else:
            x_train = x_test = x
            y_train = y_test = y

        train_len = len(x_train)
        batch_size = min(batch_size, train_len)
        do_random_batch = train_len >= batch_size
        train_repeat = int(train_len / batch_size) + 1

        self._metrics = ["acc"] if metrics is None else metrics
        for i, metric in enumerate(self._metrics):
            if isinstance(metric, str):
                self._metrics[i] = self._available_metrics[metric]
        self._metric_names = [_m.__name__ for _m in self._metrics]
        self._logs = {
            name: [[] for _ in range(len(self._metrics) + 1)] for name in ("train", "test")
        }

        bar = ProgressBar(max_value=max(1, epoch // record_period), name="Epoch", start=False)
        if self.verbose >= NNVerbose.EPOCH:
            bar.start()

        # Train
        sub_bar = ProgressBar(max_value=train_repeat * record_period - 1, name="Iteration", start=False)
        for counter in range(epoch):
            if self.verbose >= NNVerbose.EPOCH and counter % record_period == 0:
                sub_bar.start()
            for _ in range(train_repeat):
                if do_random_batch:
                    batch = np.random.choice(train_len, batch_size)
                    x_batch, y_batch = x_train[batch], y_train[batch]
                else:
                    x_batch, y_batch = x_train, y_train
                self._w_optimizer.update()
                self._b_optimizer.update()
                _activations = self._get_activations(x_batch)
                _deltas = [self._layers[-1].bp_first(y_batch, _activations[-1])]
                for i in range(-1, -len(_activations), -1):
                    _deltas.append(
                        self._layers[i - 1].bp(_activations[i - 1], self._weights[i], _deltas[-1])
                    )
                for i in range(layer_width - 2, 0, -1):
                    self._opt(i, _activations[i - 1], _deltas[layer_width - i - 1])
                self._opt(0, x_batch, _deltas[-1])
                if self.verbose >= NNVerbose.EPOCH:
                    if sub_bar.update() and self.verbose >= NNVerbose.METRICS_DETAIL:
                        self._append_log(x_train, y_train, "train")
                        self._append_log(x_test, y_test, "test")
                        self._print_metric_logs("train")
                        self._print_metric_logs("test")
            if self.verbose >= NNVerbose.EPOCH:
                sub_bar.update()
            if (counter + 1) % record_period == 0:
                self._append_log(x_train, y_train, "train")
                self._append_log(x_test, y_test, "test")
                if self.verbose >= NNVerbose.METRICS:
                    self._print_metric_logs("train")
                    self._print_metric_logs("test")
                if self.verbose >= NNVerbose.EPOCH:
                    bar.update(counter // record_period + 1)
                    sub_bar = ProgressBar(max_value=train_repeat * record_period - 1, name="Iteration", start=False)
Example #20
    def fit(self,
            x=None, y=None, x_test=None, y_test=None,
            batch_size=256, record_period=1, train_only=False,
            optimizer=None, w_optimizer=None, b_optimizer=None,
            lr=0.01, lb=0.01, epoch=20, weight_scale=1, apply_bias=True,
            show_loss=True, metrics=None, do_log=True, verbose=None,
            visualize=False, visualize_setting=None,
            draw_weights=False, draw_network=False, draw_detailed_network=False,
            draw_img_network=False, img_shape=None, weight_average=None):

        if draw_img_network and img_shape is None:
            raise BuildNetworkError("Please provide image's shape to draw_img_network")

        x, y = self._feed_data(x, y)
        self._lr, self._epoch = lr, epoch
        for weight in self._weights:
            weight *= weight_scale
        if not self._w_optimizer or not self._b_optimizer:
            if not self._optimizer_name:
                if optimizer is None:
                    optimizer = "Adam"
                self._w_optimizer = optimizer if w_optimizer is None else w_optimizer
                self._b_optimizer = optimizer if b_optimizer is None else b_optimizer
            else:
                if not self._w_optimizer:
                    self._w_optimizer = self._optimizer_name
                if not self._b_optimizer:
                    self._b_optimizer = self._optimizer_name
        self._init_optimizer()
        assert isinstance(self._w_optimizer, Optimizer) and isinstance(self._b_optimizer, Optimizer)
        print()
        print("=" * 30)
        print("Optimizers")
        print("-" * 30)
        print("w: {}\nb: {}".format(self._w_optimizer, self._b_optimizer))
        print("-" * 30)
        if not self._layers:
            raise BuildNetworkError("Please provide layers before fitting data")
        if y.shape[1] != self._current_dimension:
            raise BuildNetworkError("Output layer's shape should be {}, {} found".format(
                self._current_dimension, y.shape[1]))

        (x_train, x_test), (y_train, y_test) = self.split_data(
            x, y, x_test, y_test, train_only)
        train_len = len(x_train)
        batch_size = min(batch_size, train_len)
        do_random_batch = train_len >= batch_size
        train_repeat = int(train_len / batch_size) + 1
        self._regularization_param = 1 - lb * lr / batch_size
        self._feed_data(x_train, y_train)

        self._metrics = ["acc"] if metrics is None else metrics
        for i, metric in enumerate(self._metrics):
            if isinstance(metric, str):
                if metric not in self._available_metrics:
                    raise BuildNetworkError("Metric '{}' is not implemented".format(metric))
                self._metrics[i] = self._available_metrics[metric]
        self._metric_names = [_m.__name__ for _m in self._metrics]

        self._logs = {
            name: [[] for _ in range(len(self._metrics) + 1)] for name in ("train", "cv", "test")
        }
        if verbose is not None:
            self.verbose = verbose

        layer_width = len(self._layers)
        self._whether_apply_bias = apply_bias

        bar = ProgressBar(min_value=0, max_value=max(1, epoch // record_period), name="Epoch")
        if self.verbose >= NNVerbose.EPOCH:
            bar.start()
        img = None

        weight_trace = [[[org] for org in weight] for weight in self._weights]
        sub_bar = ProgressBar(min_value=0, max_value=train_repeat * record_period - 1, name="Iteration")
        for counter in range(epoch):
            self._w_optimizer.update()
            self._b_optimizer.update()
            _xs, _activations = [], []
            if self.verbose >= NNVerbose.EPOCH and counter % record_period == 0:
                sub_bar.start()

            for _i in range(train_repeat):
                if do_random_batch:
                    batch = np.random.choice(train_len, batch_size)
                    x_batch, y_batch = x_train[batch], y_train[batch]
                else:
                    x_batch, y_batch = x_train, y_train

                _activations = self._get_activations(x_batch)
                if self.verbose >= NNVerbose.DEBUG:
                    _xs = [x_batch.dot(self._weights[0])]
                    for i, weight in enumerate(self._weights[1:]):
                        _xs.append(_activations[i].dot(weight))

                _deltas = [self._layers[-1].bp_first(y_batch, _activations[-1])]
                for i in range(-1, -len(_activations), -1):
                    _deltas.append(self._layers[i - 1].bp(_activations[i - 1], self._weights[i], _deltas[-1]))

                for i in range(layer_width - 1, 0, -1):
                    if not isinstance(self._layers[i], SubLayer):
                        self._opt(i, _activations[i - 1], _deltas[layer_width - i - 1])
                self._opt(0, x_batch, _deltas[-1])

                if draw_weights:
                    for i, weight in enumerate(self._weights):
                        for j, new_weight in enumerate(weight.copy()):
                            weight_trace[i][j].append(new_weight)
                if self.verbose >= NNVerbose.DEBUG:
                    print("")
                    print("## Activations ##")
                    for i, ac in enumerate(_activations):
                        print("-- Layer {} ({}) --".format(i + 1, self._layers[i].name))
                        print(_xs[i])
                        print(ac)

                    print("")
                    print("## Deltas ##")
                    for i, delta in zip(range(len(_deltas) - 1, -1, -1), _deltas):
                        print("-- Layer {} ({}) --".format(i + 1, self._layers[i].name))
                        print(delta)

                    _ = input("Press any key to continue...")
                if self.verbose >= NNVerbose.EPOCH:
                    if sub_bar.update() and self.verbose >= NNVerbose.METRICS_DETAIL:
                        self._append_log(x, y, "train", get_loss=show_loss)
                        self._append_log(x_test, y_test, "cv", get_loss=show_loss)
                        self._print_metric_logs(show_loss, "train")
                        self._print_metric_logs(show_loss, "cv")

            if self.verbose >= NNVerbose.EPOCH:
                sub_bar.update()
            if do_log:
                self._append_log(x, y, "train", get_loss=show_loss)
                self._append_log(x_test, y_test, "cv", get_loss=show_loss)
            if (counter + 1) % record_period == 0:
                if do_log and self.verbose >= NNVerbose.METRICS:
                    self._print_metric_logs(show_loss, "train")
                    self._print_metric_logs(show_loss, "cv")
                if visualize:
                    if visualize_setting is None:
                        self.visualize2d(x_test, y_test)
                    else:
                        self.visualize2d(x_test, y_test, *visualize_setting)
                if x_test.shape[1] == 2:
                    if draw_network:
                        img = self._draw_network(weight_average=weight_average, activations=_activations)
                    if draw_detailed_network:
                        img = self._draw_detailed_network(weight_average=weight_average)
                elif draw_img_network:
                    img = self._draw_img_network(img_shape, weight_average=weight_average)
                if self.verbose >= NNVerbose.EPOCH:
                    bar.update(counter // record_period + 1)
                    sub_bar = ProgressBar(min_value=0, max_value=train_repeat * record_period - 1, name="Iteration")

        if do_log:
            self._append_log(x_test, y_test, "test", get_loss=show_loss)
        if img is not None:
            cv2.waitKey(0)
            cv2.destroyAllWindows()

        if draw_weights:
            ts = np.arange(epoch * train_repeat + 1)
            for i, weight in enumerate(self._weights):
                plt.figure()
                for j in range(len(weight)):
                    plt.plot(ts, weight_trace[i][j])
                plt.title("Weights toward layer {} ({})".format(i + 1, self._layers[i].name))
                plt.show()

        return self._logs
Example #21
    def fit(self, im, om, generator, cell=None, provide_sequence_length=None,
            squeeze=None, use_sparse_labels=None, embedding_size=None, use_final_state=None,
            n_hidden=None, n_history=None, activation=None, lr=None, epoch=None, n_iter=None,
            batch_size=None, optimizer=None, eps=None, verbose=None):
        if cell is None:
            cell = self._params["cell"]
        if provide_sequence_length is None:
            provide_sequence_length = self._params["provide_sequence_length"]
        if n_hidden is None:
            n_hidden = self._params["n_hidden"]
        if n_history is None:
            n_history = self._params["n_history"]
        if squeeze:
            self._squeeze = True
        if use_sparse_labels:
            self._use_sparse_labels = True
        if self._squeeze and n_history == 0:
            n_history = 1
        if embedding_size:
            self._embedding_size = embedding_size
        if use_final_state:
            self._use_final_state = True
        if activation is None:
            activation = self._params["activation"]
        if lr is None:
            lr = self._params["lr"]
        if epoch is None:
            epoch = self._params["epoch"]
        if n_iter is None:
            n_iter = self._params["n_iter"]
        if optimizer is None:
            optimizer = self._params["optimizer"]
        if batch_size is None:
            batch_size = self._params["batch_size"]
        if eps is None:
            eps = self._params["eps"]
        if verbose is None:
            verbose = self._params["verbose"]

        self._generator = generator
        self._im, self._om, self._activation = im, om, activation
        self._optimizer = OptFactory().get_optimizer_by_name(optimizer, lr)
        self._define_input(im, om)

        self._cell = cell(n_hidden)
        self._prepare_for_dynamic_rnn(provide_sequence_length)
        # tf.nn.dynamic_rnn returns per-step outputs and the final state by default
        rnn_outputs, rnn_final_state = tf.nn.dynamic_rnn(
            self._cell, self._input,
            sequence_length=self._sequence_lengths, initial_state=self._initial_state
        )
        self._get_output(rnn_outputs, rnn_final_state, n_history)
        loss = self._get_loss(eps)
        train_step = self._optimizer.minimize(loss)
        self._log["iter_err"] = []
        self._log["epoch_err"] = []
        self._sess.run(tf.global_variables_initializer())
        bar = ProgressBar(max_value=epoch, name="Epoch", start=False)
        if verbose >= 2:
            bar.start()
        for _ in range(epoch):
            epoch_err = 0
            sub_bar = ProgressBar(max_value=n_iter, name="Iter", start=False)
            if verbose >= 2:
                sub_bar.start()
            for __ in range(n_iter):
                if provide_sequence_length:
                    x_batch, y_batch, sequence_length = self._generator.gen(batch_size)
                    feed_dict = {
                        self._tfx: x_batch, self._tfy: y_batch,
                        self._sequence_lengths: sequence_length
                    }
                else:
                    x_batch, y_batch = self._generator.gen(batch_size)
                    feed_dict = {self._tfx: x_batch, self._tfy: y_batch}
                iter_err = self._sess.run([loss, train_step], feed_dict)[0]
                self._log["iter_err"].append(iter_err)
                epoch_err += iter_err
                if verbose >= 2:
                    sub_bar.update()
            self._log["epoch_err"].append(epoch_err / n_iter)
            if verbose >= 1:
                self._verbose()
                if verbose >= 2:
                    bar.update()
Example #22
    def fit(self,
            x,
            y,
            lr=None,
            epoch=None,
            batch_size=None,
            train_rate=None,
            optimizer=None,
            metrics=None,
            record_period=None,
            verbose=None,
            preview=None):
        if lr is None:
            lr = self._params["lr"]
        if epoch is None:
            epoch = self._params["epoch"]
        if optimizer is None:
            optimizer = self._params["optimizer"]
        if batch_size is None:
            batch_size = self._params["batch_size"]
        if train_rate is None:
            train_rate = self._params["train_rate"]
        if metrics is None:
            metrics = self._params["metrics"]
        if record_period is None:
            record_period = self._params["record_period"]
        if verbose is None:
            verbose = self._params["verbose"]
        if preview is None:
            preview = self._params["preview"]

        x = NN._transfer_x(x)
        self.verbose = verbose
        self._optimizer = OptFactory().get_optimizer_by_name(optimizer, lr)
        self._tfx = tf.placeholder(tf.float32, shape=[None, *x.shape[1:]])
        self._tfy = tf.placeholder(tf.float32, shape=[None, y.shape[1]])

        if train_rate is not None:
            train_rate = float(train_rate)
            train_len = int(len(x) * train_rate)
            shuffle_suffix = np.random.permutation(int(len(x)))
            x, y = x[shuffle_suffix], y[shuffle_suffix]
            x_train, y_train = x[:train_len], y[:train_len]
            x_test, y_test = x[train_len:], y[train_len:]
        else:
            x_train = x_test = x
            y_train = y_test = y
        y_train_classes = np.argmax(y_train, axis=1)
        y_test_classes = np.argmax(y_test, axis=1)

        train_len = len(x_train)
        batch_size = min(batch_size, train_len)
        do_random_batch = train_len >= batch_size
        train_repeat = int(train_len / batch_size) + 1
        record_period = min(record_period, epoch)

        if metrics is None:
            metrics = []
        self._metrics = self.get_metrics(metrics)
        self._metric_names = [_m.__name__ for _m in metrics]
        self._logs = {
            name: [[] for _ in range(len(metrics) + 1)]
            for name in ("train", "test")
        }

        bar = ProgressBar(min_value=0,
                          max_value=max(1, epoch // record_period),
                          name="Epoch")
        if self.verbose >= NNVerbose.EPOCH:
            bar.start()

        if preview:
            self._preview()

        with self._sess.as_default() as sess:
            self._y_pred = self._get_rs(self._tfx, predict=False)
            self._cost = self._layers[-1].calculate(self._tfy, self._y_pred)
            self._train_step = self._optimizer.minimize(self._cost)
            sess.run(tf.global_variables_initializer())
            sub_bar = ProgressBar(min_value=0,
                                  max_value=train_repeat * record_period - 1,
                                  name="Iteration")
            for counter in range(epoch):
                if self.verbose >= NNVerbose.EPOCH and counter % record_period == 0:
                    sub_bar.start()
                for _i in range(train_repeat):
                    if do_random_batch:
                        batch = np.random.choice(train_len, batch_size)
                        x_batch, y_batch = x_train[batch], y_train[batch]
                    else:
                        x_batch, y_batch = x_train, y_train
                    self._train_step.run(feed_dict={
                        self._tfx: x_batch,
                        self._tfy: y_batch
                    })
                    if self.verbose >= NNVerbose.EPOCH:
                        if sub_bar.update() and self.verbose >= NNVerbose.METRICS_DETAIL:
                            self._append_log(x_train, y_train, y_train_classes,
                                             "train")
                            self._append_log(x_test, y_test, y_test_classes,
                                             "test")
                            self._print_metric_logs("train")
                            self._print_metric_logs("test")
                if self.verbose >= NNVerbose.EPOCH:
                    sub_bar.update()
                if (counter + 1) % record_period == 0:
                    if self.verbose >= NNVerbose.METRICS:
                        self._append_log(x_train, y_train, y_train_classes,
                                         "train")
                        self._append_log(x_test, y_test, y_test_classes,
                                         "test")
                        self._print_metric_logs("train")
                        self._print_metric_logs("test")
                    if self.verbose >= NNVerbose.EPOCH:
                        bar.update(counter // record_period + 1)
                        sub_bar = ProgressBar(
                            min_value=0,
                            max_value=train_repeat * record_period - 1,
                            name="Iteration")
Example #23
    def fit(self, x, y, sample_weight=None, kernel=None, epoch=None,
            x_test=None, y_test=None, metrics=None, animation_params=None, **kwargs):
        if sample_weight is None:
            sample_weight = self._params["sample_weight"]  # type: list
        if kernel is None:
            kernel = self._params["kernel"]
        if epoch is None:
            epoch = self._params["epoch"]
        if x_test is None:
            x_test = self._params["x_test"]  # type: list
        if y_test is None:
            y_test = self._params["y_test"]  # type: list
        if metrics is None:
            metrics = self._params["metrics"]  # type: list
        *animation_properties, animation_params = self._get_animation_params(animation_params)
        self._x, self._y = np.atleast_2d(x), np.asarray(y)
        if kernel == "poly":
            _p = kwargs.get("p", self._params["p"])
            self._kernel_name = "Polynomial"
            self._kernel_param = "degree = {}".format(_p)
            self._kernel = lambda _x, _y: KernelBase._poly(_x, _y, _p)
        elif kernel == "rbf":
            _gamma = kwargs.get("gamma", 1 / self._x.shape[1])
            self._kernel_name = "RBF"
            self._kernel_param = r"$\gamma = {:8.6}$".format(_gamma)
            self._kernel = lambda _x, _y: KernelBase._rbf(_x, _y, _gamma)
        else:
            raise NotImplementedError("Kernel '{}' has not defined".format(kernel))
        if sample_weight is None:
            sample_weight = np.ones(len(y))
        else:
            sample_weight = np.asarray(sample_weight) * len(y)

        self._alpha, self._w, self._prediction_cache = (
            np.zeros(len(x)), np.zeros(len(x)), np.zeros(len(x)))
        self._gram = self._kernel(self._x, self._x)
        self._b = 0
        self._prepare(sample_weight, **kwargs)

        fit_args, logs, ims = [], [], []
        for name, arg in zip(self._fit_args_names, self._fit_args):
            if name in kwargs:
                arg = kwargs[name]
            fit_args.append(arg)
        if self._do_log:
            if metrics is not None:
                self.get_metrics(metrics)
            test_gram = None
            if x_test is not None and y_test is not None:
                x_cv, y_cv = np.atleast_2d(x_test), np.asarray(y_test)
                test_gram = self._kernel(self._x, x_cv)
            else:
                x_cv, y_cv = self._x, self._y
        else:
            y_cv = test_gram = None

        if self._is_torch:
            y_cv, self._x, self._y = self._torch_transform(y_cv)

        bar = ProgressBar(max_value=epoch, name=str(self))
        for i in range(epoch):
            if self._fit(sample_weight, *fit_args):
                bar.terminate()
                break
            if self._do_log and metrics is not None:
                local_logs = []
                for metric in metrics:
                    if test_gram is None:
                        if self._is_torch:
                            local_y = self._y.data.numpy()
                        else:
                            local_y = self._y
                        local_logs.append(metric(local_y, np.sign(self._prediction_cache)))
                    else:
                        if self._is_torch:
                            local_y = y_cv.data.numpy()
                        else:
                            local_y = y_cv
                        local_logs.append(metric(local_y, self.predict(test_gram, gram_provided=True)))
                logs.append(local_logs)
            self._handle_animation(i, self._x, self._y, ims, animation_params, *animation_properties)
            bar.update()
        self._handle_mp4(ims, animation_properties)
        return logs
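
The poly and rbf kernels dispatched above follow the standard definitions K(a, b) = (a . b + 1)^p and K(a, b) = exp(-gamma * ||a - b||^2). A NumPy sketch of the gram computations under those assumptions (function names are illustrative, not KernelBase's):

    import numpy as np

    def poly_gram(x, y, p):
        """Polynomial kernel between all row pairs of x (n, d) and y (m, d)."""
        return (x.dot(y.T) + 1) ** p

    def rbf_gram(x, y, gamma):
        """RBF kernel via ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a . b."""
        sq = (x ** 2).sum(1)[:, None] + (y ** 2).sum(1)[None, :] - 2 * x.dot(y.T)
        return np.exp(-gamma * np.maximum(sq, 0))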
Example #24
    def fit(self, x, y, lr=0.01, epoch=10, batch_size=256, train_rate=None,
            optimizer="Adam", metrics=None, record_period=100, verbose=1):

        self.verbose = verbose
        self._add_cost_layer()
        self._init_optimizers(optimizer, lr, epoch)
        layer_width = len(self._layers)
        self._preview()

        if train_rate is not None:
            train_rate = float(train_rate)
            train_len = int(len(x) * train_rate)
            shuffle_suffix = np.random.permutation(len(x))
            x, y = x[shuffle_suffix], y[shuffle_suffix]
            x_train, y_train = x[:train_len], y[:train_len]
            x_test, y_test = x[train_len:], y[train_len:]
        else:
            x_train = x_test = x
            y_train = y_test = y
        y_train_classes = np.argmax(y_train, axis=1)
        y_test_classes = np.argmax(y_test, axis=1)

        train_len = len(x_train)
        batch_size = min(batch_size, train_len)
        do_random_batch = train_len >= batch_size
        train_repeat = int(train_len / batch_size) + 1

        if metrics is None:
            metrics = []
        self._metrics = self.get_metrics(metrics)
        self._metric_names = [_m.__name__ for _m in metrics]
        self._logs = {
            name: [[] for _ in range(len(metrics) + 1)] for name in ("train", "test")
        }

        bar = ProgressBar(min_value=0, max_value=max(1, epoch // record_period), name="Epoch")
        if self.verbose >= NNVerbose.EPOCH:
            bar.start()

        sub_bar = ProgressBar(min_value=0, max_value=train_repeat * record_period - 1, name="Iteration")
        for counter in range(epoch):
            if self.verbose >= NNVerbose.EPOCH and counter % record_period == 0:
                sub_bar.start()
            for _ in range(train_repeat):
                if do_random_batch:
                    batch = np.random.choice(train_len, batch_size)
                    x_batch, y_batch = x_train[batch], y_train[batch]
                else:
                    x_batch, y_batch = x_train, y_train
                self._w_optimizer.update()
                self._b_optimizer.update()
                _activations = self._get_activations(x_batch)
                _deltas = [self._layers[-1].bp_first(y_batch, _activations[-1])]
                for i in range(-1, -len(_activations), -1):
                    _deltas.append(
                        self._layers[i - 1].bp(_activations[i - 1], self._weights[i], _deltas[-1])
                    )
                for i in range(layer_width - 2, 0, -1):
                    self._opt(i, _activations[i - 1], _deltas[layer_width - i - 1])
                self._opt(0, x_batch, _deltas[-1])
                if self.verbose >= NNVerbose.EPOCH:
                    if sub_bar.update() and self.verbose >= NNVerbose.METRICS_DETAIL:
                        self._append_log(x_train, y_train, y_train_classes, "train")
                        self._append_log(x_test, y_test, y_test_classes, "test")
                        self._print_metric_logs("train")
                        self._print_metric_logs("test")
            if self.verbose >= NNVerbose.EPOCH:
                sub_bar.update()
            if (counter + 1) % record_period == 0:
                self._append_log(x_train, y_train, y_train_classes, "train")
                self._append_log(x_test, y_test, y_test_classes, "test")
                if self.verbose >= NNVerbose.METRICS:
                    self._print_metric_logs("train")
                    self._print_metric_logs("test")
                if self.verbose >= NNVerbose.EPOCH:
                    bar.update(counter // record_period + 1)
                    sub_bar = ProgressBar(min_value=0, max_value=train_repeat * record_period - 1, name="Iteration")
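
The shuffle-and-split block driven by train_rate recurs in almost every example below; a standalone, runnable sketch of the same logic (the split helper is illustrative, not project API):

import numpy as np

def split(x, y, train_rate=0.8):
    # Permute once, then slice: mirrors the train_rate branch above.
    train_len = int(len(x) * train_rate)
    idx = np.random.permutation(len(x))
    x, y = x[idx], y[idx]
    return (x[:train_len], y[:train_len]), (x[train_len:], y[train_len:])

x = np.random.randn(10, 3)
y = np.eye(2)[np.random.randint(0, 2, 10)]
(x_train, y_train), (x_test, y_test) = split(x, y)
print(x_train.shape, x_test.shape)  # (8, 3) (2, 3)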
Example #25
    def fit(self,
            x,
            y,
            sample_weight=None,
            kernel=None,
            epoch=None,
            x_test=None,
            y_test=None,
            metrics=None,
            animation_params=None,
            **kwargs):
        if sample_weight is None:
            sample_weight = self._params["sample_weight"]  # type: list
        if kernel is None:
            kernel = self._params["kernel"]
        if epoch is None:
            epoch = self._params["epoch"]
        if x_test is None:
            x_test = self._params["x_test"]  # type: list
        if y_test is None:
            y_test = self._params["y_test"]  # type: list
        if metrics is None:
            metrics = self._params["metrics"]  # type: list
        *animation_properties, animation_params = self._get_animation_params(
            animation_params)
        self._x, self._y = np.atleast_2d(x), np.asarray(y)
        if kernel == "poly":
            _p = kwargs.get("p", self._params["p"])
            self._kernel_name = "Polynomial"
            self._kernel_param = "degree = {}".format(_p)
            self._kernel = lambda _x, _y: KernelBase._poly(_x, _y, _p)
        elif kernel == "rbf":
            _gamma = kwargs.get("gamma", 1 / self._x.shape[1])
            self._kernel_name = "RBF"
            self._kernel_param = r"$\gamma = {:8.6}$".format(_gamma)
            self._kernel = lambda _x, _y: KernelBase._rbf(_x, _y, _gamma)
        else:
            raise NotImplementedError(
                "Kernel '{}' has not been defined".format(kernel))
        if sample_weight is None:
            sample_weight = np.ones(len(y))
        else:
            sample_weight = np.asarray(sample_weight) * len(y)

        self._alpha, self._w, self._prediction_cache = (np.zeros(len(x)),
                                                        np.zeros(len(x)),
                                                        np.zeros(len(x)))
        self._gram = self._kernel(self._x, self._x)
        self._b = 0
        self._prepare(sample_weight, **kwargs)

        fit_args, logs, ims = [], [], []
        for name, arg in zip(self._fit_args_names, self._fit_args):
            if name in kwargs:
                arg = kwargs[name]
            fit_args.append(arg)
        if self._do_log:
            if metrics is not None:
                self.get_metrics(metrics)
            test_gram = None
            if x_test is not None and y_test is not None:
                x_cv, y_cv = np.atleast_2d(x_test), np.asarray(y_test)
                test_gram = self._kernel(self._x, x_cv)
            else:
                x_cv, y_cv = self._x, self._y
        else:
            y_cv = test_gram = None

        if self._is_torch:
            y_cv, self._x, self._y = self._torch_transform(y_cv)

        bar = ProgressBar(max_value=epoch, name=str(self))
        for i in range(epoch):
            if self._fit(sample_weight, *fit_args):
                bar.terminate()
                break
            if self._do_log and metrics is not None:
                local_logs = []
                for metric in metrics:
                    if test_gram is None:
                        if self._is_torch:
                            local_y = self._y.data.numpy()
                        else:
                            local_y = self._y
                        local_logs.append(
                            metric(local_y, np.sign(self._prediction_cache)))
                    else:
                        if self._is_torch:
                            local_y = y_cv.data.numpy()
                        else:
                            local_y = y_cv
                        local_logs.append(
                            metric(local_y,
                                   self.predict(test_gram,
                                                gram_provided=True)))
                logs.append(local_logs)
            self._handle_animation(i, self._x, self._y, ims, animation_params,
                                   *animation_properties)
            bar.update()
        self._handle_mp4(ims, animation_properties)
        return logs
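
KernelBase._poly and KernelBase._rbf are project internals; based on the standard kernel definitions, they plausibly compute Gram matrices like the following NumPy sketch (an assumption, not the project's actual code):

import numpy as np

def poly_gram(x, y, p):
    # Polynomial kernel: K(a, b) = (a . b + 1) ** p
    return (x.dot(y.T) + 1) ** p

def rbf_gram(x, y, gamma):
    # RBF kernel: K(a, b) = exp(-gamma * ||a - b||^2)
    sq_dists = ((x[:, None, :] - y[None, :, :]) ** 2).sum(axis=2)
    return np.exp(-gamma * sq_dists)

x = np.random.randn(5, 3)
print(poly_gram(x, x, 3).shape)              # (5, 5)
print(rbf_gram(x, x, 1 / x.shape[1]).shape)  # (5, 5)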
Example #26
    def fit(self, x=None, y=None, lr=0.01, epoch=10, batch_size=128, train_rate=None,
            verbose=0, metrics=None, record_period=100):

        # Initialize
        self.verbose = verbose
        self._add_cost_layer()
        self._init_optimizers(lr)
        layer_width = len(self._layers)

        if train_rate is not None:
            train_rate = float(train_rate)
            train_len = int(len(x) * train_rate)
            shuffle_suffix = np.random.permutation(int(len(x)))
            x, y = x[shuffle_suffix], y[shuffle_suffix]
            x_train, y_train = x[:train_len], y[:train_len]
            x_test, y_test = x[train_len:], y[train_len:]
        else:
            x_train = x_test = x
            y_train = y_test = y

        train_len = len(x_train)
        batch_size = min(batch_size, train_len)
        do_random_batch = train_len > batch_size
        train_repeat = 1 if not do_random_batch else int(train_len / batch_size) + 1

        self._metrics = ["acc"] if metrics is None else metrics
        for i, metric in enumerate(self._metrics):
            if isinstance(metric, str):
                self._metrics[i] = self._available_metrics[metric]
        self._metric_names = [_m.__name__ for _m in self._metrics]
        self._logs = {
            name: [[] for _ in range(len(self._metrics) + 1)] for name in ("train", "test")
        }

        bar = ProgressBar(max_value=max(1, epoch // record_period), name="Epoch", start=False)
        if self.verbose >= NNVerbose.EPOCH:
            bar.start()

        # Train
        sub_bar = ProgressBar(max_value=train_repeat * record_period - 1, name="Iteration", start=False)
        for counter in range(epoch):
            if self.verbose >= NNVerbose.EPOCH and counter % record_period == 0:
                sub_bar.start()
            for _ in range(train_repeat):
                if do_random_batch:
                    batch = np.random.choice(train_len, batch_size)
                    x_batch, y_batch = x_train[batch], y_train[batch]
                else:
                    x_batch, y_batch = x_train, y_train
                self._w_optimizer.update(); self._b_optimizer.update()
                _activations = self._get_activations(x_batch)
                _deltas = [self._layers[-1].bp_first(y_batch, _activations[-1])]
                for i in range(-1, -len(_activations), -1):
                    _deltas.append(
                        self._layers[i - 1].bp(_activations[i - 1], self._weights[i], _deltas[-1])
                    )
                for i in range(layer_width - 2, 0, -1):
                    self._opt(i, _activations[i - 1], _deltas[layer_width - i - 1])
                self._opt(0, x_batch, _deltas[-1])
                if self.verbose >= NNVerbose.EPOCH:
                    if sub_bar.update() and self.verbose >= NNVerbose.METRICS_DETAIL:
                        self._append_log(x_train, y_train, "train")
                        self._append_log(x_test, y_test, "test")
                        self._print_metric_logs("train")
                        self._print_metric_logs("test")
            if self.verbose >= NNVerbose.EPOCH:
                sub_bar.update()
            if (counter + 1) % record_period == 0:
                self._append_log(x_train, y_train, "train")
                self._append_log(x_test, y_test, "test")
                if self.verbose >= NNVerbose.METRICS:
                    self._print_metric_logs("train")
                    self._print_metric_logs("test")
                if self.verbose >= NNVerbose.EPOCH:
                    bar.update(counter // record_period + 1)
                    sub_bar = ProgressBar(max_value=train_repeat * record_period - 1, name="Iteration", start=False)
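
The batch bookkeeping at the top of these fit methods is easy to get subtly wrong (several snippets originally used >=, which forces redundant full-size "random" batches once batch_size already covers the training set); a standalone sketch of the corrected logic:

def batch_plan(train_len, batch_size):
    # Clamp the batch size, then decide whether random batching is needed.
    batch_size = min(batch_size, train_len)
    do_random_batch = train_len > batch_size
    train_repeat = 1 if not do_random_batch else int(train_len / batch_size) + 1
    return batch_size, do_random_batch, train_repeat

print(batch_plan(1000, 128))  # (128, True, 8)
print(batch_plan(100, 128))   # (100, False, 1)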
Example #27
    def fit(self,
            x=None, y=None, x_test=None, y_test=None,
            batch_size=128, record_period=1, train_only=False,
            optimizer=None, w_optimizer=None, b_optimizer=None,
            lr=0.001, lb=0.001, epoch=20, weight_scale=1, apply_bias=True,
            show_loss=True, metrics=None, do_log=True, verbose=None,
            visualize=False, visualize_setting=None,
            draw_weights=False, animation_params=None):
        self._lr, self._epoch = lr, epoch
        for weight in self._weights:
            weight *= weight_scale
        if not self._w_optimizer or not self._b_optimizer:
            if not self._optimizer_name:
                if optimizer is None:
                    optimizer = "Adam"
                self._w_optimizer = optimizer if w_optimizer is None else w_optimizer
                self._b_optimizer = optimizer if b_optimizer is None else b_optimizer
            else:
                if not self._w_optimizer:
                    self._w_optimizer = self._optimizer_name
                if not self._b_optimizer:
                    self._b_optimizer = self._optimizer_name
        self._init_optimizer()
        assert isinstance(self._w_optimizer, Optimizer) and isinstance(self._b_optimizer, Optimizer)
        print()
        print("=" * 30)
        print("Optimizers")
        print("-" * 30)
        print("w: {}\nb: {}".format(self._w_optimizer, self._b_optimizer))
        print("-" * 30)
        if not self._layers:
            raise BuildNetworkError("Please provide layers before fitting data")
        if y.shape[1] != self._current_dimension:
            raise BuildNetworkError("Output layer's shape should be {}, {} found".format(
                self._current_dimension, y.shape[1]))

        (x_train, x_test), (y_train, y_test) = self._split_data(
            x, y, x_test, y_test, train_only)
        train_len = len(x_train)
        batch_size = min(batch_size, train_len)
        do_random_batch = train_len > batch_size
        train_repeat = 1 if not do_random_batch else int(train_len / batch_size) + 1
        self._regularization_param = 1 - lb * lr / batch_size
        self._get_min_max(x_train, y_train)

        self._metrics = ["acc"] if metrics is None else metrics
        for i, metric in enumerate(self._metrics):
            if isinstance(metric, str):
                if metric not in self._available_metrics:
                    raise BuildNetworkError("Metric '{}' is not implemented".format(metric))
                self._metrics[i] = self._available_metrics[metric]
        self._metric_names = [_m.__name__ for _m in self._metrics]

        self._logs = {
            name: [[] for _ in range(len(self._metrics) + 1)] for name in ("train", "cv", "test")
        }
        if verbose is not None:
            self.verbose = verbose

        layer_width = len(self._layers)
        self._apply_bias = apply_bias

        bar = ProgressBar(max_value=max(1, epoch // record_period), name="Epoch", start=False)
        if self.verbose >= NNVerbose.EPOCH:
            bar.start()
        img, ims = None, []

        if draw_weights:
            weight_trace = [[[org] for org in weight] for weight in self._weights]
        else:
            weight_trace = []

        *animation_properties, animation_params = self._get_animation_params(animation_params)
        sub_bar = ProgressBar(max_value=train_repeat * record_period - 1, name="Iteration", start=False)
        for counter in range(epoch):
            self._w_optimizer.update()
            self._b_optimizer.update()
            if self.verbose >= NNVerbose.ITER and counter % record_period == 0:
                sub_bar.start()
            for _ in range(train_repeat):
                if do_random_batch:
                    batch = np.random.choice(train_len, batch_size)
                    x_batch, y_batch = x_train[batch], y_train[batch]
                else:
                    x_batch, y_batch = x_train, y_train
                activations = self._get_activations(x_batch)

                deltas = [self._layers[-1].bp_first(y_batch, activations[-1])]
                for i in range(-1, -len(activations), -1):
                    deltas.append(self._layers[i - 1].bp(activations[i - 1], self._weights[i], deltas[-1]))

                for i in range(layer_width - 1, 0, -1):
                    if not isinstance(self._layers[i], SubLayer):
                        self._opt(i, activations[i - 1], deltas[layer_width - i - 1])
                self._opt(0, x_batch, deltas[-1])

                if draw_weights:
                    for i, weight in enumerate(self._weights):
                        for j, new_weight in enumerate(weight.copy()):
                            weight_trace[i][j].append(new_weight)
                if self.verbose >= NNVerbose.DEBUG:
                    pass
                if self.verbose >= NNVerbose.ITER:
                    if sub_bar.update() and self.verbose >= NNVerbose.METRICS_DETAIL:
                        self._append_log(x, y, "train", get_loss=show_loss)
                        self._append_log(x_test, y_test, "cv", get_loss=show_loss)
                        self._print_metric_logs(show_loss, "train")
                        self._print_metric_logs(show_loss, "cv")
            if self.verbose >= NNVerbose.ITER:
                sub_bar.update()
            self._handle_animation(
                counter, x, y, ims, animation_params, *animation_properties,
                img=self._draw_2d_network(**animation_params), name="Neural Network"
            )
            if do_log:
                self._append_log(x, y, "train", get_loss=show_loss)
                self._append_log(x_test, y_test, "cv", get_loss=show_loss)
            if (counter + 1) % record_period == 0:
                if do_log and self.verbose >= NNVerbose.METRICS:
                    self._print_metric_logs(show_loss, "train")
                    self._print_metric_logs(show_loss, "cv")
                if visualize:
                    if visualize_setting is None:
                        self.visualize2d(x_test, y_test)
                    else:
                        self.visualize2d(x_test, y_test, *visualize_setting)
                if self.verbose >= NNVerbose.EPOCH:
                    bar.update(counter // record_period + 1)
                    if self.verbose >= NNVerbose.ITER:
                        sub_bar = ProgressBar(max_value=train_repeat * record_period - 1, name="Iteration", start=False)

        if do_log:
            self._append_log(x_test, y_test, "test", get_loss=show_loss)
        if img is not None:
            cv2.waitKey(0)
            cv2.destroyAllWindows()
        if draw_weights:
            ts = np.arange(epoch * train_repeat + 1)
            for i, weight in enumerate(self._weights):
                plt.figure()
                for j in range(len(weight)):
                    plt.plot(ts, weight_trace[i][j])
                plt.title("Weights toward layer {} ({})".format(i + 1, self._layers[i].name))
                plt.show()
        self._handle_mp4(ims, animation_properties, "NN")
        return self._logs
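
The _regularization_param computed above, 1 - lb * lr / batch_size, is the usual multiplicative L2 weight-decay factor. A sketch of how such a factor would enter a plain gradient step (an assumption about what _opt does internally, not the project's code):

import numpy as np

lr, lb, batch_size = 0.001, 0.001, 128
reg = 1 - lb * lr / batch_size   # shrink factor, just below 1.0

w = np.random.randn(4, 3)
grad = np.random.randn(4, 3)
w = reg * w - lr * grad          # decay the weights, then take the step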
Example #28
    def fit(self, x=None, y=None, lr=0.01, epoch=10, batch_size=128, train_rate=None,
            verbose=0, metrics=None, record_period=100):

        self.verbose = verbose
        x = NNDist._transfer_x(x)
        self._optimizer = Adam(lr)
        self._tfx = tf.placeholder(tf.float32, shape=[None, *x.shape[1:]])
        self._tfy = tf.placeholder(tf.float32, shape=[None, y.shape[1]])

        if train_rate is not None:
            train_rate = float(train_rate)
            train_len = int(len(x) * train_rate)
            shuffle_suffix = np.random.permutation(int(len(x)))
            x, y = x[shuffle_suffix], y[shuffle_suffix]
            x_train, y_train = x[:train_len], y[:train_len]
            x_test, y_test = x[train_len:], y[train_len:]
        else:
            x_train = x_test = x
            y_train = y_test = y

        train_len = len(x_train)
        batch_size = min(batch_size, train_len)
        do_random_batch = train_len > batch_size
        train_repeat = 1 if not do_random_batch else int(train_len / batch_size) + 1

        self._metrics = ["acc"] if metrics is None else metrics
        for i, metric in enumerate(self._metrics):
            if isinstance(metric, str):
                self._metrics[i] = self._available_metrics[metric]
        self._metric_names = [_m.__name__ for _m in self._metrics]
        self._logs = {
            name: [[] for _ in range(len(self._metrics) + 1)] for name in ("train", "test")
        }

        bar = ProgressBar(min_value=0, max_value=max(1, epoch // record_period), name="Epoch")
        if self.verbose >= NNVerbose.EPOCH:
            bar.start()

        with self._sess.as_default() as sess:

            # Define session
            self._cost = self.get_rs(self._tfx, self._tfy)
            self._y_pred = self.get_rs(self._tfx)
            self._train_step = self._optimizer.minimize(self._cost)
            sess.run(tf.global_variables_initializer())

            # Train
            sub_bar = ProgressBar(min_value=0, max_value=train_repeat * record_period - 1, name="Iteration")
            for counter in range(epoch):
                if self.verbose >= NNVerbose.EPOCH and counter % record_period == 0:
                    sub_bar.start()
                for _i in range(train_repeat):
                    if do_random_batch:
                        batch = np.random.choice(train_len, batch_size)
                        x_batch, y_batch = x_train[batch], y_train[batch]
                    else:
                        x_batch, y_batch = x_train, y_train
                    self._train_step.run(feed_dict={self._tfx: x_batch, self._tfy: y_batch})
                    if self.verbose >= NNVerbose.EPOCH:
                        if sub_bar.update() and self.verbose >= NNVerbose.METRICS_DETAIL:
                            self._append_log(x_train, y_train, "train")
                            self._append_log(x_test, y_test, "test")
                            self._print_metric_logs("train")
                            self._print_metric_logs("test")
                if self.verbose >= NNVerbose.EPOCH:
                    sub_bar.update()
                if (counter + 1) % record_period == 0:
                    self._append_log(x_train, y_train, "train")
                    self._append_log(x_test, y_test, "test")
                    if self.verbose >= NNVerbose.METRICS:
                        self._print_metric_logs("train")
                        self._print_metric_logs("test")
                    if self.verbose >= NNVerbose.EPOCH:
                        bar.update(counter // record_period + 1)
                        sub_bar = ProgressBar(min_value=0, max_value=train_repeat * record_period - 1, name="Iteration")
Example #29
    def fit(self,
            x=None,
            y=None,
            x_test=None,
            y_test=None,
            lr=0.01,
            lb=0.01,
            epoch=20,
            weight_scale=1,
            batch_size=256,
            record_period=1,
            optimizer=None,
            show_loss=True,
            metrics=None,
            do_log=False,
            verbose=None):

        x, y = self._feed_data(x, y)
        self._lr = lr
        self._init_optimizer(optimizer)

        if not self._layers:
            raise BuildNetworkError(
                "Please provide layers before fitting data")

        if y.shape[1] != self._current_dimension:
            raise BuildNetworkError(
                "Output layer's shape should be {}, {} found".format(
                    self._current_dimension, y.shape[1]))

        x_train, y_train, x_test = x, y, NNDist._transfer_x(x_test)
        train_len = len(x_train)
        batch_size = min(batch_size, train_len)
        do_random_batch = train_len > batch_size
        train_repeat = 1 if not do_random_batch else int(train_len / batch_size) + 1

        with tf.name_scope("Entry"):
            self._tfx = tf.placeholder(tf.float32, shape=[None, *x.shape[1:]])
        self._tfy = tf.placeholder(tf.float32, shape=[None, y.shape[1]])
        if epoch <= 0:
            return

        self._metrics = ["acc"] if metrics is None else metrics
        for i, metric in enumerate(self._metrics):
            if isinstance(metric, str):
                if metric not in self._available_metrics:
                    raise BuildNetworkError(
                        "Metric '{}' is not implemented".format(metric))
                self._metrics[i] = self._available_metrics[metric]
        self._metric_names = [_m.__name__ for _m in self._metrics]

        self._logs = {
            name: [[] for _ in range(len(self._metrics) + 1)]
            for name in ("train", "test")
        }
        if verbose is not None:
            self.verbose = verbose

        bar = ProgressBar(min_value=0,
                          max_value=max(1, epoch // record_period),
                          name="Epoch")
        if self.verbose >= NNVerbose.EPOCH and epoch > 0:
            bar.start()
        img = None

        with self._sess.as_default() as sess:
            # Session
            self._y_pred = self.get_rs(self._tfx)
            self._loss = self.get_rs(self._tfx,
                                     self._tfy) + self._get_l2_loss(lb)
            self._activations = self._get_activations(self._tfx)
            self._init_train_step(sess)
            for weight in self._tf_weights:
                # `weight *= weight_scale` would only rebind the Python name;
                # scaling a tf.Variable requires an explicit assign op.
                sess.run(weight.assign(weight * weight_scale))

            sub_bar = ProgressBar(min_value=0,
                                  max_value=train_repeat * record_period - 1,
                                  name="Iteration")
            for counter in range(epoch):
                if self.verbose >= NNVerbose.EPOCH and counter % record_period == 0:
                    sub_bar.start()
                for i in range(train_repeat):
                    if do_random_batch:
                        batch = np.random.choice(train_len, batch_size)
                        x_batch, y_batch = x_train[batch], y_train[batch]
                    else:
                        x_batch, y_batch = x_train, y_train
                    feed_dict = {self._tfx: x_batch, self._tfy: y_batch}
                    self._train_step.run(feed_dict=feed_dict)
                    if self.verbose >= NNVerbose.DEBUG:
                        pass
                    if self.verbose >= NNVerbose.EPOCH:
                        if sub_bar.update() and self.verbose >= NNVerbose.METRICS_DETAIL:
                            self._append_log(x, y, "train", get_loss=show_loss)
                            self._append_log(x_test,
                                             y_test,
                                             "test",
                                             get_loss=show_loss)
                            self._print_metric_logs(show_loss, "train")
                            self._print_metric_logs(show_loss, "test")
                if self.verbose >= NNVerbose.EPOCH:
                    sub_bar.update()

                if (counter + 1) % record_period == 0:
                    if do_log:
                        self._append_log(x, y, "train", get_loss=show_loss)
                        if x_test is not None:
                            self._append_log(x_test,
                                             y_test,
                                             "test",
                                             get_loss=show_loss)
                        if self.verbose >= NNVerbose.METRICS:
                            self._print_metric_logs(show_loss, "train")
                            if x_test is not None:
                                self._print_metric_logs(show_loss, "test")
                    if self.verbose >= NNVerbose.EPOCH:
                        bar.update(counter // record_period + 1)
                        sub_bar = ProgressBar(
                            min_value=0,
                            max_value=train_repeat * record_period - 1,
                            name="Iteration")

        if img is not None:
            cv2.waitKey(0)
            cv2.destroyAllWindows()

        return self._logs
Example #30
    def fit(self, x=None, y=None, lr=0.01, epoch=10, batch_size=128, train_rate=None,
            verbose=0, metrics=None, record_period=100):

        self.verbose = verbose
        self._optimizer = Adam(lr)
        self._tfx = tf.placeholder(tf.float32, shape=[None, x.shape[1]])
        self._tfy = tf.placeholder(tf.float32, shape=[None, y.shape[1]])

        if train_rate is not None:
            train_rate = float(train_rate)
            train_len = int(len(x) * train_rate)
            shuffle_suffix = np.random.permutation(int(len(x)))
            x, y = x[shuffle_suffix], y[shuffle_suffix]
            x_train, y_train = x[:train_len], y[:train_len]
            x_test, y_test = x[train_len:], y[train_len:]
        else:
            x_train = x_test = x
            y_train = y_test = y

        train_len = len(x_train)
        batch_size = min(batch_size, train_len)
        do_random_batch = train_len > batch_size
        train_repeat = 1 if not do_random_batch else int(train_len / batch_size) + 1

        self._metrics = ["acc"] if metrics is None else metrics
        for i, metric in enumerate(self._metrics):
            if isinstance(metric, str):
                self._metrics[i] = self._available_metrics[metric]
        self._metric_names = [_m.__name__ for _m in self._metrics]
        self._logs = {
            name: [[] for _ in range(len(self._metrics) + 1)] for name in ("train", "test")
        }

        bar = ProgressBar(max_value=max(1, epoch // record_period), name="Epoch", start=False)
        if self.verbose >= NNVerbose.EPOCH:
            bar.start()

        with self._sess.as_default() as sess:

            # Define session
            self._cost = self.get_rs(self._tfx, self._tfy)
            self._y_pred = self.get_rs(self._tfx)
            self._train_step = self._optimizer.minimize(self._cost)
            sess.run(tf.global_variables_initializer())

            # Train
            sub_bar = ProgressBar(max_value=train_repeat * record_period - 1, name="Iteration", start=False)
            for counter in range(epoch):
                if self.verbose >= NNVerbose.EPOCH and counter % record_period == 0:
                    sub_bar.start()
                for _i in range(train_repeat):
                    if do_random_batch:
                        batch = np.random.choice(train_len, batch_size)
                        x_batch, y_batch = x_train[batch], y_train[batch]
                    else:
                        x_batch, y_batch = x_train, y_train
                    self._train_step.run(feed_dict={self._tfx: x_batch, self._tfy: y_batch})
                    if self.verbose >= NNVerbose.EPOCH:
                        if sub_bar.update() and self.verbose >= NNVerbose.METRICS_DETAIL:
                            self._append_log(x_train, y_train, "train")
                            self._append_log(x_test, y_test, "test")
                            self._print_metric_logs("train")
                            self._print_metric_logs("test")
                if self.verbose >= NNVerbose.EPOCH:
                    sub_bar.update()
                if (counter + 1) % record_period == 0:
                    self._append_log(x_train, y_train, "train")
                    self._append_log(x_test, y_test, "test")
                    if self.verbose >= NNVerbose.METRICS:
                        self._print_metric_logs("train")
                        self._print_metric_logs("test")
                    if self.verbose >= NNVerbose.EPOCH:
                        bar.update(counter // record_period + 1)
                        sub_bar = ProgressBar(max_value=train_repeat * record_period - 1, name="Iteration", start=False)
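
Examples #28 and #30 share the same TensorFlow 1.x skeleton: placeholders, a cost tensor, optimizer.minimize, then feed_dict runs inside a session. A minimal self-contained sketch of that skeleton on made-up linear-regression data:

import numpy as np
import tensorflow as tf  # TensorFlow 1.x API, as in the snippets above

x_data = np.random.randn(256, 4).astype(np.float32)
y_data = x_data.dot(np.ones((4, 1), dtype=np.float32))

tfx = tf.placeholder(tf.float32, shape=[None, 4])
tfy = tf.placeholder(tf.float32, shape=[None, 1])
w = tf.Variable(tf.zeros([4, 1]))
cost = tf.reduce_mean(tf.square(tf.matmul(tfx, w) - tfy))
train_step = tf.train.AdamOptimizer(0.1).minimize(cost)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(100):
        batch = np.random.choice(len(x_data), 64)
        sess.run(train_step, feed_dict={tfx: x_data[batch], tfy: y_data[batch]})
    print(sess.run(cost, feed_dict={tfx: x_data, tfy: y_data}))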
Example #31
    def fit(self,
            x,
            y,
            lr=None,
            epoch=None,
            batch_size=None,
            train_rate=None,
            optimizer=None,
            metrics=None,
            record_period=None,
            verbose=None):
        if lr is None:
            lr = self._params["lr"]
        if epoch is None:
            epoch = self._params["epoch"]
        if optimizer is None:
            optimizer = self._params["optimizer"]
        if batch_size is None:
            batch_size = self._params["batch_size"]
        if train_rate is None:
            train_rate = self._params["train_rate"]
        if metrics is None:
            metrics = self._params["metrics"]
        if record_period is None:
            record_period = self._params["record_period"]
        if verbose is None:
            verbose = self._params["verbose"]
        self.verbose = verbose
        self._init_optimizers(optimizer, lr, epoch)
        layer_width = len(self._layers)
        self._preview()
        """
        如果train_rate不为空
        并打乱数据
        划分训练和测试集
        """
        if train_rate is not None:
            train_rate = float(train_rate)
            train_len = int(len(x) * train_rate)
            shuffle_suffix = np.random.permutation(len(x))
            x, y = x[shuffle_suffix], y[shuffle_suffix]
            x_train, y_train = x[:train_len], y[:train_len]
            x_test, y_test = x[train_len:], y[train_len:]
        else:
            x_train = x_test = x
            y_train = y_test = y
        """
        y是one_hot形式,每个样本的类别通过每个数组中值最大值得下标作为类别
        axis=1表示横向取最大值
        a = np.array([[0,0,0,1]])
        c = np.argmax(a,axis=1)
        c = array([3], dtype=int64)
        """
        y_train_classes = np.argmax(y_train, axis=1)
        y_test_classes = np.argmax(y_test, axis=1)

        train_len = len(x_train)
        batch_size = min(batch_size, train_len)
        do_random_batch = train_len > batch_size
        train_repeat = 1 if not do_random_batch else int(train_len /
                                                         batch_size) + 1

        if metrics is None:
            metrics = []
        self._metrics = self.get_metrics(metrics)
        self._metric_names = [_m.__name__ for _m in metrics]  # e.g. "acc"
        self._logs = {
            name: [[] for _ in range(len(metrics) + 1)]
            for name in ("train", "test")
        }

        bar = ProgressBar(max_value=max(1, epoch // record_period),
                          name="Epoch",
                          start=False)
        if self.verbose >= NNVerbose.EPOCH:
            bar.start()
        sub_bar = ProgressBar(max_value=train_repeat * record_period - 1,
                              name="Iteration",
                              start=False)

        for counter in range(epoch):
            if self.verbose >= NNVerbose.EPOCH and counter % record_period == 0:
                sub_bar.start()
            for _ in range(train_repeat):
                if do_random_batch:
                    """如果随机batch则,在train_len中选择batch_size个样本"""
                    batch = np.random.choice(train_len, batch_size)
                    x_batch, y_batch = x_train[batch], y_train[batch]
                else:
                    x_batch, y_batch = x_train, y_train
                self._w_optimizer.update()
                self._b_optimizer.update()
                activations = self._get_activations(x_batch)
                """
                反向传播 :bp
                先求出第一个局部梯度,然后后面的梯度和前一个梯度有关系 &(i) = &(i+1) * w_i^T . u'(i)
            
                更新w,b :opt
                """
                deltas = [self._layers[-1].bp_first(y_batch, activations[-1])]
                for i in range(-1, -len(activations), -1):  #i=-1,-2, step=-1
                    deltas.append(self._layers[i - 1].bp(
                        deltas[-1], self._weights[i], activations[i - 1]))  #
                for i in range(layer_width - 1, 0, -1):  #i=2,1 ,step=-1
                    self._opt(i, activations[i - 1],
                              deltas[layer_width - i - 1])
                #第一层对应的是输入
                self._opt(
                    0, x_batch, deltas[-1]
                )  #对应的是第一个隐藏层, W_(i-1)' = v^T_(i-1) * delta_(i) delta[0]是损失层,delta[-1]是第一层
                if self.verbose >= NNVerbose.EPOCH:
                    if sub_bar.update() and self.verbose >= NNVerbose.METRICS_DETAIL:
                        self._append_log(x_train, y_train, y_train_classes,
                                         "train")
                        self._append_log(x_test, y_test, y_test_classes,
                                         "test")
                        self._print_metric_logs("train")
                        self._print_metric_logs("test")
            if self.verbose >= NNVerbose.EPOCH:
                sub_bar.update()
            if (counter + 1) % record_period == 0:
                self._append_log(x_train, y_train, y_train_classes, "train")
                self._append_log(x_test, y_test, y_test_classes, "test")
                if self.verbose >= NNVerbose.METRICS:
                    self._print_metric_logs("train")
                    self._print_metric_logs("test")
                if self.verbose >= NNVerbose.EPOCH:
                    bar.update(counter // record_period + 1)
                    sub_bar = ProgressBar(
                        max_value=train_repeat * record_period - 1,
                        name="Iteration",
                        start=False)
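
A compact, runnable sketch of the delta recursion the comments above describe, for a two-layer sigmoid net under a squared-error-style loss (raw arrays stand in for the project's layer objects):

import numpy as np

def sigmoid(z):
    return 1 / (1 + np.exp(-z))

x = np.random.randn(8, 5)                   # batch of 8 samples, 5 features
y = np.eye(3)[np.random.randint(0, 3, 8)]   # one-hot targets
w1, w2 = np.random.randn(5, 4), np.random.randn(4, 3)

a1 = sigmoid(x.dot(w1))
a2 = sigmoid(a1.dot(w2))

delta2 = (a2 - y) * a2 * (1 - a2)           # bp_first: output-layer delta
delta1 = delta2.dot(w2.T) * a1 * (1 - a1)   # delta_i = (delta_{i+1} . W_i^T) * u'(i)

lr = 0.1
w2 -= lr * a1.T.dot(delta2)                 # _opt: W' -= lr * v^T . delta
w1 -= lr * x.T.dot(delta1)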
Example #32
    def fit(self,
            im,
            om,
            generator,
            cell=LSTMCell,
            n_hidden=128,
            n_history=0,
            squeeze=None,
            use_final_state=None,
            activation=None,
            lr=0.01,
            epoch=10,
            n_iter=128,
            batch_size=64,
            optimizer="Adam",
            eps=1e-8,
            verbose=1):
        if squeeze:
            self._squeeze = True
        if use_final_state:
            self._use_final_state = True
        if callable(activation):
            self._activation = activation
        self._generator = generator
        self._im, self._om = im, om
        self._optimizer = OptFactory().get_optimizer_by_name(optimizer, lr)
        self._define_input(im, om)

        cell = cell(n_hidden)
        initial_state = cell.zero_state(tf.shape(self._input)[0], tf.float32)
        rnn_outputs, rnn_final_state = tf.nn.dynamic_rnn(
            cell, self._input, initial_state=initial_state)
        self._get_output(rnn_outputs, rnn_final_state, n_history)
        loss = self._get_loss(eps)
        train_step = self._optimizer.minimize(loss)
        self._log["iter_err"] = []
        self._log["epoch_err"] = []
        self._sess.run(tf.global_variables_initializer())
        bar = ProgressBar(max_value=epoch, name="Epoch", start=False)
        if verbose >= 2:
            bar.start()
        for _ in range(epoch):
            epoch_err = 0
            sub_bar = ProgressBar(max_value=n_iter, name="Iter", start=False)
            if verbose >= 2:
                sub_bar.start()
            for __ in range(n_iter):
                x_batch, y_batch = self._generator.gen(batch_size)
                iter_err = self._sess.run([loss, train_step], {
                    self._tfx: x_batch,
                    self._tfy: y_batch,
                })[0]
                self._log["iter_err"].append(iter_err)
                epoch_err += iter_err
                if verbose >= 2:
                    sub_bar.update()
            self._log["epoch_err"].append(epoch_err / n_iter)
            if verbose >= 1:
                self._verbose()
                if verbose >= 2:
                    bar.update()
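
The generator handed to this RNN fit only needs a gen(batch_size) method returning an (x_batch, y_batch) pair; a hypothetical toy generator matching those shapes (name and task are assumptions):

import numpy as np

class ToyGenerator:
    # Hypothetical: random sequences paired with random one-hot labels.
    def __init__(self, n_time_step=10, im=4, om=2):
        self.n_time_step, self.im, self.om = n_time_step, im, om

    def gen(self, batch_size):
        x = np.random.randn(batch_size, self.n_time_step, self.im)
        y = np.eye(self.om)[np.random.randint(0, self.om, batch_size)]
        return x.astype(np.float32), y.astype(np.float32)

x_batch, y_batch = ToyGenerator().gen(64)
print(x_batch.shape, y_batch.shape)  # (64, 10, 4) (64, 2)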
Example #33
    def fit(self, x, y, lr=None, epoch=None, batch_size=None, train_rate=None,
            optimizer=None, metrics=None, record_period=None, verbose=None):
        if lr is None:
            lr = self._params["lr"]
        if epoch is None:
            epoch = self._params["epoch"]
        if optimizer is None:
            optimizer = self._params["optimizer"]
        if batch_size is None:
            batch_size = self._params["batch_size"]
        if train_rate is None:
            train_rate = self._params["train_rate"]
        if metrics is None:
            metrics = self._params["metrics"]
        if record_period is None:
            record_period = self._params["record_period"]
        if verbose is None:
            verbose = self._params["verbose"]
        self.verbose = verbose
        self._init_optimizers(optimizer, lr, epoch)
        layer_width = len(self._layers)
        self._preview()

        if train_rate is not None:
            train_rate = float(train_rate)
            train_len = int(len(x) * train_rate)
            shuffle_suffix = np.random.permutation(len(x))
            x, y = x[shuffle_suffix], y[shuffle_suffix]
            x_train, y_train = x[:train_len], y[:train_len]
            x_test, y_test = x[train_len:], y[train_len:]
        else:
            x_train = x_test = x
            y_train = y_test = y
        y_train_classes = np.argmax(y_train, axis=1)
        y_test_classes = np.argmax(y_test, axis=1)

        train_len = len(x_train)
        batch_size = min(batch_size, train_len)
        do_random_batch = train_len > batch_size
        train_repeat = 1 if not do_random_batch else int(train_len / batch_size) + 1

        if metrics is None:
            metrics = []
        self._metrics = self.get_metrics(metrics)
        self._metric_names = [_m.__name__ for _m in metrics]
        self._logs = {
            name: [[] for _ in range(len(metrics) + 1)] for name in ("train", "test")
        }

        bar = ProgressBar(max_value=max(1, epoch // record_period), name="Epoch", start=False)
        if self.verbose >= NNVerbose.EPOCH:
            bar.start()

        sub_bar = ProgressBar(max_value=train_repeat * record_period - 1, name="Iteration", start=False)
        for counter in range(epoch):
            if self.verbose >= NNVerbose.EPOCH and counter % record_period == 0:
                sub_bar.start()
            for _ in range(train_repeat):
                if do_random_batch:
                    batch = np.random.choice(train_len, batch_size)
                    x_batch, y_batch = x_train[batch], y_train[batch]
                else:
                    x_batch, y_batch = x_train, y_train
                self._w_optimizer.update()
                self._b_optimizer.update()
                activations = self._get_activations(x_batch)
                deltas = [self._layers[-1].bp_first(y_batch, activations[-1])]
                for i in range(-1, -len(activations), -1):
                    deltas.append(
                        self._layers[i - 1].bp(activations[i - 1], self._weights[i], deltas[-1])
                    )
                for i in range(layer_width - 1, 0, -1):
                    self._opt(i, activations[i - 1], deltas[layer_width - i - 1])
                self._opt(0, x_batch, deltas[-1])
                if self.verbose >= NNVerbose.EPOCH:
                    if sub_bar.update() and self.verbose >= NNVerbose.METRICS_DETAIL:
                        self._append_log(x_train, y_train, y_train_classes, "train")
                        self._append_log(x_test, y_test, y_test_classes, "test")
                        self._print_metric_logs("train")
                        self._print_metric_logs("test")
            if self.verbose >= NNVerbose.EPOCH:
                sub_bar.update()
            if (counter + 1) % record_period == 0:
                self._append_log(x_train, y_train, y_train_classes, "train")
                self._append_log(x_test, y_test, y_test_classes, "test")
                if self.verbose >= NNVerbose.METRICS:
                    self._print_metric_logs("train")
                    self._print_metric_logs("test")
                if self.verbose >= NNVerbose.EPOCH:
                    bar.update(counter // record_period + 1)
                    sub_bar = ProgressBar(max_value=train_repeat * record_period - 1, name="Iteration", start=False)
Example #34
    def fit(self,
            x,
            y,
            x_test,
            y_test,
            n_epoch=None,
            n_batch=None,
            print_settings=True):
        if not self.settings_inited:
            self.init_all_settings()
        if n_epoch is not None:
            self.n_epoch = n_epoch
        if n_batch is not None:
            self.n_batch = n_batch
        x, y, x_test, y_test = self.prepare_data(x, y, x_test, y_test)
        if not self.model_built:
            self.build_model(x, y, x_test, y_test, print_settings)
        count = 0
        train_losses, test_losses = [], []
        with self._sess.as_default() as sess:
            # Prepare
            i = 0
            train_writer, test_writer, train_merge_op, test_merge_op = self._prepare_tensorboard_verbose(
                sess)
            bar = ProgressBar(max_value=self.n_epoch, name="Main")
            train_info = [train_merge_op, train_losses, train_writer]
            test_info = [test_merge_op, test_losses, test_writer]
            self._calculate_loss(train_losses, test_losses)
            train_metric, test_metric = self._get_metrics(x, y, x_test, y_test)
            if self.tensorboard_verbose > 0:
                self._do_tensorboard_verbose(count, train_info, test_info,
                                             train_metric, test_metric)
            # Train
            while i < self.n_epoch:
                for local_dict in self.gen_dicts(self.train_data, count=count):
                    count += 1
                    self._sess.run(self._train_step, local_dict)
                    if self.snapshot_step > 0 and count % self.snapshot_step == 0:
                        if self.tensorboard_verbose > 0:
                            train_metric, test_metric = self._get_metrics(
                                x, y, x_test, y_test)
                            self._do_tensorboard_verbose(
                                count, train_info, test_info, train_metric,
                                test_metric)
                i += 1
                if self.tensorboard_verbose > 0:
                    if train_metric is None:
                        y_pred, y_test_pred = self._calculate_loss(
                            train_losses, test_losses, return_pred=True)
                        train_metric = self.metric(y, y_pred)
                        test_metric = self.metric(
                            y_test,
                            y_test_pred) if y_test is not None else None
                    else:
                        self._calculate_loss(train_losses, test_losses)
                    self._do_tensorboard_verbose(count, train_info, test_info,
                                                 train_metric, test_metric)
                else:
                    self._calculate_loss(train_losses, test_losses)
                if bar is not None:
                    bar.update()
        return train_losses, test_losses
Example #35
    def fit(self,
            x,
            y,
            lr=None,
            epoch=None,
            batch_size=None,
            train_rate=None,
            optimizer=None,
            metrics=None,
            record_period=None,
            verbose=None,
            preview=None):
        if lr is None:
            lr = self._params["lr"]
        if epoch is None:
            epoch = self._params["epoch"]
        if optimizer is None:
            optimizer = self._params["optimizer"]
        if batch_size is None:
            batch_size = self._params["batch_size"]
        if train_rate is None:
            train_rate = self._params["train_rate"]
        if metrics is None:
            metrics = self._params["metrics"]
        if record_period is None:
            record_period = self._params["record_period"]
        if verbose is None:
            verbose = self._params["verbose"]
        if preview is None:
            preview = self._params["preview"]

        x = NN._transfer_x(x)
        self.verbose = verbose
        self._optimizers = OptFactory().get_optimizer_by_name(optimizer, lr)
        # None is a placeholder for the batch size; unpacking x.shape[1:]
        # with * is a neat trick, e.g.:
        """
        a = np.array([[12, 21], [123, 2]])
        print(*a.shape)   # -> 2 2
        print(a.shape)    # -> (2, 2)
        """
        self._tfx = tf.placeholder(tf.float32, shape=[None, *x.shape[1:]])
        self._tfy = tf.placeholder(tf.float32, shape=[None, y.shape[1]])

        if train_rate is not None:
            train_rate = float(train_rate)
            train_len = int(len(x) * train_rate)
            shuffle_suffix = np.random.permutation(int(len(x)))
            x, y = x[shuffle_suffix], y[shuffle_suffix]
            x_train, y_train = x[:train_len], y[:train_len]
            x_test, y_test = x[train_len:], y[train_len:]
        else:
            x_train = x_test = x
            y_train = y_test = y
        y_train_classes = np.argmax(y_train, axis=1)
        y_test_classes = np.argmax(y_test, axis=1)
        if metrics is None:
            metrics = []
        self._metrics = self.get_metrics(metrics)
        self._metric_names = [_m.__name__ for _m in metrics]
        self._logs = {
            name: [[] for _ in range(len(metrics) + 1)]
            for name in ("Train", "Test")
        }

        bar = ProgressBar(max_value=max(1, epoch // record_period),
                          name="Epoch",
                          start=False)
        if self.verbose >= NNVerbose.EPOCH:
            bar.start()

        if preview:
            self._preview()
        args = ((x_train, y_train, y_train_classes, x_test, y_test,
                 y_test_classes, self.verbose >= NNVerbose.METRICS_DETAIL),
                (None, None, x_train, y_train, y_train_classes, x_test, y_test,
                 y_test_classes, self.verbose >= NNVerbose.METRICS))

        train_repeat = self._get_train_repeat(x, batch_size)
        with self._sess.as_default() as sess:
            self._y_pred = self._get_rs(self._tfx)
            self._inner_y = self._get_rs(self._tfx, predict=False)
            self._loss = self._layers[-1].calculate(self._tfy, self._inner_y)
            self._train_step = self._optimizers.minimize(self._loss)
            sess.run(tf.global_variables_initializer())
            for counter in range(epoch):
                if self.verbose >= NNVerbose.ITER and counter % record_period == 0:
                    sub_bar = ProgressBar(
                        max_value=train_repeat * record_period - 1,
                        name="Iteration")
                else:
                    sub_bar = None
                self._batch_training(x_train, y_train, batch_size,
                                     train_repeat, self._loss,
                                     self._train_step, sub_bar, *args[0])
                if (counter + 1) % record_period == 0:
                    self._batch_work(*args[1])
                    if self.verbose >= NNVerbose.EPOCH:
                        bar.update(counter // record_period + 1)
Example #36
    def fit(self,
            x, y, x_test=None, y_test=None,
            batch_size=128, record_period=1, train_only=False,
            optimizer="Adam", lr=0.001, lb=0.001, epoch=20, weight_scale=1, apply_bias=True,
            show_loss=True, metrics=None, do_log=True, verbose=None,
            visualize=False, visualize_setting=None,
            draw_weights=False, animation_params=None):

        self._lr, self._epoch = lr, epoch
        for weight in self._weights:
            if weight is not None:
                weight.data *= weight_scale
        self._model_parameters = [w for w in self._weights if w is not None]
        if apply_bias:
            self._model_parameters += self._bias
        self._optimizer = optimizer
        self._init_optimizer()
        assert isinstance(self._optimizer, Optimizer)
        print()
        print("=" * 30)
        print("Optimizers")
        print("-" * 30)
        print(self._optimizer)
        print("-" * 30)

        if not self._layers:
            raise BuildNetworkError("Please provide layers before fitting data")
        if y.shape[1] != self._current_dimension:
            raise BuildNetworkError("Output layer's shape should be {}, {} found".format(
                self._current_dimension, y.shape[1]))

        x, y = self._arr_to_variable(False, x, y)
        if x_test is not None and y_test is not None:
            x_test, y_test = self._arr_to_variable(False, x_test, y_test)
        (x_train, x_test), (y_train, y_test) = self._split_data(
            x, y, x_test, y_test, train_only)
        train_len = len(x_train)
        batch_size = min(batch_size, train_len)
        do_random_batch = train_len > batch_size
        train_repeat = 1 if not do_random_batch else int(train_len / batch_size) + 1
        self._regularization_param = 1 - lb * lr / batch_size
        self._get_min_max(x_train, y_train)

        self._metrics = ["acc"] if metrics is None else metrics
        for i, metric in enumerate(self._metrics):
            if isinstance(metric, str):
                if metric not in self._available_metrics:
                    raise BuildNetworkError("Metric '{}' is not implemented".format(metric))
                self._metrics[i] = self._available_metrics[metric]
        self._metric_names = [_m.__name__ for _m in self._metrics]

        self._logs = {
            name: [[] for _ in range(len(self._metrics) + 1)] for name in ("train", "cv", "test")
        }
        if verbose is not None:
            self.verbose = verbose

        self._apply_bias = apply_bias

        bar = ProgressBar(max_value=max(1, epoch // record_period), name="Epoch", start=False)
        if self.verbose >= NNVerbose.EPOCH:
            bar.start()
        img, ims = None, []

        if draw_weights:
            weight_trace = [[[org] for org in weight] for weight in self._weights]
        else:
            weight_trace = []

        loss_function = self._layers[-1].calculate
        args = (
            x_train, y_train, x_test, y_test,
            draw_weights, weight_trace, show_loss
        )

        *animation_properties, animation_params = self._get_animation_params(animation_params)
        sub_bar = ProgressBar(max_value=train_repeat * record_period - 1, name="Iteration", start=False)
        for counter in range(epoch):
            self._optimizer.update()
            if self.verbose >= NNVerbose.ITER and counter % record_period == 0:
                sub_bar.start()
            self.batch_training(
                x_train, y_train, batch_size, train_repeat, loss_function, sub_bar, *args
            )
            if self.verbose >= NNVerbose.ITER:
                sub_bar.update()
            self._handle_animation(
                counter, x, y, ims, animation_params, *animation_properties,
                img=self._draw_2d_network(**animation_params), name="Neural Network"
            )
            if do_log:
                self._append_log(x, y, "train", get_loss=show_loss)
                self._append_log(x_test, y_test, "cv", get_loss=show_loss)
            if (counter + 1) % record_period == 0:
                if do_log and self.verbose >= NNVerbose.METRICS:
                    self._print_metric_logs(show_loss, "train")
                    self._print_metric_logs(show_loss, "cv")
                if visualize:
                    if visualize_setting is None:
                        self.visualize2d(x_test, y_test)
                    else:
                        self.visualize2d(x_test, y_test, *visualize_setting)
                if self.verbose >= NNVerbose.EPOCH:
                    bar.update(counter // record_period + 1)
                    if self.verbose >= NNVerbose.ITER:
                        sub_bar = ProgressBar(max_value=train_repeat * record_period - 1, name="Iteration", start=False)

        if do_log:
            self._append_log(x_test, y_test, "test", get_loss=show_loss)
        if img is not None:
            cv2.waitKey(0)
            cv2.destroyAllWindows()
        if draw_weights:
            ts = np.arange(epoch * train_repeat + 1)
            for i, weight in enumerate(self._weights):
                plt.figure()
                for j in range(len(weight)):
                    plt.plot(ts, weight_trace[i][j])
                plt.title("Weights toward layer {} ({})".format(i + 1, self._layers[i].name))
                plt.show()
        self._handle_mp4(ims, animation_properties, "NN")
        return self._logs
Example #37
    def fit(self,
            x,
            y,
            sample_weight=None,
            kernel=None,
            epoch=None,
            x_test=None,
            y_test=None,
            metrics=None,
            **kwargs):
        if sample_weight is None:
            sample_weight = self._params["sw"]
        if kernel is None:
            kernel = self._params["kernel"]
        if epoch is None:
            epoch = self._params["epoch"]
        if x_test is None:
            x_test = self._params["x_test"]
        if y_test is None:
            y_test = self._params["y_test"]
        if metrics is None:
            metrics = self._params["metrics"]
        self._x, self._y = np.atleast_2d(x), np.asarray(y)
        if kernel == "poly":
            _p = kwargs.get("p", self._params["p"])
            self._kernel_name = "Polynomial"
            self._kernel_param = "degree = {}".format(_p)
            self._kernel = lambda _x, _y: KernelBase._poly(_x, _y, _p)
        elif kernel == "rbf":
            _gamma = kwargs.get("gamma", 1 / self._x.shape[1])
            self._kernel_name = "RBF"
            self._kernel_param = r"$\gamma = {:8.6}$".format(_gamma)
            self._kernel = lambda _x, _y: KernelBase._rbf(_x, _y, _gamma)
        else:
            raise NotImplementedError(
                "Kernel '{}' has not been defined".format(kernel))
        if sample_weight is None:
            sample_weight = np.ones(len(y))
        else:
            sample_weight = np.asarray(sample_weight) * len(y)

        self._alpha, self._w, self._prediction_cache = (np.zeros(len(x)),
                                                        np.zeros(len(x)),
                                                        np.zeros(len(x)))
        self._gram = self._kernel(self._x, self._x)
        self._b = 0
        self._prepare(sample_weight, **kwargs)

        _fit_args, _logs = [], []
        for _name, _arg in zip(self._fit_args_names, self._fit_args):
            if _name in kwargs:
                _arg = kwargs[_name]
            _fit_args.append(_arg)
        if self._do_log:
            if metrics is not None:
                self.get_metrics(metrics)
            _test_gram = None
            if x_test is not None and y_test is not None:
                _xv, _yv = np.atleast_2d(x_test), np.asarray(y_test)
                _test_gram = self._kernel(_xv, self._x)
            else:
                _xv, _yv = self._x, self._y
        else:
            _yv = _test_gram = None
        bar = ProgressBar(max_value=epoch, name=str(self))
        bar.start()
        for _ in range(epoch):
            if self._fit(sample_weight, *_fit_args):
                bar.update(epoch)
                break
            if self._do_log and metrics is not None:
                _local_logs = []
                for metric in metrics:
                    if _test_gram is None:
                        _local_logs.append(
                            metric(self._y, np.sign(self._prediction_cache)))
                    else:
                        _local_logs.append(
                            metric(
                                _yv,
                                self.predict(_test_gram, gram_provided=True)))
                _logs.append(_local_logs)
            bar.update()
        return _logs
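
The fit above only dispatches on the kernel name; the actual Gram-matrix computation is delegated to KernelBase._poly and KernelBase._rbf, which are not part of this snippet. A minimal sketch of what those two helpers would compute, assuming the usual polynomial kernel (x·y + 1)^p and Gaussian RBF kernel exp(-gamma * ||x - y||^2) on 2-d row-vector inputs:

import numpy as np

class KernelBase:
    @staticmethod
    def _poly(x, y, p):
        # pairwise (x_i . y_j + 1) ** p; x is (n, d), y is (m, d) -> (n, m) Gram matrix
        return (x.dot(y.T) + 1) ** p

    @staticmethod
    def _rbf(x, y, gamma):
        # pairwise exp(-gamma * ||x_i - y_j||^2) via broadcasting -> (n, m) Gram matrix
        return np.exp(-gamma * np.sum((x[:, None, :] - y) ** 2, axis=2))
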
Ejemplo n.º 38
0
    def fit(self,
            im,
            om,
            generator,
            cell=None,
            provide_sequence_length=None,
            squeeze=None,
            use_sparse_labels=None,
            embedding_size=None,
            use_final_state=None,
            n_hidden=None,
            n_history=None,
            activation=None,
            lr=None,
            epoch=None,
            n_iter=None,
            batch_size=None,
            optimizer=None,
            eps=None,
            verbose=None):
        if cell is None:
            cell = self._params["cell"]
        if provide_sequence_length is None:
            provide_sequence_length = self._params["provide_sequence_length"]
        if n_hidden is None:
            n_hidden = self._params["n_hidden"]
        if n_history is None:
            n_history = self._params["n_history"]
        if squeeze:
            self._squeeze = True
        if use_sparse_labels:
            self._use_sparse_labels = True
        if self._squeeze and n_history == 0:
            n_history = 1
        if embedding_size:
            self._embedding_size = embedding_size
        if use_final_state:
            self._use_final_state = True
        if activation is None:
            activation = self._params["activation"]
        if lr is None:
            lr = self._params["lr"]
        if epoch is None:
            epoch = self._params["epoch"]
        if n_iter is None:
            n_iter = self._params["n_iter"]
        if optimizer is None:
            optimizer = self._params["optimizer"]
        if batch_size is None:
            batch_size = self._params["batch_size"]
        if eps is None:
            eps = self._params["eps"]
        if verbose is None:
            verbose = self._params["verbose"]

        self._generator = generator
        self._im, self._om, self._activation = im, om, activation
        self._optimizer = OptFactory().get_optimizer_by_name(optimizer, lr)
        self._define_input(im, om)

        self._cell = cell(n_hidden)
        self._prepare_for_dynamic_rnn(provide_sequence_length)
        rnn_outputs, rnn_final_state = tf.nn.dynamic_rnn(
            self._cell,
            self._input,
            sequence_length=self._sequence_lengths,
            initial_state=self._initial_state)
        self._get_output(rnn_outputs, rnn_final_state, n_history)
        loss = self._get_loss(eps)
        train_step = self._optimizer.minimize(loss)
        self._log["iter_err"] = []
        self._log["epoch_err"] = []
        self._sess.run(tf.global_variables_initializer())
        bar = ProgressBar(max_value=epoch, name="Epoch", start=False)
        if verbose >= 2:
            bar.start()
        for _ in range(epoch):
            epoch_err = 0
            sub_bar = ProgressBar(max_value=n_iter, name="Iter", start=False)
            if verbose >= 2:
                sub_bar.start()
            for __ in range(n_iter):
                if provide_sequence_length:
                    x_batch, y_batch, sequence_length = self._generator.gen(
                        batch_size)
                    feed_dict = {
                        self._tfx: x_batch,
                        self._tfy: y_batch,
                        self._sequence_lengths: sequence_length
                    }
                else:
                    x_batch, y_batch = self._generator.gen(batch_size)
                    feed_dict = {self._tfx: x_batch, self._tfy: y_batch}
                iter_err = self._sess.run([loss, train_step], feed_dict)[0]
                self._log["iter_err"].append(iter_err)
                epoch_err += iter_err
                if verbose >= 2:
                    sub_bar.update()
            self._log["epoch_err"].append(epoch_err / n_iter)
            if verbose >= 1:
                self._verbose()
                if verbose >= 2:
                    bar.update()
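
The generator argument above is used only through its gen(batch_size) method, which must return (x_batch, y_batch) or, when provide_sequence_length is set, (x_batch, y_batch, sequence_length). A toy generator satisfying that protocol (the class name and random sampling are illustrative, not part of the original code):

import numpy as np

class ToyGenerator:
    def __init__(self, x, y, sequence_lengths=None):
        self.x, self.y = np.asarray(x), np.asarray(y)
        self.sequence_lengths = sequence_lengths

    def gen(self, batch_size):
        # sample a random batch; include per-sample lengths when available
        idx = np.random.choice(len(self.x), batch_size)
        if self.sequence_lengths is None:
            return self.x[idx], self.y[idx]
        return self.x[idx], self.y[idx], np.asarray(self.sequence_lengths)[idx]
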
Ejemplo n.º 39
0
 def _extract(self, verbose):
     features = []
     with tf.Session() as sess:
         if self._extractor == "v3":
             _output = "pool_3:0"
         elif self._extractor == "ResNet-v2":
             _output = "InceptionResnetV2/Logits/Flatten/Reshape:0"
         elif self._extractor == "cnn":
             _output = "final_result/Reshape:0"
         else:
             _output = "OutputFlow/Reshape:0"
         flattened_tensor = sess.graph.get_tensor_by_name(_output)
         if self._extractor == "v3":
             _entry = "DecodeJpeg/contents:0"
         elif self._extractor == "ResNet-v2":
             _entry = "Placeholder:0"
         else:
             _entry = "Entry/Placeholder:0"
         pop_lst = []
         if "cnn" in self._extractor or "ResNet" in self._extractor and self._mat_dir is not None:
             features = np.load(self._mat_dir)
         else:
             def process(img_path):
                 img_data = gfile.FastGFile(img_path, "rb").read()
                 feature = sess.run(flattened_tensor, {
                     _entry: img_data
                 })
                 features.append(np.squeeze(feature))
             for i, image_path in enumerate(self._image_paths):
                 if not os.path.isfile(image_path):
                     continue
                 if "v3" in self._extractor:
                     if verbose:
                         print("Processing {}...".format(image_path))
                     try:
                         process(image_path)
                     except Exception as err:
                         if verbose:
                             print(err)
                         name, extension = os.path.splitext(image_path)
                         base = os.path.basename(image_path)
                         if extension.lower() in (".jpg", ".jpeg"):
                             new_name = name[:image_path.rfind(base)] + "{:06d}{}".format(i, extension)
                             print("Renaming {} to {}...".format(image_path, new_name))
                             os.rename(image_path, new_name)
                             process(new_name)
                         else:
                             new_name_base = name[:image_path.rfind(base)] + "{:06d}".format(i)
                             new_name = new_name_base + ".jpg"
                             print("Transforming {} to {}...".format(image_path, new_name))
                             try:
                                 if imghdr.what(image_path) is None:
                                     raise ValueError("{} is not an image".format(image_path))
                                 os.rename(image_path, new_name_base + extension)
                                 cv2.imwrite(new_name, cv2.imread(new_name_base + extension))
                                 os.remove(new_name_base + extension)
                                 process(new_name)
                             except Exception as err:
                                 print(err)
                                 print("Moving {} to '_err' folder...".format(image_path))
                                 if not os.path.isdir("_err"):
                                     os.makedirs("_err")
                                 shutil.move(image_path, os.path.join("_err", os.path.basename(image_path)))
                                 pop_lst.append(i)
                 else:
                     if verbose:
                         print("Reading {}...".format(image_path))
                     image_data = cv2.imread(image_path)
                     if self._extractor == "ResNet-v2":
                         features.append(cv2.resize(image_data, (299, 299)))
                     else:
                         features.append(cv2.resize(image_data, (64, 64)))
         if "v3" not in self._extractor:
             features = np.array(features)
             print("Extracting features...")
             rs = []
             batch_size = math.floor(1e6 / np.prod(features.shape[1:]))
             epoch = int(math.ceil(len(features) / batch_size))
             bar = ProgressBar(max_value=epoch, name="Extract")
             for i in range(epoch):
                 if i == epoch - 1:
                     rs.append(sess.run(flattened_tensor, {
                         _entry: features[i*batch_size:]
                     }))
                 else:
                     rs.append(sess.run(flattened_tensor, {
                         _entry: features[i*batch_size:(i+1)*batch_size]
                     }))
                 bar.update()
             return np.vstack(rs).astype(np.float32)
         if pop_lst:
             labels = []
             pop_cursor, pop_idx = 0, pop_lst[0]
             for i, label in enumerate(self._labels):
                 if i == pop_idx:
                     pop_cursor += 1
                     if pop_cursor < len(pop_lst):
                         pop_idx = pop_lst[pop_cursor]
                     else:
                         pop_idx = -1
                     continue
                 labels.append(label)
             labels = np.array(labels, dtype=np.float32)
         elif self._labels is None:
             labels = None
         else:
             labels = np.array(self._labels, dtype=np.float32)
         return np.array(features, dtype=np.float32), labels
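
The pop_lst bookkeeping above walks self._labels once and skips the indices whose images were moved to the _err folder. Assuming self._labels converts cleanly to an array, the same filtering could be sketched in a single call:

import numpy as np

# equivalent to the pop_lst loop above: drop the labels whose images failed
labels = np.delete(np.asarray(self._labels, dtype=np.float32), pop_lst, axis=0)
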
Ejemplo n.º 40
0
    def fit(self,
            x=None,
            y=None,
            x_test=None,
            y_test=None,
            batch_size=128,
            record_period=1,
            train_only=False,
            optimizer=None,
            w_optimizer=None,
            b_optimizer=None,
            lr=0.001,
            lb=0.001,
            epoch=20,
            weight_scale=1,
            apply_bias=True,
            show_loss=True,
            metrics=None,
            do_log=True,
            verbose=None,
            visualize=False,
            visualize_setting=None,
            draw_weights=False,
            animation_params=None):
        self._lr, self._epoch = lr, epoch
        for weight in self._weights:
            weight *= weight_scale
        if not self._w_optimizer or not self._b_optimizer:
            if not self._optimizer_name:
                if optimizer is None:
                    optimizer = "Adam"
                self._w_optimizer = optimizer if w_optimizer is None else w_optimizer
                self._b_optimizer = optimizer if b_optimizer is None else b_optimizer
            else:
                if not self._w_optimizer:
                    self._w_optimizer = self._optimizer_name
                if not self._b_optimizer:
                    self._b_optimizer = self._optimizer_name
        self._init_optimizer()
        assert isinstance(self._w_optimizer, Optimizer) and isinstance(
            self._b_optimizer, Optimizer)
        print()
        print("=" * 30)
        print("Optimizers")
        print("-" * 30)
        print("w: {}\nb: {}".format(self._w_optimizer, self._b_optimizer))
        print("-" * 30)
        if not self._layers:
            raise BuildNetworkError(
                "Please provide layers before fitting data")
        if y.shape[1] != self._current_dimension:
            raise BuildNetworkError(
                "Output layer's shape should be {}, {} found".format(
                    self._current_dimension, y.shape[1]))

        (x_train, x_test), (y_train,
                            y_test) = self._split_data(x, y, x_test, y_test,
                                                       train_only)
        train_len = len(x_train)
        batch_size = min(batch_size, train_len)
        do_random_batch = train_len > batch_size
        train_repeat = 1 if not do_random_batch else int(train_len /
                                                         batch_size) + 1
        self._regularization_param = 1 - lb * lr / batch_size
        self._get_min_max(x_train, y_train)

        self._metrics = ["acc"] if metrics is None else metrics
        for i, metric in enumerate(self._metrics):
            if isinstance(metric, str):
                if metric not in self._available_metrics:
                    raise BuildNetworkError(
                        "Metric '{}' is not implemented".format(metric))
                self._metrics[i] = self._available_metrics[metric]
        self._metric_names = [_m.__name__ for _m in self._metrics]

        self._logs = {
            name: [[] for _ in range(len(self._metrics) + 1)]
            for name in ("train", "cv", "test")
        }
        if verbose is not None:
            self.verbose = verbose

        layer_width = len(self._layers)
        self._apply_bias = apply_bias

        bar = ProgressBar(max_value=max(1, epoch // record_period),
                          name="Epoch",
                          start=False)
        if self.verbose >= NNVerbose.EPOCH:
            bar.start()
        img, ims = None, []

        if draw_weights:
            weight_trace = [[[org] for org in weight]
                            for weight in self._weights]
        else:
            weight_trace = []

        *animation_properties, animation_params = self._get_animation_params(
            animation_params)
        sub_bar = ProgressBar(max_value=train_repeat * record_period - 1,
                              name="Iteration",
                              start=False)
        for counter in range(epoch):
            self._w_optimizer.update()
            self._b_optimizer.update()
            if self.verbose >= NNVerbose.ITER and counter % record_period == 0:
                sub_bar.start()
            for _ in range(train_repeat):
                if do_random_batch:
                    batch = np.random.choice(train_len, batch_size)
                    x_batch, y_batch = x_train[batch], y_train[batch]
                else:
                    x_batch, y_batch = x_train, y_train
                activations = self._get_activations(x_batch)

                deltas = [self._layers[-1].bp_first(y_batch, activations[-1])]
                for i in range(-1, -len(activations), -1):
                    deltas.append(self._layers[i - 1].bp(
                        activations[i - 1], self._weights[i], deltas[-1]))

                for i in range(layer_width - 1, 0, -1):
                    if not isinstance(self._layers[i], SubLayer):
                        self._opt(i, activations[i - 1],
                                  deltas[layer_width - i - 1])
                self._opt(0, x_batch, deltas[-1])

                if draw_weights:
                    for i, weight in enumerate(self._weights):
                        for j, new_weight in enumerate(weight.copy()):
                            weight_trace[i][j].append(new_weight)
                if self.verbose >= NNVerbose.DEBUG:
                    pass  # placeholder: no extra per-iteration debug output implemented
                if self.verbose >= NNVerbose.ITER:
                    if sub_bar.update(
                    ) and self.verbose >= NNVerbose.METRICS_DETAIL:
                        self._append_log(x, y, "train", get_loss=show_loss)
                        self._append_log(x_test,
                                         y_test,
                                         "cv",
                                         get_loss=show_loss)
                        self._print_metric_logs(show_loss, "train")
                        self._print_metric_logs(show_loss, "cv")
            if self.verbose >= NNVerbose.ITER:
                sub_bar.update()
            self._handle_animation(
                counter,
                x,
                y,
                ims,
                animation_params,
                *animation_properties,
                img=self._draw_2d_network(**animation_params),
                name="Neural Network")
            if do_log:
                self._append_log(x, y, "train", get_loss=show_loss)
                self._append_log(x_test, y_test, "cv", get_loss=show_loss)
            if (counter + 1) % record_period == 0:
                if do_log and self.verbose >= NNVerbose.METRICS:
                    self._print_metric_logs(show_loss, "train")
                    self._print_metric_logs(show_loss, "cv")
                if visualize:
                    if visualize_setting is None:
                        self.visualize2d(x_test, y_test)
                    else:
                        self.visualize2d(x_test, y_test, *visualize_setting)
                if self.verbose >= NNVerbose.EPOCH:
                    bar.update(counter // record_period + 1)
                    if self.verbose >= NNVerbose.ITER:
                        sub_bar = ProgressBar(
                            max_value=train_repeat * record_period - 1,
                            name="Iteration",
                            start=False)

        if do_log:
            self._append_log(x_test, y_test, "test", get_loss=show_loss)
        if img is not None:
            cv2.waitKey(0)
            cv2.destroyAllWindows()
        if draw_weights:
            ts = np.arange(epoch * train_repeat + 1)
            for i, weight in enumerate(self._weights):
                plt.figure()
                for j in range(len(weight)):
                    plt.plot(ts, weight_trace[i][j])
                plt.title("Weights toward layer {} ({})".format(
                    i + 1, self._layers[i].name))
                plt.show()
        self._handle_mp4(ims, animation_properties, "NN")
        return self._logs
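
self._regularization_param is set above to 1 - lb * lr / batch_size and then consumed inside self._opt, which this snippet does not show. Assuming the usual shrink-then-step L2 weight decay, a standalone illustration of how that factor would enter an update (all values arbitrary, grad_step stands in for the optimizer's step):

import numpy as np

lr, lb, batch_size = 0.001, 0.001, 128
decay = 1 - lb * lr / batch_size           # == self._regularization_param
w = np.ones((3, 3))
grad_step = np.full((3, 3), 0.01)          # stand-in for the optimizer's step
w = decay * w - grad_step                  # shrink weights, then apply the step
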
Ejemplo n.º 41
0
 def _extract(self, verbose):
     features = []
     with tf.Session() as sess:
         if self._extractor == "v3":
             _output = "pool_3:0"
         elif self._extractor == "ResNet-v2":
             _output = "InceptionResnetV2/Logits/Flatten/Reshape:0"
         elif self._extractor == "cnn":
             _output = "final_result/Reshape:0"
         else:
             _output = "OutputFlow/Reshape:0"
         flattened_tensor = sess.graph.get_tensor_by_name(_output)
         if self._extractor == "v3":
             _entry = "DecodeJpeg/contents:0"
         elif self._extractor == "ResNet-v2":
             _entry = "Placeholder:0"
         else:
             _entry = "Entry/Placeholder:0"
         if "cnn" in self._extractor or "ResNet" in self._extractor and self._mat_dir is not None:
             features = np.load(self._mat_dir)
         else:
             for i, image_path in enumerate(self._image_paths):
                 if not os.path.isfile(image_path):
                     continue
                 if "v3" in self._extractor:
                     if verbose:
                         print("Processing {}...".format(image_path))
                     try:
                         image_data = gfile.FastGFile(image_path,
                                                      "rb").read()
                         feature = sess.run(flattened_tensor,
                                            {_entry: image_data})
                         features.append(np.squeeze(feature))
                     except Exception as err:
                         if verbose:
                             print(err)
                             print("Moving {} to '_err' folder...".format(
                                 image_path))
                         if not os.path.isdir("_err"):
                             os.makedirs("_err")
                         shutil.move(
                             image_path,
                             os.path.join("_err",
                                          os.path.basename(image_path)))
                 else:
                     if verbose:
                         print("Reading {}...".format(image_path))
                     image_data = cv2.imread(image_path)
                     if self._extractor == "ResNet-v2":
                         features.append(cv2.resize(image_data, (299, 299)))
                     else:
                         features.append(cv2.resize(image_data, (64, 64)))
         if "v3" not in self._extractor:
             features = np.array(features)
             print("Extracting features...")
             rs = []
             batch_size = math.floor(1e6 / np.prod(features.shape[1:]))
             epoch = math.ceil(len(features) / batch_size)
             bar = ProgressBar(max_value=epoch, name="Extract")
             bar.start()
             for i in range(epoch):
                 if i == epoch - 1:
                     rs.append(
                         sess.run(flattened_tensor,
                                  {_entry: features[i * batch_size:]}))
                 else:
                     rs.append(
                         sess.run(
                             flattened_tensor, {
                                 _entry:
                                 features[i * batch_size:(i + 1) *
                                          batch_size]
                             }))
                 bar.update()
             return np.vstack(rs).astype(np.float32)
         return np.array(features, dtype=np.float32)
Ejemplo n.º 42
0
def test(test_sets, prob_lists):
    acc_lst = []
    for i in range(10):
        _prob_lst = prob_lists[i]
        x_test, y_test = test_sets[i]
        y_pred = np.array([pick_best(sentence, _prob_lst) for sentence in x_test])
        y_test = np.array(y_test)
        acc_lst.append(100 * np.sum(y_pred == y_test) / len(y_pred))
    return acc_lst

if __name__ == '__main__':
    _rs, epoch = [], 10
    bar = ProgressBar(max_value=epoch, name="_NB")
    for _ in range(epoch):
        _rs.append(test(*train()))
        bar.update()
    _rs = np.array(_rs).T
    # x_base = np.arange(len(_rs[0])) + 1
    # plt.figure()
    # for _acc_lst in _rs:
    #     plt.plot(x_base, _acc_lst)
    # plt.plot(x_base, np.average(_rs, axis=0), linewidth=4, label="Average")
    # plt.xlim(1, epoch)
    # plt.ylim(np.min(_rs), np.max(_rs)+2)
    # plt.legend(loc="lower right")
    # plt.show()
    plt.figure()
    plt.boxplot(_rs.T, vert=False, showmeans=True)
    plt.show()
    _rs = np.array(_rs).ravel()
    print("Acc Mean     : {:8.6}".format(np.average(_rs)))
Ejemplo n.º 43
0
    def fit(self,
            x,
            y,
            x_test=None,
            y_test=None,
            batch_size=128,
            record_period=1,
            train_only=False,
            optimizer="Adam",
            lr=0.001,
            lb=0.001,
            epoch=20,
            weight_scale=1,
            apply_bias=True,
            show_loss=True,
            metrics=None,
            do_log=True,
            verbose=None,
            visualize=False,
            visualize_setting=None,
            draw_weights=False,
            animation_params=None):

        self._lr, self._epoch = lr, epoch
        for weight in self._weights:
            weight.data *= weight_scale
        # copy the list so that appending the bias terms below does not mutate self._weights
        self._model_parameters = list(self._weights)
        if apply_bias:
            self._model_parameters += self._bias
        self._optimizer = optimizer
        self._init_optimizer()
        assert isinstance(self._optimizer, Optimizer)
        print()
        print("=" * 30)
        print("Optimizers")
        print("-" * 30)
        print(self._optimizer)
        print("-" * 30)

        if not self._layers:
            raise BuildNetworkError(
                "Please provide layers before fitting data")
        if y.shape[1] != self._current_dimension:
            raise BuildNetworkError(
                "Output layer's shape should be {}, {} found".format(
                    self._current_dimension, y.shape[1]))

        x, y = self._arr_to_variable(False, x, y)
        if x_test is not None and y_test is not None:
            x_test, y_test = self._arr_to_variable(False, x_test, y_test)
        (x_train, x_test), (y_train,
                            y_test) = self._split_data(x, y, x_test, y_test,
                                                       train_only)
        train_len = len(x_train)
        batch_size = min(batch_size, train_len)
        do_random_batch = train_len > batch_size
        train_repeat = 1 if not do_random_batch else int(train_len /
                                                         batch_size) + 1
        self._regularization_param = 1 - lb * lr / batch_size
        self._get_min_max(x_train, y_train)

        self._metrics = ["acc"] if metrics is None else metrics
        for i, metric in enumerate(self._metrics):
            if isinstance(metric, str):
                if metric not in self._available_metrics:
                    raise BuildNetworkError(
                        "Metric '{}' is not implemented".format(metric))
                self._metrics[i] = self._available_metrics[metric]
        self._metric_names = [_m.__name__ for _m in self._metrics]

        self._logs = {
            name: [[] for _ in range(len(self._metrics) + 1)]
            for name in ("train", "cv", "test")
        }
        if verbose is not None:
            self.verbose = verbose

        self._apply_bias = apply_bias

        bar = ProgressBar(max_value=max(1, epoch // record_period),
                          name="Epoch",
                          start=False)
        if self.verbose >= NNVerbose.EPOCH:
            bar.start()
        img, ims = None, []

        if draw_weights:
            weight_trace = [[[org] for org in weight]
                            for weight in self._weights]
        else:
            weight_trace = []

        loss_function = self._layers[-1].calculate
        args = (x_train, y_train, x_test, y_test, draw_weights, weight_trace,
                show_loss)

        *animation_properties, animation_params = self._get_animation_params(
            animation_params)
        sub_bar = ProgressBar(max_value=train_repeat * record_period - 1,
                              name="Iteration",
                              start=False)
        for counter in range(epoch):
            self._optimizer.update()
            if self.verbose >= NNVerbose.ITER and counter % record_period == 0:
                sub_bar.start()
            self.batch_training(x_train, y_train, batch_size, train_repeat,
                                loss_function, sub_bar, *args)
            if self.verbose >= NNVerbose.ITER:
                sub_bar.update()
            self._handle_animation(
                counter,
                x,
                y,
                ims,
                animation_params,
                *animation_properties,
                img=self._draw_2d_network(**animation_params),
                name="Neural Network")
            if do_log:
                self._append_log(x, y, "train", get_loss=show_loss)
                self._append_log(x_test, y_test, "cv", get_loss=show_loss)
            if (counter + 1) % record_period == 0:
                if do_log and self.verbose >= NNVerbose.METRICS:
                    self._print_metric_logs(show_loss, "train")
                    self._print_metric_logs(show_loss, "cv")
                if visualize:
                    if visualize_setting is None:
                        self.visualize2d(x_test, y_test)
                    else:
                        self.visualize2d(x_test, y_test, *visualize_setting)
                if self.verbose >= NNVerbose.EPOCH:
                    bar.update(counter // record_period + 1)
                    if self.verbose >= NNVerbose.ITER:
                        sub_bar = ProgressBar(
                            max_value=train_repeat * record_period - 1,
                            name="Iteration",
                            start=False)

        if do_log:
            self._append_log(x_test, y_test, "test", get_loss=show_loss)
        if img is not None:
            cv2.waitKey(0)
            cv2.destroyAllWindows()
        if draw_weights:
            ts = np.arange(epoch * train_repeat + 1)
            for i, weight in enumerate(self._weights):
                plt.figure()
                for j in range(len(weight)):
                    plt.plot(ts, weight_trace[i][j])
                plt.title("Weights toward layer {} ({})".format(
                    i + 1, self._layers[i].name))
                plt.show()
        self._handle_mp4(ims, animation_properties, "NN")
        return self._logs
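
weight.data *= weight_scale above suggests these weights are PyTorch tensors; multiplying through .data mutates them without the op being recorded by autograd. Assuming that framework, a standalone sketch of the more current idiom for the same initialization-time scaling:

import torch

weights = [torch.nn.Parameter(torch.ones(4, 4))]  # stand-in for self._weights
weight_scale = 0.5
with torch.no_grad():
    for weight in weights:
        weight.mul_(weight_scale)  # in-place scale, invisible to autograd
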
Ejemplo n.º 44
0
    def fit(self,
            x, y, x_test=None, y_test=None,
            lr=0.001, lb=0.001, epoch=10, weight_scale=1,
            batch_size=128, record_period=1, train_only=False, optimizer=None,
            show_loss=True, metrics=None, do_log=False, verbose=None,
            tensorboard_verbose=0, animation_params=None):
        x, y = self._feed_data(x, y)
        self._lr = lr
        self._init_optimizer(optimizer)
        print("Optimizer: ", self._optimizer.name)
        print("-" * 30)

        if not self._layers:
            raise BuildNetworkError("Please provide layers before fitting data")

        if y.shape[1] != self._current_dimension:
            raise BuildNetworkError("Output layer's shape should be {}, {} found".format(
                self._current_dimension, y.shape[1]))

        (x_train, x_test), (y_train, y_test) = self.split_data(x, y, x_test, y_test, train_only)
        train_len, test_len = len(x_train), len(x_test)
        batch_size = min(batch_size, train_len)
        do_random_batch = train_len > batch_size
        train_repeat = 1 if not do_random_batch else int(train_len / batch_size) + 1

        with tf.name_scope("Entry"):
            self._tfx = tf.placeholder(tf.float32, shape=[None, *x.shape[1:]])
        self._tfy = tf.placeholder(tf.float32, shape=[None, y.shape[1]])
        if epoch <= 0:
            return

        self._metrics = ["acc"] if metrics is None else metrics
        for i, metric in enumerate(self._metrics):
            if isinstance(metric, str):
                if metric not in self._available_metrics:
                    raise BuildNetworkError("Metric '{}' is not implemented".format(metric))
                self._metrics[i] = self._available_metrics[metric]
        self._metric_names = [_m.__name__ for _m in self._metrics]

        self._logs = {
            name: [[] for _ in range(len(self._metrics) + 1)] for name in ("Train", "Test")
        }
        if verbose is not None:
            self.verbose = verbose

        bar = ProgressBar(max_value=max(1, epoch // record_period), name="Epoch", start=False)
        if self.verbose >= NNVerbose.EPOCH:
            bar.start()
        img = None

        *animation_properties, animation_params = self._get_animation_params(animation_params)
        with self._sess.as_default() as sess:
            with tf.name_scope("ActivationFlow"):
                self._activations = self._get_activations(self._tfx)
            self._y_pred = self._activations[-1]
            l2_losses = self._get_l2_losses(lb)  # type: list
            self._loss = self._layers[-1].calculate(self._tfy, self._y_pred) + tf.reduce_sum(l2_losses)
            self._metric_rs = [metric(self._tfy, self._y_pred) for metric in self._metrics]
            self._init_train_step(sess)
            for weight in self._tf_weights:
                # assign back into the variable; a bare "weight *= weight_scale"
                # would only rebind the loop name to a new tensor
                sess.run(weight.assign(weight * weight_scale))

            if tensorboard_verbose > 0:
                log_dir = os.path.join("tbLogs", str(datetime.datetime.now())[:19].replace(":", "-"))
                train_dir = os.path.join(log_dir, "train")
                test_dir = os.path.join(log_dir, "test")
                for _dir in (log_dir, train_dir, test_dir):
                    if not os.path.isdir(_dir):
                        os.makedirs(_dir)
                test_summary_ops = []
                with tf.name_scope("l2_loss"):
                    layer_names = [
                        self._get_tb_name(layer) for layer in self._layers
                        if not isinstance(layer, SubLayer) and not isinstance(layer, ConvPoolLayer)
                    ]
                    for name, l2_loss in zip(layer_names, l2_losses):
                        tf.summary.scalar(name, l2_loss)
                with tf.name_scope("GlobalSummaries"):
                    test_summary_ops.append(tf.summary.scalar("loss", self._loss))
                    for name, metric_rs in zip(self._metric_names, self._metric_rs):
                        test_summary_ops.append(tf.summary.scalar(name, metric_rs))
                train_merge_op = tf.summary.merge_all()
                train_writer = tf.summary.FileWriter(train_dir, sess.graph)
                train_writer.add_graph(sess.graph)
                test_writer = tf.summary.FileWriter(test_dir)
                test_merge_op = tf.summary.merge(test_summary_ops)
            else:
                train_writer = test_writer = train_merge_op = test_merge_op = None

            args = (
                x_train, y_train, x_test, y_test, show_loss,
                self.verbose >= NNVerbose.METRICS_DETAIL,
                tensorboard_verbose, train_repeat, sess, train_merge_op, test_merge_op,
                train_writer, test_writer
            )
            ims = []
            for counter in range(epoch):
                if self.verbose >= NNVerbose.ITER and counter % record_period == 0:
                    sub_bar = ProgressBar(max_value=train_repeat * record_period - 1, name="Iteration")
                else:
                    sub_bar = None
                self._batch_training(
                    x_train, y_train, batch_size, train_repeat,
                    self._loss, self._train_step, sub_bar, counter, *args)
                self._handle_animation(
                    counter, x, y, ims, animation_params, *animation_properties,
                    img=self._draw_2d_network(**animation_params), name="Neural Network"
                )
                if (counter + 1) % record_period == 0:
                    if do_log:
                        self._append_log(x_train, y_train, None, "Train", show_loss)
                        self._append_log(x_test, y_test, None, "Test", show_loss)
                        if self.verbose >= NNVerbose.METRICS:
                            self._print_metric_logs("Train", show_loss)
                            self._print_metric_logs("Test", show_loss)
                    if self.verbose >= NNVerbose.EPOCH:
                        bar.update(counter // record_period + 1)
        if img is not None:
            cv2.waitKey(0)
            cv2.destroyAllWindows()
        self._handle_mp4(ims, animation_properties, "NN")