Example #1
    def score(self, X, Y):
        # error_rate (and y_hot_encoding used below) are sketched after fit()
        prediction = self.predict(X)
        return 1 - error_rate(Y, prediction)
    def fit(self,
            X,
            Y,
            optimizer="adam",
            optimizer_params=(1e-3, 0.99, 0.999),  # (learning_rate, beta1, beta2)
            reg=1e-2,
            epochs=400,
            batch_size=100,
            split=True,
            show_fig=False,
            print_every=20,
            print_tofile=False):

        K = len(set(Y))
        X, Y = X.astype(np.float32), y_hot_encoding(Y).astype(np.float32)
        X, Y = shuffle(X, Y)
        if split:
            Xvalid, Yvalid = X[-1000:], Y[-1000:]
            X, Y = X[:-1000], Y[:-1000]
        else:
            Xvalid, Yvalid = X, Y
        Yvalid_flat = np.argmax(Yvalid, axis=1)
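        # argmax recovers integer class labels from the one-hot targets for error_rate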
        # initialize convpool layers
        N, width, height, color = X.shape
        input_feature = color
        self.convpool_layers = []
        # each entry of self.convpull_layer_sizes is (out_features, filter_width, filter_height);
        # a hypothetical sketch of ConvPullLayer appears after this method
        for index, outF_wdt_hgt in enumerate(self.convpull_layer_sizes):
            self.convpool_layers.append(
                ConvPullLayer(input_feature, *outF_wdt_hgt,
                              self.conv_nonlin_functions[index], self.poolsz))
            input_feature = outF_wdt_hgt[0]

        # shape of the image after the series of convolution + maxpool layers
        final_output_width = width // (self.poolsz[0] ** len(self.convpull_layer_sizes))
        final_output_height = height // (self.poolsz[1] ** len(self.convpull_layer_sizes))
        # initialize hidden layers
        # size of output feature of last convpull layer * shape of output image
        M1 = int(self.convpull_layer_sizes[-1][0] * final_output_width *
                 final_output_height)
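        # worked example (illustrative shapes only): a 32x32 RGB input with
        # convpull_layer_sizes = [(20, 5, 5), (50, 5, 5)] and poolsz = (2, 2)
        # shrinks each side 32 -> 16 -> 8, giving M1 = 50 * 8 * 8 = 3200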
        self.hidden_layers = []
        for idx in range(len(self.hidden_layer_sizes)):
            self.hidden_layers.append(
                HiddenLayerBatchNorm(M1, self.hidden_layer_sizes[idx], idx,
                                     self.nonlin_functions[idx]))
            M1 = self.hidden_layer_sizes[idx]

        self.hidden_layers.append(
            HiddenLayer(M1, K, len(self.hidden_layer_sizes), "None"))
        tfX = tf.placeholder(tf.float32,
                             shape=(None, width, height, color),
                             name="tfX")
        tfT = tf.placeholder(tf.float32, shape=(None, K), name="tfT")
        logits = self.forward(tfX, is_training=True)

        rcost = reg * sum([
            tf.nn.l2_loss(coefs)
            for layer in (self.convpool_layers + self.hidden_layers)
            for coefs in layer.params
        ])
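        # tf.nn.l2_loss(t) is sum(t ** 2) / 2, so rcost is the classic L2 penalty:
        # (reg / 2) * the sum of squared parameters (weights and biases) over
        # every conv and dense layer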
        cost = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=logits,
                                                    labels=tfT)) + rcost
        prediction = self.predict(tfX)

        train_op = self.optimizer(optimizer=optimizer,
                                  opt_args=optimizer_params).minimize(cost)
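        # self.optimizer() is not shown in this example; from the call above it is
        # assumed to map the name string to a tf.train optimizer, roughly:
        #   "adam" -> tf.train.AdamOptimizer(lr, beta1, beta2)
        #   with (lr, beta1, beta2) = opt_args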

        n_batches = N // batch_size
        batch_costs = []
        valid_costs = []
        error = []

        self.session.run(tf.global_variables_initializer())

        for i in range(epochs):
            X, Y = shuffle(X, Y)
            for j in range(n_batches):
                Xbatch = X[j * batch_size:(j + 1) * batch_size]
                Ybatch = Y[j * batch_size:(j + 1) * batch_size]
                self.session.run(train_op,
                                 feed_dict={
                                     tfX: Xbatch,
                                     tfT: Ybatch
                                 })
                if j % print_every == 0:
                    batch_costs.append(
                        self.session.run(cost,
                                         feed_dict={
                                             tfX: Xbatch,
                                             tfT: Ybatch
                                         }))
                    valid_costs.append(
                        self.session.run(cost,
                                         feed_dict={
                                             tfX: Xvalid,
                                             tfT: Yvalid
                                         }))
                    p = self.session.run(prediction,
                                         feed_dict={
                                             tfX: Xvalid,
                                             tfT: Yvalid
                                         })
                    err_rate = error_rate(Yvalid_flat, p)
                    error.append(err_rate)
                    print("i:", i, "j:", j, "nb:", n_batches, "cost:",
                          valid_costs[-1], "error_rate:", err_rate)

        print("Done!")

        if show_fig:
            plt.plot(valid_costs)
            plt.xlabel('%d * iteration' % print_every, fontsize=14)
            plt.ylabel('cost', fontsize=14)
            plt.grid()
            plt.show()

        if print_tofile:
            my_df = pd.DataFrame([batch_costs, valid_costs, error])
            my_df.to_csv(print_tofile, index=False, header=False)
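
# ------------------------------------------------------------------
# The helpers and the conv layer class used above are not part of this
# example. Below is a minimal sketch of what they might look like, inferred
# purely from how they are called; names, signatures, and details are
# assumptions, and the real implementations in the source repository may
# differ. shuffle(X, Y) is consistent with sklearn.utils.shuffle.
# ------------------------------------------------------------------
import numpy as np
import tensorflow as tf

def error_rate(targets, predictions):
    # fraction of mismatched labels, so score() above returns plain accuracy
    return np.mean(targets != predictions)

def y_hot_encoding(Y):
    # dense one-hot matrix of shape (N, K) for integer labels 0..K-1
    N, K = len(Y), len(set(Y))
    out = np.zeros((N, K), dtype=np.float32)
    out[np.arange(N), np.asarray(Y, dtype=np.int64)] = 1
    return out

class ConvPullLayer:
    # signature inferred from the call ConvPullLayer(inF, outF, width, height,
    # nonlin, poolsz); nonlin is assumed to be a callable such as tf.nn.relu
    def __init__(self, inF, outF, width, height, nonlin, poolsz=(2, 2)):
        shape = (height, width, inF, outF)  # tf.nn.conv2d filter convention
        W_init = np.random.randn(*shape) * np.sqrt(2.0 / (width * height * inF))
        self.W = tf.Variable(W_init.astype(np.float32))
        self.b = tf.Variable(np.zeros(outF, dtype=np.float32))
        self.nonlin = nonlin
        self.poolsz = poolsz
        self.params = [self.W, self.b]

    def forward(self, X):
        conv_out = tf.nn.conv2d(X, self.W, strides=[1, 1, 1, 1],
                                padding="SAME") + self.b
        pw, ph = self.poolsz
        # "SAME" convolution plus max-pool with stride == pool size shrinks each
        # spatial dimension by the pool size, matching the
        # final_output_width/height arithmetic in fit() above
        pooled = tf.nn.max_pool(conv_out, ksize=[1, pw, ph, 1],
                                strides=[1, pw, ph, 1], padding="SAME")
        return self.nonlin(pooled)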
    def score(self, X, Y):
        Y = y_hot_encoding(Y).astype(np.float32)
        p = self.make_prediction(X)
        return 1 - error_rate(np.argmax(Y, axis=1), p)
    def fit(self,
            X,
            Y,
            learning_rate=1e-6,
            mu=0.99,
            decay=0.999,
            reg=1e-2,
            epochs=400,
            batch_size=100,
            split=True,
            show_fig=False,
            print_every=20):
        self.epochs = epochs
        K = len(set(Y))
        # .toarray() implies X arrives as a scipy.sparse matrix here
        X, Y = X.astype(np.float32).toarray(), y_hot_encoding(Y).astype(
            np.float32)
        X, Y = shuffle(X, Y)
        if split:
            Xvalid, Yvalid = X[-1000:], Y[-1000:]
            X, Y = X[:-1000], Y[:-1000]
        else:
            # no split: validate on the training data itself
            Xvalid, Yvalid = X, Y
        Yvalid_flat = np.argmax(Yvalid, axis=1)
        # clear the default graph stack and reset the global default graph,
        # so repeated fit() calls do not accumulate nodes
        tf.reset_default_graph()
        # initialize hidden layers
        N, D = X.shape
        M1 = D
        self.hidden_layers = []
        for idx in range(len(self.hidden_layer_sizes)):
            self.hidden_layers.append(
                HiddenLayerBatchNorm(M1, self.hidden_layer_sizes[idx], idx,
                                     self.nonlin_functions[idx]))
            M1 = self.hidden_layer_sizes[idx]
        self.hidden_layers.append(
            HiddenLayer_1(M1, K, len(self.hidden_layer_sizes), "None"))

        tfX = tf.placeholder(tf.float32, shape=(None, D), name="tfX")
        tfT = tf.placeholder(tf.float32, shape=(None, K), name="tfT")
        logits = self.forward(tfX, is_training=True)

        rcost = reg * sum([
            tf.nn.l2_loss(coefs) for layer in self.hidden_layers
            for coefs in layer.params
        ])
        cost = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=logits,
                                                    labels=tfT)) + rcost
        prediction = self.predict(tfX)

        # alternatives tried: tf.train.RMSPropOptimizer(learning_rate, decay=decay, momentum=mu),
        # tf.train.MomentumOptimizer(learning_rate, momentum=mu),
        # tf.train.ProximalGradientDescentOptimizer(learning_rate)
        train_op = tf.train.AdamOptimizer(learning_rate,
                                          beta1=mu,
                                          beta2=decay).minimize(cost)
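        # Adam keeps exponential moving averages of the gradient and its square:
        #   m_t = beta1 * m_{t-1} + (1 - beta1) * g_t
        #   v_t = beta2 * v_{t-1} + (1 - beta2) * g_t**2
        # and steps by learning_rate * m_hat / (sqrt(v_hat) + eps), where
        # m_hat and v_hat are the bias-corrected estimates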

        n_batches = N // batch_size
        costs = []
        init = tf.global_variables_initializer()
        with tf.Session() as session:
            session.run(init)
            for i in range(epochs):
                X, Y = shuffle(X, Y)
                for j in range(n_batches):
                    Xbatch = X[j * batch_size:(j + 1) * batch_size]
                    Ybatch = Y[j * batch_size:(j + 1) * batch_size]
                    session.run(train_op, feed_dict={tfX: Xbatch, tfT: Ybatch})

                    if j % print_every == 0:
                        costs.append(
                            session.run(cost,
                                        feed_dict={
                                            tfX: Xvalid,
                                            tfT: Yvalid
                                        }))
                        p = session.run(prediction,
                                        feed_dict={
                                            tfX: Xvalid,
                                            tfT: Yvalid
                                        })
                        print("i:", i, "j:", j, "nb:", n_batches, "cost:",
                              costs[-1], "error_rate:",
                              error_rate(Yvalid_flat, p))
            saver = tf.train.Saver()
            # now save the graph and the trained weights
            saver.save(session,
                       './my_model-' + str(self.counter),
                       global_step=self.epochs)
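            # to restore later (sketch; the checkpoint prefix comes from the
            # save() call above, './my_model-<counter>' plus the global step):
            #   saver = tf.train.import_meta_graph('./my_model-0-400.meta')
            #   with tf.Session() as sess:
            #       saver.restore(sess, tf.train.latest_checkpoint('./'))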
            print("Done!")

        if show_fig:
            plt.plot(costs)
            plt.show()
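
# ------------------------------------------------------------------
# Likewise, a minimal sketch of the dense layer classes used above, inferred
# from the calls HiddenLayer(M1, K, layer_id, "None") and
# HiddenLayerBatchNorm(M1, M2, layer_id, nonlin). This is an illustrative
# assumption, not the repository's actual code; the batch-norm variant would
# additionally keep running mean/variance statistics for inference, and
# HiddenLayer_1 in the second snippet appears to play the same role.
# ------------------------------------------------------------------
import numpy as np
import tensorflow as tf

class HiddenLayer:
    def __init__(self, M1, M2, layer_id, nonlin):
        W_init = (np.random.randn(M1, M2) * np.sqrt(2.0 / M1)).astype(np.float32)
        self.W = tf.Variable(W_init, name="W%d" % layer_id)
        self.b = tf.Variable(np.zeros(M2, dtype=np.float32), name="b%d" % layer_id)
        # the string "None" marks the output layer: raw logits, no nonlinearity
        self.nonlin = None if nonlin == "None" else nonlin
        self.params = [self.W, self.b]

    def forward(self, X):
        a = tf.matmul(X, self.W) + self.b
        return a if self.nonlin is None else self.nonlin(a)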