# Example no. 1
    def eval(self, batch_size, out_name, mini_data=True):
        """Evaluate the model on the validation split and save a confusion matrix.

        Args:
            batch_size: number of samples fed to ``model.predict`` per step.
            out_name: suffix for the saved ``confusion_<out_name>.npy`` file.
            mini_data: if True, evaluate on the reduced validation set.
        """
        val_dataset = DataGen(self.IMG_SIZE, 5, False, mini_data)

        print("val data size: ", val_dataset.get_dataset_size())

        Y_pred = []
        Y_gt = []
        count = 0
        for X_batch, y_batch in val_dataset.generator(batch_size):

            count += batch_size
            print("count:", count)

            # The generator may loop forever; stop once every validation
            # sample has been consumed.
            if count > val_dataset.get_dataset_size():
                break

            y_pred = self.model.predict(X_batch)

            # Collapse one-hot / probability vectors to class indices.
            y_pred = np.argmax(y_pred, -1)
            y_gt = np.argmax(y_batch, -1)
            Y_pred = np.concatenate([Y_pred, y_pred])
            Y_gt = np.concatenate([Y_gt, y_gt])

        acc = accuracy_score(Y_gt, Y_pred)
        # accuracy_score returns a fraction in [0, 1]; scale to a percentage.
        print('Eval Accuracy: %2.2f%%' % (acc * 100))
        np_confusion = confusion_matrix(Y_gt, Y_pred)
        np.save('confusion_' + str(out_name) + '.npy', np_confusion)
# Example no. 2
    def train(self, batch_size, epoches, out_name, mini_data):
        """Compile the model and train it from scratch on the training split.

        Args:
            batch_size: samples per gradient update.
            epoches: total number of training epochs.
            out_name: identifier handed to the evaluation callback.
            mini_data: whether to train on the reduced dataset.
        """
        print(mini_data)

        # Step-decay schedule: halve the learning rate every `epochs_drop`
        # epochs, starting from 1e-3.
        def step_decay(epoch):
            base_lr = 1e-3
            decay_factor = 0.5
            epochs_drop = 7.0
            exponent = math.floor((1 + epoch) / epochs_drop)
            return base_lr * math.pow(decay_factor, exponent)

        train_dataset = DataGen(self.IMG_SIZE, 5, True, mini=mini_data)
        train_gen = train_dataset.generator(batch_size, True)

        # Timestamped log directory so each run gets its own TensorBoard logs.
        run_stamp = "{0:%Y-%m-%dT%H-%M-%S}".format(datetime.datetime.now())

        callbacks_list = [
            EvalCallBack(out_name),
            EarlyStopping(monitor='val_loss', mode='min', patience=6),
            TensorBoard(log_dir='logs/' + run_stamp,
                        batch_size=batch_size,
                        update_freq='epoch'),
            LearningRateScheduler(step_decay),
        ]

        self.model.compile(optimizer='adam',
                           loss='categorical_crossentropy',
                           metrics=['categorical_accuracy'])
        self.model.fit_generator(
            generator=train_gen,
            steps_per_epoch=train_dataset.get_dataset_size() // batch_size,
            epochs=epoches,
            callbacks=callbacks_list)
# Example no. 3
    def run_eval(self, epoch, batch_size=16):
        """Evaluate on the validation set mid-training and log the accuracy.

        Args:
            epoch: current epoch index, used for logging and the report cadence.
            batch_size: samples fed to ``model.predict`` per step.
        """
        val_dataset = DataGen(self.IMG_SIZE, 5, False)
        val_gen = val_dataset.generator(batch_size)

        count = 0
        Y_pred = []
        Y_gt = []
        for X_batch, y_batch in val_gen:

            count += batch_size
            # The generator may loop forever; stop once every validation
            # sample has been consumed.
            if count > val_dataset.get_dataset_size():
                break

            y_pred = self.model.predict(X_batch)

            # Collapse one-hot / probability vectors to class indices.
            y_pred = np.argmax(y_pred, -1)
            y_gt = np.argmax(y_batch, -1)
            Y_pred = np.concatenate([Y_pred, y_pred])
            Y_gt = np.concatenate([Y_gt, y_gt])

        acc = accuracy_score(Y_gt, Y_pred)
        # accuracy_score returns a fraction in [0, 1]; scale to a percentage.
        print('Eval Accuracy: %2.2f%%' % (acc * 100), '@ Epoch ', epoch)
        # Emit a full per-class report every 10 epochs.
        if (epoch + 1) % 10 == 0:
            print(classification_report(Y_gt, Y_pred))

        with open('checkpoints/' + self.out_name + '/val.txt', 'a+') as xfile:
            xfile.write('Epoch ' + str(epoch) + ':' + str(acc) + '\n')
# Example no. 4
    def resume_train(self,
                     batch_size,
                     model_json,
                     model_weights,
                     init_epoch,
                     epochs,
                     out_name,
                     mini_data=True):
        """Reload a saved model and continue training from ``init_epoch``.

        Args:
            batch_size: samples per gradient update.
            model_json: path to the serialized model architecture.
            model_weights: path to the saved weights to restore.
            init_epoch: epoch index to resume from.
            epochs: final epoch index to train up to.
            out_name: identifier handed to the evaluation callback.
            mini_data: whether to train on the reduced dataset.
        """
        # Restore architecture + weights, then recompile with a lower
        # learning rate for fine-tuning.
        self.load_model(model_json, model_weights)
        self.model.compile(optimizer=Adam(lr=5e-4),
                           loss='categorical_crossentropy',
                           metrics=["categorical_accuracy"])

        train_dataset = DataGen(self.IMG_SIZE,
                                5,
                                is_train=True,
                                mini=mini_data)
        train_gen = train_dataset.generator(batch_size, True)

        # Log where the model definition lives, for traceability.
        model_dir = os.path.dirname(os.path.abspath(model_json))
        print(model_dir, model_json)

        # Timestamped log directory so each run gets its own TensorBoard logs.
        run_stamp = "{0:%Y-%m-%dT%H-%M-%S}".format(datetime.datetime.now())
        callbacks_list = [
            EvalCallBack(out_name),
            EarlyStopping(monitor='val_loss', mode='min', patience=6),
            TensorBoard(log_dir='logs/' + run_stamp,
                        batch_size=batch_size,
                        update_freq='epoch'),
        ]

        self.model.fit_generator(
            generator=train_gen,
            steps_per_epoch=train_dataset.get_dataset_size() // batch_size,
            initial_epoch=init_epoch,
            epochs=epochs,
            callbacks=callbacks_list)