Example #1
 def setUp(self):
     self.dataset_train = Dataset("test",
                                  fold_number=1,
                                  mode="train",
                                  sequence=True)
     self.dataset_validate = Dataset("test",
                                     fold_number=1,
                                     mode="validate",
                                     sequence=True)
     self.doc2vec = Doc2Vec("test",
                            self.dataset_train.number_of_classes(),
                            min_count=1)
Example #2
 def setUp(self):
     self.dataset_train = Dataset(data_name="test",
                                  mode="train",
                                  fold_number=1,
                                  sequence=True)
     self.dataset_validate = Dataset(data_name="test",
                                     mode="validate",
                                     fold_number=1,
                                     sequence=True)
     self.dataset_test = Dataset(data_name="test",
                                 mode="test",
                                 fold_number=1,
                                 sequence=True)
Example #3
    def infer(self, input_file_path, model_file, output_file_path):
        print("Infering ...")
        check_key_in_dict(dictionary=self.configs, keys=["tfrecords_dir"])
        msg = self.load_model(model_file)
        if msg:
            raise Exception(msg)
        tf_infer_dataset = Dataset(data_path=input_file_path,
                                   tfrecords_dir=self.configs["tfrecords_dir"],
                                   mode="infer")
        tf_infer_dataset = tf_infer_dataset(
            batch_size=self.configs["batch_size"],
            text_featurizer=self.text_featurizer,
            speech_conf=self.configs["speech_conf"])

        def infer_step(feature, input_length):
            prediction = self.predict(feature, input_length)
            return bytes_to_string(prediction.numpy())

        for features, inp_length in tf_infer_dataset:
            predictions = infer_step(features, inp_length)

            with open(output_file_path, "a", encoding="utf-8") as of:
                of.write("Predictions\n")
                for pred in predictions:
                    of.write(pred + "\n")
Example #4
 def setUp(self):
     self.dataset = Dataset("test", "train", 1, sequence=True)
     self.dataset_validate = Dataset("test", "validate", 1, sequence=True)
     self.dataset_test = Dataset("test", "test", 1, sequence=True)
     doc2vec = TempDoc2vec()
     self.dataset.change_to_Doc2Vec(doc2vec)
     self.dataset_validate.change_to_Doc2Vec(doc2vec)
     self.dataset_test.change_to_Doc2Vec(doc2vec)
     hidden = [5] * self.dataset.number_of_level()
     batch_size = [3] * self.dataset.number_of_level()
     target_hidden = [3] * (self.dataset.number_of_level() - 1)
     self.model = ESLNN("test", self.dataset, self.dataset_validate,
                        self.dataset_test, 30, hidden, target_hidden,
                        stopping_time=3, batch_size=batch_size)
     self.model.classifier[0].dense.weight.data.fill_(1)
     self.model.classifier[0].dense.bias.data.zero_()
     self.model.classifier[0].logit.weight.data.fill_(0.2)
     self.model.classifier[0].logit.bias.data.zero_()
Example #5
 def setUp(self):
     self.model = LCPL_ESLNN_First(7, 5, 2, use_dropout=False)
     self.model.dense.weight.data.fill_(1)
     self.model.dense.bias.data.zero_()
     self.model.logit.weight.data.fill_(0.2)
     self.model.logit.bias.data.zero_()
     if torch.cuda.is_available():
         self.model = self.model.cuda()
     self.dataset = Dataset("test", "train", 1, sequence=True)
     doc2vec = TempDoc2vec()
     self.dataset.change_to_Doc2Vec(doc2vec)
Example #6
    def test_with_noise_filter(self, model_file, output_file_path):
        print("Testing model ...")
        if not self.noise_filter:
            raise ValueError("noise_filter must be defined")

        check_key_in_dict(dictionary=self.configs,
                          keys=["test_data_transcript_paths", "tfrecords_dir"])
        test_dataset = Dataset(
            data_path=self.configs["test_data_transcript_paths"],
            tfrecords_dir=self.configs["tfrecords_dir"],
            mode="test")
        msg = self.load_saved_model(model_file)
        if msg:
            raise Exception(msg)

        tf_test_dataset = test_dataset(text_featurizer=self.text_featurizer,
                                       speech_conf=self.configs["speech_conf"],
                                       batch_size=1,
                                       feature_extraction=False)

        def test_step(signal, label):
            prediction = self.infer_single(signal)
            label = self.decoder.convert_to_string_single(label)

            print(f"Pred: {prediction}")
            print(f"Groundtruth: {label}")
            _wer, _wer_count = wer(decode=prediction, target=label)
            _cer, _cer_count = cer(decode=prediction, target=label)

            gc.collect()

            return _wer, _wer_count, _cer, _cer_count

        total_wer = 0.0
        wer_count = 0.0
        total_cer = 0.0
        cer_count = 0.0

        for signal, label in tf_test_dataset.as_numpy_iterator():
            batch_wer, batch_wer_count, batch_cer, batch_cer_count = test_step(
                signal, label)
            total_wer += batch_wer
            total_cer += batch_cer
            wer_count += batch_wer_count
            cer_count += batch_cer_count

        results = (total_wer / wer_count, total_cer / cer_count)

        print(f"WER: {results[0]}, CER: {results[-1]}")

        with open(output_file_path, "w", encoding="utf-8") as of:
            of.write("WER: " + str(results[0]) + "\n")
            of.write("CER: " + str(results[-1]) + "\n")
Example #7
    def read(self):
        examples = []
        indexes_to_remove = [self.class_position] + self.ignore_columns
        with open(self.file_path) as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=',')
            for row in csv_reader:
                klass = row[self.class_position]
                attributes = row.copy()
                for index in sorted(indexes_to_remove, reverse=True):
                    del attributes[index]
                attributes_and_klass = attributes + [klass]
                examples.append(
                    Example(list(range(len(attributes_and_klass))),
                            attributes_and_klass))
        dataset = Dataset(examples)
        if self.normalization:
            dataset.data_normalization()

        return dataset
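As a side note, read() removes the class column (and any ignored columns) from each row and re-appends the label at the end, deleting from the highest index first so earlier indices stay valid. A standalone sketch of the same row transform, with hypothetical column choices:

# Hedged illustration only; class_position and ignore_columns are made up here.
import csv
import io

class_position = 0      # column holding the label
ignore_columns = [2]    # feature columns to drop

rows = io.StringIO("yes,1.0,skip,2.0\nno,3.0,skip,4.0\n")
for row in csv.reader(rows, delimiter=','):
    klass = row[class_position]
    attributes = row.copy()
    for index in sorted([class_position] + ignore_columns, reverse=True):
        del attributes[index]
    print(attributes + [klass])   # first row -> ['1.0', '2.0', 'yes']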
Example #8
# coding: utf-8

from models.NeuralNetwork import NeuralNetwork
from models.NeuralNetworkMath import NeuralNetworkMath
from data.Dataset import Dataset
from data.Example import Example

inputs = 1
outputs = 1
hidden_layer_neurons = 3
example = Example([1, 2], [1, 1])
dataset = Dataset([example])
parameters = {
    "layers_structure": [inputs, hidden_layer_neurons, outputs],
    "lambda": 0.1
}


def test_weight_matrices():
    nn = NeuralNetwork(parameters, dataset, debug=True)
    assert len(nn.weight_matrices) == 2  # input -> hidden, hidden -> output
    assert len(nn.weight_matrices[0]) == hidden_layer_neurons
    assert len(nn.weight_matrices[1]) == outputs
    assert len(nn.weight_matrices[0][0]) == inputs
    assert len(nn.weight_matrices[1][0]) == hidden_layer_neurons


def test_prediction():
    nn = NeuralNetwork(parameters, dataset, debug=True)
    first_line = nn.predict([1])[0]
    assert first_line == [0.9744787489988975]
Example #9
 def setUp(self):
     self.dataset = Dataset("test", "train", 1, sequence=True)
     doc2vec = TempDoc2vec()
     self.dataset.change_to_Doc2Vec(doc2vec)
Example #10
    args["datapath"].split("/")[-1],
    args["nsnap"],
    args["num_child_voxel"],
)

# TODO: Change to parse args
partition = {"train": ["id-1", "id-2", "id-3"], "validation": ["id-4"]}
BATCH_SIZE = 2
LEARNING_RATE = 0.001
MAX_EPOCHS = 2

# INITIALIZE NETWORK ***********************************************************#
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")

training_set = Dataset(partition["train"])
training_generator = data.DataLoader(training_set,
                                     batch_size=BATCH_SIZE,
                                     shuffle=True,
                                     num_workers=20)

validation_set = Dataset(partition["validation"])
validation_generator = data.DataLoader(validation_set,
                                       batch_size=BATCH_SIZE,
                                       shuffle=False)

# initialize model
model = SegNet().to(device)
model = model.double()

# set loss function and optimizer
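The snippet ends before the loss and optimizer are defined; a hedged continuation under assumed choices (cross-entropy loss and Adam, both illustrative rather than taken from the original script) could look like this:

# Hedged sketch only: loss, optimizer, and loop are assumptions, not the
# original code. torch is already imported by the surrounding script.
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)

for epoch in range(MAX_EPOCHS):
    model.train()
    for inputs, labels in training_generator:
        inputs, labels = inputs.to(device), labels.to(device)
        optimizer.zero_grad()
        loss = criterion(model(inputs), labels)
        loss.backward()
        optimizer.step()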
Example #11
    def train_and_eval(self, model_file=None):
        print("Training and evaluating model ...")
        self._create_checkpoints(self.model)

        check_key_in_dict(dictionary=self.configs,
                          keys=[
                              "tfrecords_dir", "checkpoint_dir",
                              "augmentations", "log_dir",
                              "train_data_transcript_paths"
                          ])
        augmentations = self.configs["augmentations"]
        augmentations.append(None)

        train_dataset = Dataset(
            data_path=self.configs["train_data_transcript_paths"],
            tfrecords_dir=self.configs["tfrecords_dir"],
            mode="train")
        tf_train_dataset = train_dataset.get_dataset_from_generator(
            text_featurizer=self.text_featurizer,
            speech_conf=self.configs["speech_conf"],
            batch_size=self.configs["batch_size"],
            augmentations=augmentations)

        tf_eval_dataset = None

        if self.configs["eval_data_transcript_paths"]:
            eval_dataset = Dataset(
                data_path=self.configs["eval_data_transcript_paths"],
                tfrecords_dir=self.configs["tfrecords_dir"],
                mode="eval")
            tf_eval_dataset = eval_dataset.get_dataset_from_generator(
                text_featurizer=self.text_featurizer,
                speech_conf=self.configs["speech_conf"],
                batch_size=self.configs["batch_size"],
                augmentations=[None])

        self.model.summary()

        initial_epoch = 0
        if self.ckpt_manager.latest_checkpoint:
            initial_epoch = int(
                self.ckpt_manager.latest_checkpoint.split('-')[-1])
            # restoring the latest checkpoint in checkpoint_path
            self.ckpt.restore(self.ckpt_manager.latest_checkpoint)

        if self.configs["log_dir"]:
            if not os.path.exists(self.configs["log_dir"]):
                os.makedirs(self.configs["log_dir"])
            with open(os.path.join(self.configs["log_dir"], "model.json"),
                      "w") as f:
                f.write(self.model.to_json())
            self.writer = tf.summary.create_file_writer(
                os.path.join(self.configs["log_dir"], "train"))

        if self.configs["last_activation"] != "softmax":
            loss = ctc_loss
        else:
            loss = ctc_loss_1

        epochs = self.configs["num_epochs"]

        for epoch in range(initial_epoch, epochs, 1):
            epoch_eval_loss = None
            epoch_eval_wer = None
            start = time.time()

            self.train(self.model, tf_train_dataset, self.optimizer, loss,
                       self.text_featurizer.num_classes, epoch, epochs)

            print(f"\nEnd training on epoch = {epoch}")

            self.ckpt_manager.save()
            print(f"Saved checkpoint at epoch {epoch + 1}")

            if tf_eval_dataset:
                print("Validating ... ")
                epoch_eval_loss, epoch_eval_wer = self.validate(
                    self.model, self.decoder, tf_eval_dataset, loss,
                    self.text_featurizer.num_classes,
                    self.configs["last_activation"])
                print(
                    f"Average_val_loss = {epoch_eval_loss}, val_wer = {epoch_eval_wer}"
                )

            time_epoch = time.time() - start
            print(f"Time for epoch {epoch + 1} is {time_epoch} secs")

            if self.writer:
                with self.writer.as_default():
                    if epoch_eval_loss and epoch_eval_wer:
                        tf.summary.scalar("eval_loss",
                                          epoch_eval_loss,
                                          step=epoch)
                        tf.summary.scalar("eval_wer",
                                          epoch_eval_wer,
                                          step=epoch)
                    tf.summary.scalar("epoch_time", time_epoch, step=epoch)

        if model_file:
            self.save_model(model_file)
Example #12
def train(model_name, weight_path, save_path, logdir=None):
    assert model_name in ['yolov3_tiny', 'yolov3', 'yolov4']

    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    if len(physical_devices) > 0:
        tf.config.experimental.set_memory_growth(physical_devices[0], True)

    NUM_CLASS = len(utils.read_class_names(cfg.YOLO.CLASSES))
    STRIDES = np.array(cfg.YOLO.STRIDES)
    IOU_LOSS_THRESH = cfg.YOLO.IOU_LOSS_THRESH
    XYSCALE = cfg.YOLO.XYSCALE
    ANCHORS = utils.get_anchors(cfg.YOLO.ANCHORS)

    trainset = Dataset('train')
    testset = Dataset('test')

    isfreeze = False
    steps_per_epoch = len(trainset)
    first_stage_epochs = cfg.TRAIN.FISRT_STAGE_EPOCHS
    second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS

    global_steps = tf.Variable(1, trainable=False, dtype=tf.int64)
    warmup_steps = cfg.TRAIN.WARMUP_EPOCHS * steps_per_epoch
    total_steps = (first_stage_epochs + second_stage_epochs) * steps_per_epoch

    input_layer = tf.keras.layers.Input([cfg.TRAIN.INPUT_SIZE, cfg.TRAIN.INPUT_SIZE, 3])
    if model_name=='yolov3_tiny':
        feature_maps = YOLOv3_tiny(input_layer, NUM_CLASS)
        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            bbox_tensor = ops.decode_train(fm, NUM_CLASS, STRIDES, ANCHORS, i)
            bbox_tensors.append(fm)
            bbox_tensors.append(bbox_tensor)
        model = tf.keras.Model(input_layer, bbox_tensors)

    elif model_name=='yolov3':
        feature_maps = YOLOv3(input_layer, NUM_CLASS)
        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            bbox_tensor = ops.decode_train(fm, NUM_CLASS, STRIDES, ANCHORS, i)
            bbox_tensors.append(fm)
            bbox_tensors.append(bbox_tensor)
        model = tf.keras.Model(input_layer, bbox_tensors)

    elif model_name=='yolov4':
        feature_maps = YOLOv4(input_layer, NUM_CLASS)
        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            bbox_tensor = ops.decode_train(fm, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE)
            bbox_tensors.append(fm)
            bbox_tensors.append(bbox_tensor)
        model = tf.keras.Model(input_layer, bbox_tensors)
    else:
        raise ValueError

    # for name in ['conv2d_93', 'conv2d_101', 'conv2d_109']:
    #     layer = model.get_layer(name)
    #     print(layer.name, layer.output_shape)

    if weight_path:
        if weight_path.split(".")[-1] == "weights":
            if model_name == 'yolov3_tiny':
                utils.load_weights_tiny(model, weight_path)
            elif model_name=='yolov3':
                utils.load_weights_v3(model, weight_path)
            elif model_name=='yolov4':
                utils.load_weights(model, weight_path)
            else:
                raise ValueError
        else:
            model.load_weights(weight_path)
        print('Restoring weights from: %s ... ' % weight_path)

    optimizer = tf.keras.optimizers.Adam()

    if logdir:
        if os.path.exists(logdir):
            shutil.rmtree(logdir)
        writer = tf.summary.create_file_writer(logdir)
    else:
        writer = None

    def train_step(image_data, target):
        with tf.GradientTape() as tape:
            pred_result = model(image_data, training=True)
            giou_loss = conf_loss = prob_loss = 0

            # optimizing process
            for i in range(3):
                conv, pred = pred_result[i * 2], pred_result[i * 2 + 1]
                loss_items = ops.compute_loss(pred, conv, target[i][0], target[i][1],
                                              STRIDES=STRIDES, NUM_CLASS=NUM_CLASS,
                                              IOU_LOSS_THRESH=IOU_LOSS_THRESH, i=i)
                giou_loss += loss_items[0]
                conf_loss += loss_items[1]
                prob_loss += loss_items[2]

            total_loss = giou_loss + conf_loss + prob_loss
            gradients = tape.gradient(total_loss, model.trainable_variables)
            optimizer.apply_gradients(zip(gradients, model.trainable_variables))

            tf.print("=> STEP %4d   lr: %.6f   giou_loss: %4.2f   conf_loss: %4.2f   "
                     "prob_loss: %4.2f   total_loss: %4.2f" % (global_steps, optimizer.lr.numpy(),
                                                               giou_loss, conf_loss,
                                                               prob_loss, total_loss))

            # update learning rate
            global_steps.assign_add(1)
            if global_steps < warmup_steps:
                lr = global_steps / warmup_steps * cfg.TRAIN.LR_INIT
            else:
                lr = cfg.TRAIN.LR_END + \
                     0.5*(cfg.TRAIN.LR_INIT - cfg.TRAIN.LR_END) * \
                     ((1 + tf.cos((global_steps - warmup_steps) / (total_steps - warmup_steps) * np.pi)))
            optimizer.lr.assign(lr.numpy())

            # if writer:
            #     # writing summary data
            #     with writer.as_default():
            #         tf.summary.scalar("lr", optimizer.lr, step=global_steps)
            #         tf.summary.scalar("loss/total_loss", total_loss, step=global_steps)
            #         tf.summary.scalar("loss/giou_loss", giou_loss, step=global_steps)
            #         tf.summary.scalar("loss/conf_loss", conf_loss, step=global_steps)
            #         tf.summary.scalar("loss/prob_loss", prob_loss, step=global_steps)
            #     writer.flush()

    def test_step(image_data, target):
        pred_result = model(image_data, training=True)
        giou_loss = conf_loss = prob_loss = 0

        # optimizing process
        for i in range(3):
            conv, pred = pred_result[i * 2], pred_result[i * 2 + 1]
            loss_items = ops.compute_loss(pred, conv, target[i][0], target[i][1],
                                          STRIDES=STRIDES, NUM_CLASS=NUM_CLASS,
                                          IOU_LOSS_THRESH=IOU_LOSS_THRESH, i=i)
            giou_loss += loss_items[0]
            conf_loss += loss_items[1]
            prob_loss += loss_items[2]

        total_loss = giou_loss + conf_loss + prob_loss

        tf.print("=> TEST STEP %4d   giou_loss: %4.2f   conf_loss: %4.2f   "
                     "prob_loss: %4.2f   total_loss: %4.2f" % (global_steps, giou_loss, conf_loss,
                                                               prob_loss, total_loss))

    for epoch in range(first_stage_epochs + second_stage_epochs):
        if epoch < first_stage_epochs:
            if not isfreeze:
                isfreeze = True
                for name in ['conv2d_93', 'conv2d_101', 'conv2d_109']:
                    freeze = model.get_layer(name)
                    ops.freeze_all(freeze)

        elif epoch >= first_stage_epochs:
            if isfreeze:
                isfreeze = False
                for name in ['conv2d_93', 'conv2d_101', 'conv2d_109']:
                    freeze = model.get_layer(name)
                    ops.unfreeze_all(freeze)

        for image_data, target in trainset:
            train_step(image_data, target)

        for image_data, target in testset:
            test_step(image_data, target)

        if save_path:
            model.save_weights(save_path)
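The learning-rate update inside train_step is a linear warmup to LR_INIT followed by a cosine decay down to LR_END. A standalone sketch of that schedule in plain Python (constants are hypothetical; the example reads them from cfg.TRAIN):

# Hedged sketch of the warmup + cosine schedule used in train_step above.
import math

LR_INIT, LR_END = 1e-3, 1e-6
warmup_steps, total_steps = 100, 1000

def scheduled_lr(step):
    if step < warmup_steps:
        # Linear ramp from 0 to LR_INIT during warmup.
        return step / warmup_steps * LR_INIT
    # Cosine decay from LR_INIT to LR_END over the remaining steps.
    progress = (step - warmup_steps) / (total_steps - warmup_steps)
    return LR_END + 0.5 * (LR_INIT - LR_END) * (1 + math.cos(progress * math.pi))

print(scheduled_lr(50), scheduled_lr(100), scheduled_lr(1000))  # ramp, peak, floor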
Example #13
from scipy import stats

import numpy as np

from classification import DecisionTreeClassifier
from data.Dataset import Dataset
from eval import Evaluator

if __name__ == "__main__":
    print("Loading the training dataset...")
    dataset = Dataset()
    dataset.readData("data/train_full.txt")
    x = dataset.features
    y = dataset.labels

    print("Training the decision tree...")
    classifier = DecisionTreeClassifier()
    classifier = classifier.train(x, y)

    classifier.print()

    print("\n")

    print("Tree visualisation graphically")
    print("\n")
    print("\n")
    print("\n")
    classifier.printImageTree()
    print("\n")
    print("\n")
    print("\n")
Example #14
    def test(self, model_file, output_file_path):
        print("Testing model ...")
        check_key_in_dict(dictionary=self.configs,
                          keys=["test_data_transcript_paths", "tfrecords_dir"])
        test_dataset = Dataset(
            data_path=self.configs["test_data_transcript_paths"],
            tfrecords_dir=self.configs["tfrecords_dir"],
            mode="test")
        msg = self.load_saved_model(model_file)
        if msg:
            raise Exception(msg)

        tf_test_dataset = test_dataset(text_featurizer=self.text_featurizer,
                                       speech_conf=self.configs["speech_conf"],
                                       batch_size=self.configs["batch_size"])

        def test_step(features, inp_length, transcripts):
            predictions = self.predict(features, inp_length)
            predictions = bytes_to_string(predictions.numpy())

            transcripts = self.decoder.convert_to_string(transcripts)

            b_wer = 0.0
            b_wer_count = 0.0
            b_cer = 0.0
            b_cer_count = 0.0

            for idx, decoded in enumerate(predictions):
                print(f"Pred: {decoded}")
                print(f"Groundtruth: {transcripts[idx]}")
                _wer, _wer_count = wer(decode=decoded, target=transcripts[idx])
                _cer, _cer_count = cer(decode=decoded, target=transcripts[idx])
                b_wer += _wer
                b_cer += _cer
                b_wer_count += _wer_count
                b_cer_count += _cer_count

            gc.collect()

            return b_wer, b_wer_count, b_cer, b_cer_count

        total_wer = 0.0
        wer_count = 0.0
        total_cer = 0.0
        cer_count = 0.0

        for feature, input_length, label, _ in tf_test_dataset:
            batch_wer, batch_wer_count, batch_cer, batch_cer_count = test_step(
                feature, input_length, label)
            total_wer += batch_wer
            total_cer += batch_cer
            wer_count += batch_wer_count
            cer_count += batch_cer_count

        results = (total_wer / wer_count, total_cer / cer_count)

        print(f"WER: {results[0]}, CER: {results[-1]}")

        with open(output_file_path, "w", encoding="utf-8") as of:
            of.write("WER: " + str(results[0]) + "\n")
            of.write("CER: " + str(results[-1]) + "\n")
Example #15
    def keras_train_and_eval(self, model_file=None):
        print("Training and evaluating model ...")

        check_key_in_dict(dictionary=self.configs,
                          keys=[
                              "tfrecords_dir", "checkpoint_dir",
                              "augmentations", "log_dir",
                              "train_data_transcript_paths"
                          ])
        augmentations = self.configs["augmentations"]
        augmentations.append(None)

        train_dataset = Dataset(
            data_path=self.configs["train_data_transcript_paths"],
            tfrecords_dir=self.configs["tfrecords_dir"],
            mode="train",
            is_keras=True)
        tf_train_dataset = train_dataset(
            text_featurizer=self.text_featurizer,
            speech_conf=self.configs["speech_conf"],
            batch_size=self.configs["batch_size"],
            augmentations=augmentations)
        # tf_train_dataset_sortagrad = train_dataset(text_featurizer=self.text_featurizer,
        #                                            speech_conf=self.configs["speech_conf"],
        #                                            batch_size=self.configs["batch_size"],
        #                                            augmentations=augmentations, sortagrad=True)

        tf_eval_dataset = None
        if self.configs["eval_data_transcript_paths"]:
            eval_dataset = Dataset(
                data_path=self.configs["eval_data_transcript_paths"],
                tfrecords_dir=self.configs["tfrecords_dir"],
                mode="eval",
                is_keras=True)
            tf_eval_dataset = eval_dataset(
                text_featurizer=self.text_featurizer,
                speech_conf=self.configs["speech_conf"],
                batch_size=self.configs["batch_size"])

        train_model = create_ctc_train_model(
            self.model,
            last_activation=self.configs["last_activation"],
            num_classes=self.text_featurizer.num_classes)
        self._create_checkpoints(train_model)

        self.model.summary()

        initial_epoch = 0
        if self.ckpt_manager.latest_checkpoint:
            initial_epoch = int(
                self.ckpt_manager.latest_checkpoint.split('-')[-1])
            # restoring the latest checkpoint in checkpoint_path
            self.ckpt.restore(self.ckpt_manager.latest_checkpoint)

        train_model.compile(optimizer=self.optimizer,
                            loss={
                                "ctc_loss": lambda y_true, y_pred: y_pred
                            })

        callback = [Checkpoint(self.ckpt_manager)]
        if self.configs["log_dir"]:
            if not os.path.exists(self.configs["log_dir"]):
                os.makedirs(self.configs["log_dir"])
            with open(os.path.join(self.configs["log_dir"], "model.json"),
                      "w") as f:
                f.write(self.model.to_json())
            callback.append(
                TimeHistory(os.path.join(self.configs["log_dir"], "time.txt")))
            callback.append(
                tf.keras.callbacks.TensorBoard(
                    log_dir=self.configs["log_dir"]))

        if tf_eval_dataset is not None:
            # if initial_epoch == 0:
            #   train_model.fit(x=tf_train_dataset_sortagrad, epochs=1,
            #                   validation_data=tf_eval_dataset, shuffle="batch",
            #                   initial_epoch=initial_epoch, callbacks=callback)
            #   initial_epoch = 1

            train_model.fit(x=tf_train_dataset,
                            epochs=self.configs["num_epochs"],
                            validation_data=tf_eval_dataset,
                            shuffle="batch",
                            initial_epoch=initial_epoch,
                            callbacks=callback)
        else:
            # if initial_epoch == 0:
            #   train_model.fit(x=tf_train_dataset_sortagrad, epochs=1, shuffle="batch",
            #                   initial_epoch=initial_epoch, callbacks=callback)
            #   initial_epoch = 1

            train_model.fit(x=tf_train_dataset,
                            epochs=self.configs["num_epochs"],
                            shuffle="batch",
                            initial_epoch=initial_epoch,
                            callbacks=callback)

        if model_file:
            self.save_model(model_file)
Example #16
    app_name = args["app_name"]
    '''
    Constant definitions
    '''
    file_name = "File_Directory/results/{}.json".format(app_name)
    new_data_name = "{}_re_predict_data".format(app_name)
    new_result_name = "{}_re_predict_out".format(app_name)
    final_result_name = "{}_final_out".format(app_name)
    threshold = args["re_predict_threshold"]
    mix_rate = args['re_predict_mix_rate']
    decay_rate = args['re_predict_decay_rate']
    select_threshold = args['re_predict_select_threshold']
    '''
    Prediction process
    '''
    datasets = Dataset(logger=logger, args=param.get_config(param.DATASET))
    datasets.load_examples()
    trainset, validset, testset = datasets.get_split()

    predict_preprocess = PreProcess(logger=logger,
                                    args=param.get_config(param.DATASET),
                                    examples=testset,
                                    for_prediction=True)
    predict_preprocess.prepare_batch_data(cache_filename="")
    predict_vocab_size = predict_preprocess.get_vocab_size()
    predict_batch_reader = predict_preprocess.batch_generator()

    predict_engine = PredictEngine(param=param, logger=logger, vocab_size=1)
    predict_engine.init_model(vocab_size=predict_vocab_size)

    predict_engine.predict(predict_batch_reader)
Example #17
def evaluate(model_name, weight_path):
    assert model_name in ['yolov3_tiny', 'yolov3', 'yolov4']

    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    if len(physical_devices) > 0:
        tf.config.experimental.set_memory_growth(physical_devices[0], True)

    NUM_CLASS = len(utils.read_class_names(cfg.YOLO.CLASSES))
    STRIDES = np.array(cfg.YOLO.STRIDES)
    IOU_LOSS_THRESH = cfg.YOLO.IOU_LOSS_THRESH
    XYSCALE = cfg.YOLO.XYSCALE
    ANCHORS = utils.get_anchors(cfg.YOLO.ANCHORS)

    trainset = Dataset('train')

    isfreeze = False
    steps_per_epoch = len(trainset)
    first_stage_epochs = cfg.TRAIN.FISRT_STAGE_EPOCHS
    second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS

    global_steps = tf.Variable(1, trainable=False, dtype=tf.int64)
    warmup_steps = cfg.TRAIN.WARMUP_EPOCHS * steps_per_epoch
    total_steps = (first_stage_epochs + second_stage_epochs) * steps_per_epoch

    input_layer = tf.keras.layers.Input([cfg.TRAIN.INPUT_SIZE, cfg.TRAIN.INPUT_SIZE, 3])
    if model_name=='yolov3_tiny':
        feature_maps = YOLOv3_tiny(input_layer, NUM_CLASS)
        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            bbox_tensor = ops.decode_train(fm, NUM_CLASS, STRIDES, ANCHORS, i)
            bbox_tensors.append(fm)
            bbox_tensors.append(bbox_tensor)
        model = tf.keras.Model(input_layer, bbox_tensors)
    elif model_name=='yolov3':
        feature_maps = YOLOv3(input_layer, NUM_CLASS)
        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            bbox_tensor = ops.decode_train(fm, NUM_CLASS, STRIDES, ANCHORS, i)
            bbox_tensors.append(fm)
            bbox_tensors.append(bbox_tensor)
        model = tf.keras.Model(input_layer, bbox_tensors)
    elif model_name=='yolov4':
        feature_maps = YOLOv4(input_layer, NUM_CLASS)
        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            bbox_tensor = ops.decode_train(fm, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE)
            bbox_tensors.append(fm)
            bbox_tensors.append(bbox_tensor)
        model = tf.keras.Model(input_layer, bbox_tensors)
    else:
        raise ValueError

    if weight_path:
        if weight_path.split(".")[-1] == "weights":
            if model_name == 'yolov3_tiny':
                utils.load_weights_tiny(model, weight_path)
            elif model_name=='yolov3':
                utils.load_weights_v3(model, weight_path)
            elif model_name=='yolov4':
                utils.load_weights(model, weight_path)
            else:
                raise ValueError
        else:
            model.load_weights(weight_path)
        print('Restoring weights from: %s ... ' % weight_path)

    trainset = Dataset('train')

    for image_data, target in trainset:
        pred_result = model(image_data, training=True)
        giou_loss = conf_loss = prob_loss = 0

        for i in range(3):
            conv, pred = pred_result[i * 2], pred_result[i * 2 + 1]
            loss_items = ops.compute_loss(pred, conv, target[i][0], target[i][1],
                                              STRIDES=STRIDES, NUM_CLASS=NUM_CLASS,
                                              IOU_LOSS_THRESH=IOU_LOSS_THRESH, i=i)
            giou_loss += loss_items[0]
            conf_loss += loss_items[1]
            prob_loss += loss_items[2]

        total_loss = giou_loss + conf_loss + prob_loss

        tf.print("=> STEP %4d   giou_loss: %4.2f   conf_loss: %4.2f   "
                 "prob_loss: %4.2f   total_loss: %4.2f" % (global_steps, giou_loss,
                                                           conf_loss, prob_loss, total_loss))
Example #18
tf.random.set_random_seed(2017)

if __name__ == "__main__":
    config = configparser.ConfigParser()
    config.read("neurec.properties")
    conf = dict(config.items("default"))
    data_input_path = conf["data.input.path"]
    dataset_name = conf["data.input.dataset"]
    splitter = conf["data.splitter"]
    separator = eval(conf["data.convert.separator"])
    threshold = float(conf["data.convert.binarize.threshold"])
    recommender = str(conf["recommender"])
    evaluate_neg = int(conf["rec.evaluate.neg"])
    num_thread = int(conf["rec.number.thread"])
    splitterRatio = list(eval(conf["data.splitterratio"]))
    dataset = Dataset(data_input_path, splitter, separator, threshold, evaluate_neg, dataset_name, splitterRatio)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        if recommender.lower() == "mf":
            model = MF(sess, dataset)

        elif recommender.lower() == "fpmc":
            model = FPMC(sess, dataset)

        elif recommender.lower() == "fpmcplus":
            model = FPMCplus(sess, dataset)

        elif recommender.lower() == "fism":
            model = FISM(sess, dataset)
Example #19
def prune_train(model_name, weight_path, logdir, save_path, epoches):
    assert model_name in ['yolov3_tiny', 'yolov3', 'yolov4']

    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    if len(physical_devices) > 0:
        tf.config.experimental.set_memory_growth(physical_devices[0], True)

    NUM_CLASS = len(utils.read_class_names(cfg.YOLO.CLASSES))
    STRIDES = np.array(cfg.YOLO.STRIDES)
    IOU_LOSS_THRESH = cfg.YOLO.IOU_LOSS_THRESH
    XYSCALE = cfg.YOLO.XYSCALE
    ANCHORS = utils.get_anchors(cfg.YOLO.ANCHORS)

    trainset = Dataset('train')

    isfreeze = False
    steps_per_epoch = len(trainset)
    first_stage_epochs = cfg.TRAIN.FISRT_STAGE_EPOCHS
    second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS

    global_steps = tf.Variable(1, trainable=False, dtype=tf.int64)
    warmup_steps = cfg.TRAIN.WARMUP_EPOCHS * steps_per_epoch
    total_steps = (first_stage_epochs + second_stage_epochs) * steps_per_epoch

    input_layer = tf.keras.layers.Input(
        [cfg.TRAIN.INPUT_SIZE, cfg.TRAIN.INPUT_SIZE, 3])
    if model_name == 'yolov3_tiny':
        feature_maps = YOLOv3_tiny(input_layer, NUM_CLASS)
        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            bbox_tensor = ops.decode_train(fm, NUM_CLASS, STRIDES, ANCHORS, i)
            bbox_tensors.append(fm)
            bbox_tensors.append(bbox_tensor)
        model = tf.keras.Model(input_layer, bbox_tensors)
    elif model_name == 'yolov3':
        feature_maps = YOLOv3(input_layer, NUM_CLASS)
        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            bbox_tensor = ops.decode_train(fm, NUM_CLASS, STRIDES, ANCHORS, i)
            bbox_tensors.append(fm)
            bbox_tensors.append(bbox_tensor)
        model = tf.keras.Model(input_layer, bbox_tensors)
    elif model_name == 'yolov4':
        feature_maps = YOLOv4(input_layer, NUM_CLASS)
        bbox_tensors = []
        for i, fm in enumerate(feature_maps):
            bbox_tensor = ops.decode_train(fm, NUM_CLASS, STRIDES, ANCHORS, i,
                                           XYSCALE)
            bbox_tensors.append(fm)
            bbox_tensors.append(bbox_tensor)
        model = tf.keras.Model(input_layer, bbox_tensors)
    else:
        raise ValueError

    if weight_path:
        if weight_path.split(".")[-1] == "weights":
            if model_name == 'yolov3_tiny':
                utils.load_weights_tiny(model, weight_path)
            elif model_name == 'yolov3':
                utils.load_weights_v3(model, weight_path)
            elif model_name == 'yolov4':
                utils.load_weights(model, weight_path)
            else:
                raise ValueError
        else:
            model.load_weights(weight_path)
        print('Restoring weights from: %s ... ' % weight_path)

    optimizer = tf.keras.optimizers.Adam(learning_rate=0.0001)

    if os.path.exists(logdir):
        shutil.rmtree(logdir)

    # for layer in model.layers:
    #     print(layer.name, isinstance(layer, tf.keras.layers.Conv2D))

    def apply_pruning_to_dense(layer):
        if isinstance(layer, tf.keras.layers.Conv2D):
            return tfmot.sparsity.keras.prune_low_magnitude(layer)
        return layer

    # Use `tf.keras.models.clone_model` to apply `apply_pruning_to_dense`
    # to the layers of the model.
    model_for_pruning = tf.keras.models.clone_model(
        model,
        clone_function=apply_pruning_to_dense,
    )
    # model_for_pruning.summary()

    unused_arg = -1
    model_for_pruning.optimizer = optimizer

    step_callback = tfmot.sparsity.keras.UpdatePruningStep()
    step_callback.set_model(model_for_pruning)

    log_callback = tfmot.sparsity.keras.PruningSummaries(
        log_dir=logdir)  # Log sparsity and other metrics in Tensorboard.
    log_callback.set_model(model_for_pruning)

    step_callback.on_train_begin()  # run pruning callback
    for epoch in range(epoches):
        log_callback.on_epoch_begin(epoch=unused_arg)  # run pruning callback

        for image_data, target in trainset:
            step_callback.on_train_batch_begin(
                batch=unused_arg)  # run pruning callback
            with tf.GradientTape() as tape:
                pred_result = model_for_pruning(image_data, training=True)
                giou_loss = conf_loss = prob_loss = 0

                # optimizing process
                for i in range(3):
                    conv, pred = pred_result[i * 2], pred_result[i * 2 + 1]
                    loss_items = ops.compute_loss(
                        pred,
                        conv,
                        target[i][0],
                        target[i][1],
                        STRIDES=STRIDES,
                        NUM_CLASS=NUM_CLASS,
                        IOU_LOSS_THRESH=IOU_LOSS_THRESH,
                        i=i)
                    giou_loss += loss_items[0]
                    conf_loss += loss_items[1]
                    prob_loss += loss_items[2]

                total_loss = giou_loss + conf_loss + prob_loss
                gradients = tape.gradient(
                    total_loss, model_for_pruning.trainable_variables)
                optimizer.apply_gradients(
                    zip(gradients, model_for_pruning.trainable_variables))

                tf.print(
                    "=> STEP %4d   lr: %.6f   giou_loss: %4.2f   conf_loss: %4.2f   "
                    "prob_loss: %4.2f   total_loss: %4.2f" %
                    (global_steps, optimizer.lr.numpy(), giou_loss, conf_loss,
                     prob_loss, total_loss))

        step_callback.on_epoch_end(batch=unused_arg)  # run pruning callback

    model_for_export = tfmot.sparsity.keras.strip_pruning(model_for_pruning)

    return model_for_export
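strip_pruning removes the pruning wrappers so the returned model_for_export is an ordinary Keras model. A hedged usage sketch (paths and argument values are hypothetical, not from the original project):

# Hedged usage sketch: train with pruning for one epoch and save the stripped model's weights.
pruned = prune_train('yolov4', weight_path=None, logdir='./prune_logs',
                     save_path=None, epoches=1)
pruned.save_weights('./yolov4_pruned.h5')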