Example #1
def re_train_trained_model(s_data, is_image=False, extend_dim=True):
    # evaluate a previously trained model, then re-train it and evaluate again
    model_path = "/home/migue/PycharmProjects/keras/modelos y features/meanMFCC en xccoverbl_split adam opt/meanMFCC_model.json"
    weight_path = "/home/migue/PycharmProjects/keras/modelos y features/meanMFCC en xccoverbl_split adam opt/meanMFCC_model_weights.h5"
    model = load_trainned_model(model_path, weight_path)

    opt = Adam()
    loss = "categorical_crossentropy" if len(
        s_data.classes) > 2 else "binary_crossentropy"
    model.compile(loss=loss, optimizer=opt, metrics=["accuracy"])

    if extend_dim:
        s_data.extend_feat_one_dim()  # if data is 1D

    trainX, testX, trainY, testY = split_features(s_data)

    loss, acc = model.evaluate(x=testX, y=testY)
    print("ACCURACY:", acc)
    print("LOSS:", loss)

    history = train_model(model,
                          s_data,
                          trainX,
                          testX,
                          trainY,
                          testY,
                          is_image=is_image)
    print("After re-train")
    loss, acc = model.evaluate(x=testX, y=testY)
    print("ACCURACY:", acc)
    print("LOSS:", loss)
Example #2
def main(settings):
    features, minima, maxima, scaling_parameter = feature_extraction(
        settings.dataset_dir)
    window = 5
    X_train, y_train, X_test, y_test = split_features(features[::-1], window)
    print("X_train", X_train.shape)
    print("y_train", y_train.shape)
    print("X_test", X_test.shape)
    print("y_test", y_test.shape)

    # load json and create model
    layout_path = glob.glob(os.path.join(settings.model_dir,
                                         "*layout.json"))[0]
    with open(layout_path, 'r') as json_file:
        loaded_model_json = json_file.read()
    model = model_from_json(loaded_model_json)

    # load weights into new model
    weights_path = glob.glob(os.path.join(settings.model_dir,
                                          "*weights.h5"))[0]
    model.load_weights(weights_path)
    print("Loaded model from disk")

    predicted2 = model.predict(X_test)
    actual = y_test
    predicted2 = (predicted2 * scaling_parameter) + minima
    actual = (actual * scaling_parameter) + minima

    # square root of the MAPE (note: sqrt(MAPE) is not a standard metric)
    mape2 = sqrt(mean_absolute_percentage_error(predicted2, actual))
    # mean_absolute_error computes the MAE, not the MSE
    mae2 = mean_absolute_error(actual, predicted2)

    print(json.dumps({"mape": mape2, "mae": mae2}))
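mean_absolute_percentage_error is not defined in this example; a minimal sketch assuming the conventional MAPE formula and the argument order used in the call above:

import numpy as np

def mean_absolute_percentage_error(y_pred, y_true):
    # MAPE = mean(|actual - predicted| / |actual|) * 100
    y_pred, y_true = np.asarray(y_pred), np.asarray(y_true)
    return np.mean(np.abs((y_true - y_pred) / y_true)) * 100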
Example #3
def main(settings):
    features, minima, maxima, scaling_parameter = feature_extraction(settings.dataset_dir)
    window = 5
    X_train, y_train, X_test, y_test = split_features(features[::-1], window)
    print("X_train", X_train.shape)
    print("y_train", y_train.shape)
    print("X_test", X_test.shape)
    print("y_test", y_test.shape)

    json_logging_callback = LambdaCallback(
        on_epoch_end=lambda epoch, logs: print(json.dumps({
            "epoch": epoch,
            "loss": logs["loss"],
            "acc": logs["acc"],
            "val_loss": logs["val_loss"],
            "val_acc": logs["val_acc"],
        })),
    )

    # figure out which model architecture to use
    arch = settings.model_architecture
    assert arch in model_architectures, "Unknown model architecture '%s'." % arch
    builder = model_architectures[arch]

    # build and train the model
    model = builder([len(features.columns), window, 1])
    model.fit(
        X_train,
        y_train,
        batch_size=settings.batch_size,
        epochs=settings.epochs,
        validation_split=settings.validation_split,
        callbacks=[json_logging_callback],
        verbose=0)

    # serialize model to JSON
    model_json = model.to_json()
    with open(os.path.join(settings.output_dir, "model-layout.json"), "w") as json_file:
        json_file.write(model_json)

    # serialize weights to HDF5
    model.save_weights(os.path.join(settings.output_dir, "model-weights.h5"))
    print("Saved model to disk")
Example #4
    # s_data = initialize_sound_data(p, features, get_features=False, is_image=True, filter_args={}) # load images
    # s_data = initialize_sound_data(p, features, get_features=True, is_image=False, filter_args={}, use_carlos_feat=True)

    # Test with images
    # model = LeNet.build(s_data.feat_amount, len(s_data.classes))
    # model = CIFAR.build(s_data.feat_amount, len(s_data.classes))

    # Test with 2D data
    # s_data.extend_feat_one_dim() # if data is 1D
    # model = DeepNet.Conv(s_data.feat_amount, len(s_data.classes))
    # model = DeepNet_WithOut_Dropout.Conv(s_data.feat_amount, len(s_data.classes))

    # Any type of data
    model = DeepNet.NotConv(s_data.feat_amount, len(s_data.classes))

    trainX, testX, trainY, testY = split_features(s_data)

    t1 = time()
    # train on image data with the Adam optimizer
    # history = train_model(model, s_data, trainX, testX, trainY, testY)
    # train on non-image data with the Adam optimizer
    history = train_model(model,
                          s_data,
                          trainX,
                          testX,
                          trainY,
                          testY,
                          is_image=False)
    t2 = time()
    print("Demora : " + str(t2 - t1) + " segundos")
    save_plots(model, s_data, history, testX, testY, "adam")
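split_features itself is not shown in these sound-data examples; a minimal sketch under the assumption that it wraps a stratified scikit-learn split (the .features and .labels attribute names are guesses):

from sklearn.model_selection import train_test_split

def split_features(s_data, test_size=0.25):
    # returns trainX, testX, trainY, testY in the order used above
    return train_test_split(s_data.features, s_data.labels,
                            test_size=test_size, stratify=s_data.labels)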
Example #5
    def fitness(num_pooling_layers, stride_size, kernel_size, no_filters,
                num_encdec_layers, batch_size, learning_rate, drop_rate_1):
        init = time.perf_counter()
        print("fitness>>>")
        CNN_LSTM.no_calls_fitness += 1
        print("Number of calls to fitness", CNN_LSTM.no_calls_fitness)

        n_steps = EncDec.parameters.get_n_steps()
        n_features = EncDec.parameters.get_n_features()

        n_seq = EncDec.parameters.get_n_seq()
        n_input = EncDec.parameters.get_n_input()
        normal_sequence = EncDec.normal_sequence
        normal_sequence = utils.fit_transform_data(normal_sequence)

        folds = split_folds(normal_sequence, n_folds=3)
        print("len folds", len(folds))
        all_losses = list()
        for fold in folds:
            normal_sequence = fold
            X_train_full, y_train_full = utils.generate_full(
                normal_sequence,
                n_steps,
                input_form=CNN_LSTM.input_form,
                output_form=CNN_LSTM.output_form,
                n_seq=n_seq,
                n_input=n_input,
                n_features=n_features)
            config = [
                num_pooling_layers, stride_size, kernel_size, no_filters,
                num_encdec_layers, batch_size, learning_rate, drop_rate_1
            ]
            print("Num pooling layers", num_pooling_layers)
            print("Stride size", stride_size)
            print("Kernel size", kernel_size)
            print("No filters", no_filters)
            print("EncDec layers", num_encdec_layers)
            print("Batch size", batch_size)
            print("Learning rate", learning_rate)
            print("Drop rate", drop_rate_1)
            model = CNN_LSTM.type_model_func(X_train_full, y_train_full,
                                             config)

            X_train, y_train, X_val_1, y_val_1, X_val_2, y_val_2 = utils.generate_sets(
                normal_sequence,
                n_steps,
                input_form=CNN_LSTM.input_form,
                output_form=CNN_LSTM.output_form,
                validation=True,
                n_seq=CNN_LSTM.parameters.get_n_seq(),
                n_input=CNN_LSTM.parameters.get_n_input(),
                n_features=CNN_LSTM.parameters.get_n_features())
            es = EarlyStopping(monitor='val_loss',
                               min_delta=0.01,
                               mode='min',
                               verbose=1)
            input_data = list()

            print("CNN_LSTM TYPE", CNN_LSTM.type_model)
            hist = None
            if CNN_LSTM.type_model == "multi-channel":
                hist = model.fit(X_train,
                                 y_train,
                                 validation_data=(X_val_1, y_val_1),
                                 epochs=100,
                                 batch_size=batch_size,
                                 callbacks=[es])

            elif CNN_LSTM.type_model == "multi-head":
                input_data = utils.split_features(
                    CNN_LSTM.parameters.get_n_features(), X_train)
                hist = model.fit(input_data,
                                 y_train,
                                 validation_data=(X_val_1, y_val_1),
                                 epochs=200,
                                 batch_size=batch_size,
                                 callbacks=[es])

            if hist is None:
                raise ValueError("Unknown CNN_LSTM.type_model: %s" %
                                 CNN_LSTM.type_model)
            loss = hist.history['val_loss'][-1]
            all_losses.append(loss)

        loss = float(np.mean(all_losses))
        del model

        clear_session()
        tensorflow.compat.v1.reset_default_graph()

        end = time.perf_counter()
        diff = end - init

        return loss, diff
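split_folds is assumed to cut the training sequence into contiguous chunks for the cross-validation loop above; a minimal sketch:

import numpy as np

def split_folds(sequence, n_folds=3):
    # n_folds roughly equal, contiguous chunks, in temporal order
    return np.array_split(np.asarray(sequence), n_folds)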
Example #6
def stack_models(models,
                 sound_data,
                 remove_outputs,
                 is_image=False,
                 sgd_opt=False,
                 extend_feat_dim=False):
    if not isinstance(extend_feat_dim, list):
        extend_feat_dim = [extend_feat_dim] * len(models)
    s_data = sound_data.clone()
    histories = []
    for i in range(len(models) - 1):
        if extend_feat_dim[i]:
            s_data.extend_feat_one_dim()
        trainX, testX, trainY, testY = split_features(s_data)

        print("Comienza el entranamiento del modelo " + str(i + 1))
        model = models[i]
        h = train_model(model,
                        s_data,
                        trainX,
                        testX,
                        trainY,
                        testY,
                        is_image=is_image,
                        sgd_opt=sgd_opt)
        histories.append(h)

        # After training, remove the classification (output) layers
        if len(model.layers) < remove_outputs[i]:
            raise ValueError('There are not enough layers in the model.')
        for _ in range(remove_outputs[i]):
            model.pop()

        dic = models[i + 1].layers[0].get_config()
        aux_dim = []
        aux_dim.extend(model.output_shape)
        if extend_feat_dim[i + 1]:  # if the next model needs extended features, do it
            aux_dim.append(1)
        dic['batch_input_shape'] = aux_dim

        aux_model = Sequential()
        aux_model.add(models[i + 1].layers[0].from_config(dic))
        # models[i + 1].layers[0] = models[i + 1].layers[0].from_config(dic)
        for l in models[i + 1].layers[1:]:
            new_layer = l.from_config(l.get_config())  # rebuild the layer from its config
            new_layer.name += str(i + 1)  # keep layer names unique across stacked models
            aux_model.add(new_layer)
        models[i + 1] = aux_model

        s_data.update_features(model.predict)

    if extend_feat_dim[-1]:
        s_data.extend_feat_one_dim()
    trainX, testX, trainY, testY = split_features(s_data)
    print("Comienza el entranamiento del modelo " + str(len(models)))
    h = train_model(models[-1],
                    s_data,
                    trainX,
                    testX,
                    trainY,
                    testY,
                    is_image=is_image,
                    sgd_opt=sgd_opt)
    histories.append(h)
    for i, h in enumerate(histories):
        n = -1
        print("Model " + str(i + 1) + " => loss: " +
              str(h.history['loss'][n]) + " acc: " + str(h.history['acc'][n]) +
              " val_loss: " + str(h.history['val_loss'][n]) + " val_acc: " +
              str(h.history['val_acc'][n]))

    # save training plots for the final model
    save_plots(models[-1], s_data, h, testX, testY, "gradient descent")

    return h
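A hypothetical call, assuming two DeepNet models where one output layer is popped from the first before its activations feed the second (names and sizes are illustrative, not from the source):

models = [DeepNet.NotConv(s_data.feat_amount, len(s_data.classes)),
          DeepNet.NotConv(128, len(s_data.classes))]  # input layer is rebuilt inside stack_models
history = stack_models(models, s_data, remove_outputs=[1])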