def main(data_path, output_path):
    """Train a stacked-LSTM activity classifier and save it to disk.

    :param data_path: directory passed to load_data() yielding train/val arrays
    :param output_path: directory where the trained model (merged_dcl.h5) is written
    """
    X_trainS1, Y_train, X_valS1, Y_val = load_data(data_path)

    epochs = 10
    batch_size = 256
    dropout_rate = 0.15
    n_classes = 6

    # Sub-model input: 128 time steps x 3 channels.
    main_input1 = Input(shape=(128, 3), name='main_input1')

    def lstm_cell(main_input):
        """Build the DeepConvLSTM-style sub-model on top of main_input.

        :param main_input: input tensor of shape (128, 3)
        :return: output tensor after the LSTM stack and dropout
        """
        x = TimeDistributed(Dense(384), input_shape=(128, 3))(main_input)
        x = LSTM(256, return_sequences=True)(x)
        x = LSTM(128, return_sequences=True)(x)
        x = LSTM(128)(x)  # final LSTM collapses the time dimension
        return Dropout(dropout_rate)(x)

    # NOTE(review): intermediate tensors no longer reuse the name `model`,
    # which previously shadowed the compiled Model below.
    features = lstm_cell(main_input1)
    features = Dropout(0.4)(features)
    logits = Dense(n_classes)(features)
    logits = BatchNormalization()(logits)
    output = Activation('softmax', name="softmax")(logits)

    model = Model([main_input1], output)
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    # summary() prints itself and returns None; wrapping it in print()
    # previously emitted a stray "None" line.
    model.summary()

    metrics = Metrics()  # callback that reports FPR per epoch
    history = model.fit(X_trainS1,
                        Y_train,
                        batch_size=batch_size,
                        validation_data=(X_valS1, Y_val),
                        epochs=epochs,
                        callbacks=[metrics])

    model_path = os.path.join(output_path, "merged_dcl.h5")
    model.save(model_path)  # persist the trained model
    print(history.history)
# --- Exemplo n.º 2 (score: 0) ---
def main(data_path, output_path):
    """Train a CNN+LSTM (DeepConvLSTM-style) classifier and save it to disk.

    :param data_path: directory passed to load_data() yielding train/val arrays
    :param output_path: directory where the trained model (merged_dcl.h5) is written
    """
    X_trainS1, Y_train, X_valS1, Y_val = load_data(data_path)

    epochs = 10
    batch_size = 256
    kernel_size = 3
    pool_size = 2
    dropout_rate = 0.15
    n_classes = 6

    f_act = 'relu'

    # Sub-model input: 128 time steps x 3 channels.
    main_input1 = Input(shape=(128, 3), name='main_input1')

    def cnn_lstm_cell(main_input):
        """Build the DeepConvLSTM sub-model: three conv/pool stages, then three LSTMs.

        :param main_input: input tensor of shape (128, 3)
        :return: output tensor after the LSTM stack and dropout
        """
        # Conv stage 1: 512 filters.
        x = Conv1D(512, kernel_size, input_shape=(128, 3),
                   activation=f_act, padding='same')(main_input)
        x = BatchNormalization()(x)
        x = MaxPooling1D(pool_size=pool_size)(x)
        x = Dropout(dropout_rate)(x)
        # Conv stage 2: 64 filters.
        x = Conv1D(64, kernel_size, activation=f_act, padding='same')(x)
        x = BatchNormalization()(x)
        x = MaxPooling1D(pool_size=pool_size)(x)
        x = Dropout(dropout_rate)(x)
        # Conv stage 3: 32 filters.
        x = Conv1D(32, kernel_size, activation=f_act, padding='same')(x)
        x = BatchNormalization()(x)
        x = MaxPooling1D(pool_size=pool_size)(x)
        # Recurrent stack; the last LSTM collapses the time dimension.
        x = LSTM(128, return_sequences=True)(x)
        x = LSTM(128, return_sequences=True)(x)
        x = LSTM(128)(x)
        return Dropout(dropout_rate)(x)

    # NOTE(review): leftover tensor-shape debug prints were removed, and
    # intermediate tensors no longer reuse the name `model`.
    features = cnn_lstm_cell(main_input1)
    features = Dropout(0.4)(features)
    logits = Dense(n_classes)(features)
    logits = BatchNormalization()(logits)
    output = Activation('softmax', name="softmax")(logits)

    model = Model([main_input1], output)
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])

    metrics = Metrics()  # callback that reports FPR per epoch
    history = model.fit(X_trainS1,
                        Y_train,
                        batch_size=batch_size,
                        validation_data=(X_valS1, Y_val),
                        epochs=epochs,
                        callbacks=[metrics])

    model_path = os.path.join(output_path, "merged_dcl.h5")
    model.save(model_path)  # persist the trained model
    print(history.history)
# --- Exemplo n.º 3 (score: 0) ---
        # NOTE(review): this span starts mid-function; `model`, `x_train`,
        # `y_train`, `x_valid`, `y_valid`, `epochs` and `metrics` are defined
        # above the visible region -- verify against the enclosing definition.
        start_time = time.time()
        # Fit model
        model.fit(x_train,
                  y_train,
                  batch_size=16,
                  validation_data=(x_valid, y_valid),
                  epochs=epochs,
                  shuffle=True,
                  verbose=2,
                  callbacks=[metrics])
        # Get time
        total_train_time = time.time() - start_time
        print('Total training time in seconds: ')
        print(total_train_time)
        # Save model to file
        model.save(os.path.join('.', 'trained_model.h5'))

        #    Plotting Epoch/accuracy
        #    print (metrics.acc)
        #    plt.plot(metrics.acc)
        #    plt.plot(metrics.val_acc,color='red')
        #    plt.xlabel('epochs')
        #    plt.ylabel('accuracy')
        #    plt.show()

        #Final evaluation of the model
        # evaluate() returns [loss, accuracy] given metrics=['accuracy'];
        # index 1 below presumes that ordering -- confirm compile() args above.
        score_val = model.evaluate(x_valid, y_valid, verbose=0)
        print("Accuracy on Validation data:  %.2f%%" % (score_val[1] * 100))
        score_train = model.evaluate(x_train, y_train, verbose=0)
        print("Accuracy on Train data:  %.2f%%" % (score_train[1] * 100))
# --- Exemplo n.º 4 (score: 0) ---
    def __init__(self, model_name="Xception", train_class_name=None, training_batch_size=100, existing_weight=None, test_percentage=0.02, learning_rate=0.000, validation_every_X_batch=5):
        """Build a transfer-learning classifier for one label class and pre-save it.

        :param model_name: one of "Xception", "MobileNet", "DenseNet121"
        :param train_class_name: label class (column 1) to select from
            base/Annotations/label.csv; required
        :param training_batch_size: minibatch size stored for later training
        :param existing_weight: unused in this method -- TODO confirm whether
            pre-trained weights were meant to be loaded from it
        :param test_percentage: fraction of samples held out by train_test_split
        :param learning_rate: Adam learning rate. NOTE(review): the default
            0.000 means the optimizer never updates weights -- looks like a
            typo; kept for backward compatibility but confirm intended value.
        :param validation_every_X_batch: validation frequency stored for later use
        """
        if train_class_name is None:  # identity check instead of == None
            print("You must specify train_class_name")
            return

        self.validation_every_X_batch = validation_every_X_batch
        self.Y = []

        # Timestamped folder name so repeated runs do not collide.
        self.model_file = model_name + "-{date:%Y-%m-%d-%H-%M-%S}".format(date=datetime.datetime.now())
        print("model_folder: ", self.model_file)

        self.train_class_name = train_class_name
        if not os.path.exists(os.path.join("models", train_class_name)):
            os.makedirs(os.path.join("models", train_class_name))

        self.training_batch_size = training_batch_size

        # Fixed input resolution for all backbones.
        img_size = 512
        self.img_size_flat = img_size * img_size * 3
        self.img_shape_full = (img_size, img_size, 3)

        self.test = {}

        # First CSV pass: the label string of the first row of this class
        # determines the number of classes (one character per class).
        with open('base/Annotations/label.csv', 'r') as csvfile:
            reader = csv.reader(csvfile)
            for row in reader:
                if row[1] == self.train_class_name:
                    self.num_classes = len(row[2])
                    break

        # Build the backbone (ImageNet weights, no top) plus a softmax head.
        input_tensor = Input(shape=self.img_shape_full)
        if model_name == "Xception":
            base_model = Xception(input_tensor=input_tensor, weights='imagenet', include_top=False, classes=self.num_classes)
        elif model_name == "MobileNet":
            base_model = MobileNet(input_tensor=input_tensor, weights='imagenet', include_top=False, classes=self.num_classes)
        elif model_name == "DenseNet121":
            base_model = DenseNet121(input_tensor=input_tensor, weights='imagenet', include_top=False, classes=self.num_classes)
        else:
            # Previously an unsupported name crashed later with NameError; fail fast.
            raise ValueError("Unsupported model_name: " + model_name)

        x = base_model.output
        x = GlobalAveragePooling2D()(x)
        # NOTE(review): the original assigned Dropout(0.2)(x) to an unused
        # variable, so dropout never entered the graph; the dead line is
        # removed here without changing the resulting model.
        predictions = Dense(self.num_classes, activation='softmax')(x)

        # this is the model we will train
        model = Model(inputs=base_model.input, outputs=predictions)

        self.model = model
        print(model.summary())

        self.optimizer = optimizers.Adam(lr=learning_rate)

        model.compile(optimizer=self.optimizer, loss='categorical_crossentropy', metrics=['accuracy'])

        # Second CSV pass: collect every row of this class, then load images.
        with open('base/Annotations/label.csv', 'r') as csvfile:
            reader = csv.reader(csvfile)
            all_class_samples = []
            for row in reader:
                if row[1] != self.train_class_name:
                    continue
                all_class_samples.append(row)

            self.X = np.zeros((len(all_class_samples), img_size, img_size, 3))
            test_count = int(test_percentage * len(all_class_samples))
            index = 0
            print("Training " + train_class_name + " with: " + str(int((1 - test_percentage) * len(all_class_samples))) + ", Testing with: " + str(test_count), str(self.num_classes), "Classes")
            print("Loading images...")
            for row in all_class_samples:
                image = Image.open("base/" + row[0])
                img_array = np.asarray(image)
                if img_array.shape != self.img_shape_full:
                    # ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter.
                    image = image.resize((img_size, img_size), Image.LANCZOS)
                    img_array = np.asarray(image)
                self.X[index] = img_array
                # Position of "y" in the label string is the class index.
                self.Y.append(row[2].index("y"))
                if index % 500 == 0:
                    print(index)  # progress marker every 500 images
                index += 1

        self.Y = to_categorical(self.Y, num_classes=self.num_classes)

        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.X, self.Y, test_size=test_percentage, random_state=42)
        del self.X  # release the full image array; the splits hold copies

        # Inverse-frequency class weights so rare classes are not ignored.
        class_weight = {}
        class_count = np.sum(self.y_train, axis=0)
        print("Training Sample for each Class", class_count)
        for class_index in range(self.num_classes):
            class_weight[class_index] = 1 / (class_count[class_index] / np.sum(class_count)) / self.num_classes
        self.class_weight = class_weight
        print("Class weights: ", self.class_weight)
        os.makedirs(os.path.join("models", train_class_name, self.model_file))
        model.save(os.path.join("models", train_class_name, self.model_file, train_class_name + "_" + "model.h5"))
# --- Exemplo n.º 5 (score: 0) ---
    # NOTE(review): this span starts mid-function; `cnn_algo`, `input_model`,
    # `aggumentation_training` and `cwd` are defined outside the visible region.
    vgg = cnn_algo()
    # vgg.summary()

    for layers in vgg.layers:  # we are keeping the weights constant, it is already trained by the thousands of weights that is in imagenet
        layers.trainable = False  # we are not training all the layers

    x = Flatten()(
        vgg.output
    )  # need to add last layer, after flattning we can add the last layer
    folders = glob.glob(
        cwd + '/train/*')  # check number of folders inside training folder
    print(folders)

    prediction = Dense(len(folders), activation='softmax')(
        x)  # its the sigmoid activation function
    # NOTE(review): this Dropout layer object is created but immediately
    # overwritten by input_model() on the next line, so it has no effect --
    # confirm whether it was meant to be wired into the graph.
    model = Dropout(0.2)
    model = input_model()
    model.summary()  # view the structure of model
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    training_set = aggumentation_training()
    # NOTE(review): test_set is built from the same training-augmentation
    # helper -- presumably a separate validation generator was intended.
    test_set = aggumentation_training()
    # NOTE(review): fit_generator is deprecated in modern Keras (use model.fit);
    # validation_steps is passed without validation_data -- verify it is used.
    r = model.fit_generator(training_set,
                            epochs=3,
                            steps_per_epoch=len(training_set),
                            validation_steps=len(test_set))
    model.save('My_face_features_model.h5')