Example #1
    def model_run(self, path):
        csv_file = pd.read_csv(path + '/dataset.csv')
        Y_data = csv_file[csv_file.columns[-1]]
        X_data = csv_file.iloc[:, :-1]

        #X_data = X_data.drop('Date', axis=1)

        pre_model = self.model_create(len(X_data.columns))
        X_train, X_test, y_train, y_test = train_test_split(X_data,
                                                            Y_data,
                                                            test_size=0.2)

        pre_model.fit(X_train,
                      y_train,
                      epochs=int(self.epochs),
                      batch_size=10,
                      verbose=0)

        plot_model(pre_model,
                   to_file=path + '/result.png',
                   show_shapes=True,
                   show_layer_names=True)

        # evaluate() returns [loss, metric]; the second value is the metric, not predictions
        _, accuracy = pre_model.evaluate(X_test, y_test, verbose=0)
        print('%.3f' % (accuracy * 100), '%')
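The helper `model_create` is not shown in this example; a minimal sketch of what such a builder might look like (an assumption for illustration, not the original author's code), consistent with the single accuracy metric printed above:

    def model_create(self, n_features):
        # hypothetical builder, assumed for illustration only
        from keras.models import Sequential
        from keras.layers import Dense
        model = Sequential()
        model.add(Dense(16, activation='relu', input_shape=(n_features,)))
        model.add(Dense(1, activation='sigmoid'))
        model.compile(optimizer='adam',
                      loss='binary_crossentropy',
                      metrics=['accuracy'])
        return model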
Example #2
    def test_plot_model_with_add_loss(self):
        inputs = keras.Input(shape=(None, 3))
        outputs = keras.layers.Dense(1)(inputs)
        model = keras.Model(inputs, outputs)
        model.add_loss(math_ops.reduce_mean(outputs))
        dot_img_file = 'model_3.png'
        try:
            vis_utils.plot_model(model,
                                 to_file=dot_img_file,
                                 show_shapes=True,
                                 show_dtype=True,
                                 expand_nested=True)
            self.assertTrue(file_io.file_exists(dot_img_file))
            file_io.delete_file(dot_img_file)
        except ImportError:
            pass

        model = keras.Sequential(
            [keras.Input(shape=(None, 3)),
             keras.layers.Dense(1)])
        model.add_loss(math_ops.reduce_mean(model.output))
        dot_img_file = 'model_4.png'
        try:
            vis_utils.plot_model(model,
                                 to_file=dot_img_file,
                                 show_shapes=True,
                                 show_dtype=True,
                                 expand_nested=True)
            self.assertTrue(file_io.file_exists(dot_img_file))
            file_io.delete_file(dot_img_file)
        except ImportError:
            pass
Example #3
def buildClassifier(input_shape=(100, 100, 3)):
    # Initialising the CNN
    classifier = Sequential()
    classifier.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape, padding='same'))
    classifier.add(MaxPooling2D(pool_size=(4, 4), padding='same'))
    classifier.add(Dropout(0.5))  # added extra Dropout layer
    classifier.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
    classifier.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    classifier.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
    classifier.add(Dropout(0.5))  # added extra dropout layer
    classifier.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
    classifier.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    classifier.add(Dropout(0.2))  # was previously 0.25
    classifier.add(Conv2D(512, (3, 3), padding='same', activation='relu'))
    classifier.add(Conv2D(1024, (3, 3), activation='relu', padding='same'))
    classifier.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    # Step 3 - Flattening (moved ahead of the dense layers; applying Dense to
    # the unflattened 4D tensor was almost certainly unintended)
    classifier.add(Flatten())
    classifier.add(Dense(units=1024, activation='relu'))  # added new dense layer
    classifier.add(Dropout(0.2))  # was previously 0.25
    classifier.add(Dense(units=1024, activation='relu'))  # added new dense layer
    classifier.add(Dense(units=256, activation='relu'))  # added new dense layer
    # Step 4 - Full connection
    classifier.add(Dropout(0.2))
    classifier.add(Dense(units=1, activation='sigmoid'))
    classifier.summary()

    # Compiling the CNN
    classifier.compile(optimizer='adam',
                       loss='binary_crossentropy',
                       metrics=['accuracy'])
    plot_model(classifier, to_file='model_plot.png', show_shapes=True, show_layer_names=True)
    return classifier
Example #4
    def model(self):
        # Don't train the discriminator model weights
        self.discriminator.trainable = False

        # Send the image to the generator model
        generator_output = self.generator(self.input_image)

        # Send the actual input and generator output to discriminator model
        discriminator_out = self.discriminator(
            [self.input_image, generator_output])

        #  Final Model
        model = Model(self.input_image, [discriminator_out, generator_output])
        optimizer = Adam(learning_rate=0.0002, beta_1=0.5)
        model.compile(loss=['binary_crossentropy', 'mae'],
                      optimizer=optimizer,
                      loss_weights=[1, 100])
        print(
            "\n******************************************* GAN Model ********************************************"
        )
        model.summary()  # summary() prints itself; wrapping it in print() adds a stray "None"
        plot_model(model,
                   "modelplots/pix2pix/gan.png",
                   show_shapes=True,
                   show_layer_names=True)
        return model
Example #5
def lstm(hparams, input_dimension, max_length, output_dimension=300):
    # `max_length` moved ahead of the defaulted parameter: a non-default
    # argument cannot follow a default one. `X_train`, `y_train`, `X_test`,
    # `y_test` and `epochs` are assumed to come from the enclosing scope.
    step = X_train.shape[1]
    model = Sequential()
    model.add(
        Embedding(input_dimension, output_dimension, input_length=max_length))
    model.add(Dropout(0.25))
    model.add(
        Bidirectional(
            LSTM(units=10, activation="softmax", return_sequences=True)))
    # return_sequences=True above so the second LSTM receives 3D input
    model.add(
        Bidirectional(LSTM(units=10, activation="softmax")))
    model.add(Dense(20, activation="softmax"))
    model.add(Activation('softmax'))
    model.compile(loss='mean_squared_error',
                  optimizer='rmsprop',
                  metrics=['acc'])
    model.summary()
    plot_model(model,
               to_file='model_plot.png',
               show_shapes=True,
               show_layer_names=True)

    model.fit(X_train,
              y_train,
              epochs=epochs,
              batch_size=hparams[HP_BATCH_SIZE]
              )  # Run with 1 epoch to speed things up for demo purposes
    test_predictions = model.predict(X_test)
    _, accuracy = model.evaluate(X_test, y_test)

    return accuracy
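`HP_BATCH_SIZE` lives outside the snippet; a hedged sketch of the assumed TensorBoard HParams setup and a call (names and values are assumptions for illustration):

from tensorboard.plugins.hparams import api as hp

# assumed definitions; names mirror the snippet above
HP_BATCH_SIZE = hp.HParam('batch_size', hp.Discrete([16, 32, 64]))
hparams = {HP_BATCH_SIZE: 32}
accuracy = lstm(hparams, input_dimension=10000, max_length=100)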
Example #6
def save_model(model_name, model, model_args, kwargs):
    """Saves a model to a folder. Also saves the configuration files necessary to load the model.
    """
    paths = list(
        map(lambda f: int(f[6:]),
            filter(lambda s: s.startswith('model_'), os.listdir('.'))))
    if len(paths) == 0:
        folder = 'model_0'
    else:
        folder = 'model_%d' % (max(paths) + 1)
    os.mkdir(folder)
    model.save('%s/%s_sensor_%d_model.h5' %
               (folder, model_name, kwargs['sensor_id']))
    plot_model(model,
               '%s/%s_sensor_%d_model.png' %
               (folder, model_name, kwargs['sensor_id']),
               show_shapes=True)
    with open(
            "%s/%s_sensor_%d_model_args.json" %
        (folder, model_name, kwargs['sensor_id']), 'w') as f:
        json.dump(model_args, f)
    with open(
            "%s/%s_sensor_%d_kwargs.json" %
        (folder, model_name, kwargs['sensor_id']), 'w') as f:
        json.dump(kwargs, f)
    return folder
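A hypothetical invocation, assuming a compiled Keras `model` and a kwargs dict carrying the sensor id used in the file names:

# assumed arguments, for illustration only
folder = save_model('lstm', model, {'units': 64}, {'sensor_id': 3})
# creates e.g. model_0/lstm_sensor_3_model.h5, the .png plot and two .json files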
Example #7
def main():

    # plt.figure(figsize=(500, 500))
    # plt.imshow(x_train[5])
    # plt.show()
    train_ds, test_ds, races, train_amount, test_amount = dataset.load_dataset_tf(
    )
    train_ds = train_ds.shuffle(1200).repeat().batch(BATCH_SIZE)
    test_ds = test_ds.shuffle(test_amount).repeat().batch(BATCH_SIZE)
    STEPS_PER_EPOCH = ceil(train_amount / BATCH_SIZE)

    # for rnum in enumerate(my_ds.take(1)):
    model, model_name = race_model(len(races))
    model.summary()
    tb_callback = TensorBoard('../logs/' + model_name)
    model.fit(train_ds,
              epochs=5,
              steps_per_epoch=STEPS_PER_EPOCH,
              callbacks=[tb_callback])
    plot_model(model,
               '../models/' + model_name + '.png',
               show_shapes=True,
               show_layer_names=True)
    model.save('../models/' + model_name)
    test_loss, test_acc = model.evaluate(test_ds,
                                         steps=ceil(test_amount / BATCH_SIZE))

    print('Test accuracy:', test_acc)
Example #8
    def test_plot_model_with_wrapped_layers_and_models(self):
        inputs = keras.Input(shape=(None, 3))
        lstm = keras.layers.LSTM(6, return_sequences=True, name='lstm')
        x = lstm(inputs)
        # Add layer inside a Wrapper
        bilstm = keras.layers.Bidirectional(
            keras.layers.LSTM(16, return_sequences=True, name='bilstm'))
        x = bilstm(x)
        # Add model inside a Wrapper
        submodel = keras.Sequential(
            [keras.layers.Dense(32, name='dense', input_shape=(None, 32))])
        wrapped_dense = keras.layers.TimeDistributed(submodel)
        x = wrapped_dense(x)
        # Add shared submodel
        outputs = submodel(x)
        model = keras.Model(inputs, outputs)
        dot_img_file = 'model_2.png'
        try:
            vis_utils.plot_model(model,
                                 to_file=dot_img_file,
                                 show_shapes=True,
                                 show_dtype=True,
                                 expand_nested=True)
            self.assertTrue(file_io.file_exists(dot_img_file))
            file_io.delete_file(dot_img_file)
        except ImportError:
            pass
Example #9
def plot_subclass_model():
    # https://stackoverflow.com/questions/61427583/how-do-i-plot-a-keras-tensorflow-subclassing-api-model
    # `Network`, `height`, `width` and `input_channels` are assumed to be
    # defined elsewhere in the module.
    Network().build_graph(in_shape=(height, width, input_channels)).summary()
    plot_model(Network().build_graph(in_shape=(height, width, input_channels)),
               show_shapes=True,
               show_layer_names=True,
               to_file="archi.png")
    print("archi.png created")
Example #10
    def build_and_train(cls,
                        X: np.ndarray,
                        y: np.ndarray,
                        model_path: str,
                        epochs: int = EPOCHS):
        model = cls.build_model(X.shape[2])
        plot_model(model, show_shapes=True)
        # model, history = cls.train_model(model, X, y, model_path, epochs)
        # show_history(history, cls.__class__.__name__)
        return cls(model)
Example #11
def buildClassifier(input_shape=(100, 100, 3)):
    """
    This creates the CNN algorithm.
    Args:
        input_shape(tuple): This is the image shape of (100,100,3)
    Returns:
        classifier(sequential): This is the sequential model.
    """
    # Initialising the CNN
    opt = Adam(learning_rate=0.0002)
    classifier = Sequential()
    classifier.add(
        Conv2D(32,
               kernel_size=(3, 3),
               activation='relu',
               input_shape=input_shape,
               padding='same'))
    classifier.add(MaxPooling2D(pool_size=(3, 3), padding='same'))
    classifier.add(Dropout(0.5))  # added extra Dropout layer
    classifier.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
    classifier.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    classifier.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
    classifier.add(Dropout(0.5))  # added extra dropout layer
    classifier.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
    classifier.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    classifier.add(Dropout(0.2))  # was previously 0.25
    classifier.add(Conv2D(512, (3, 3), padding='same', activation='relu'))
    classifier.add(Conv2D(1024, (3, 3), activation='relu', padding='same'))
    classifier.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    classifier.add(
        Flatten())  # a Flatten is needed before the dense layers
    classifier.add(Dense(units=1024,
                         activation='relu'))  # added new dense layer
    classifier.add(Dropout(0.2))  # was previously 0.25
    # Step 3 - Flattening
    #classifier.add(Flatten())
    classifier.add(Dense(units=1024,
                         activation='relu'))  # added new dense layer
    classifier.add(Dense(units=256,
                         activation='relu'))  # added new dense layer
    # Step 4 - Full connection
    classifier.add(Dropout(0.2))
    classifier.add(Dense(units=1, activation='sigmoid'))
    classifier.summary()

    # Compiling the CNN
    classifier.compile(optimizer=opt,
                       loss='binary_crossentropy',
                       metrics=['accuracy'])
    plot_model(classifier,
               to_file='model_plot.png',
               show_shapes=True,
               show_layer_names=True)
    return classifier
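Usage might look like the following, where `X_train` / `y_train` are assumed arrays of 100x100 RGB images with binary labels:

# X_train, y_train assumed to be prepared elsewhere
classifier = buildClassifier(input_shape=(100, 100, 3))
classifier.fit(X_train, y_train,
               epochs=10, batch_size=32, validation_split=0.1)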
Example #12
def run_cnn(data_loader_func,
            batch_size=BATCH_SIZE,
            epochs=EPOCHS,
            with_augmentation=True):
    test_name = str(USED_MODEL_NUMBER) + '_epoch' + str(
        EPOCHS) + '_batch' + str(BATCH_SIZE)
    clear_logs()
    print('------------- CNN model  ')
    print('------------- Loading data  ')
    X_train, y_train, X_test, y_test, X_val, y_val = pre_processing_dataset(
        *data_loader_func())
    log('Test name: ' + test_name + "\n")
    log('Batch size: ' + str(batch_size))
    log('Epochs: ' + str(epochs))
    log('Started data size qty: ' + str(X_train.shape[0]))
    print('------------- Creating model  ')
    cnn_model = create_model()
    print('------------- Saving model image ')
    plot_model(cnn_model,
               to_file=MODELS_PATH + MODEL_IMG_PREFIX + test_name + ".png")
    print('------------- Compiling model  ')
    compile_model(cnn_model)
    start_time = time.time()
    print('------------- Training model')
    augm_gen_func = augm_gen if with_augmentation else None
    history = feed_model(cnn_model,
                         X_train,
                         y_train,
                         X_val,
                         y_val,
                         batch_size,
                         epochs,
                         augm_gen_func=augm_gen_func)
    print('------------- Saving data fitting history graphs ')
    augm_suffix = "_augm" if with_augmentation else "_norm"
    plot_history_graphs(history, MODELS_PATH,
                        HISTORY_IMG_PREFIX + test_name + augm_suffix)
    print('-------------  Making labels predictions for test data')
    predictions = make_prediction(cnn_model, X_test)
    print('------------- Predicting labels for test data ')
    predicted_labels = predict_labels(predictions)
    print('------------- Saving prediction results to file  ')
    save_labels_to_csv(predicted_labels, LOGS_PATH,
                       PREDICTIONS_PREFIX + test_name)
    print('------------- Evaluating accuracy  ')
    loss, accuracy = evaluate_accuracy(cnn_model, X_test, y_test)
    log('Prediction accuracy: ' + str(round(accuracy * 100, 2)) + '% \n' +
        'Prediction loss: ' + str(round(loss, 2)) + '\n' +
        'Total calculation time: ' +
        str(convert_time(time.time() - start_time)))
    log_printer(log_text, LOGS_PATH + LOG_PREFIX, test_name)
    return predictions, predicted_labels, test_name
Example #13
 def test_plot_model_rnn(self):
   model = keras.Sequential()
   model.add(
       keras.layers.LSTM(
           16, return_sequences=True, input_shape=(2, 3), name='lstm'))
   model.add(keras.layers.TimeDistributed(keras.layers.Dense(5, name='dense')))
   dot_img_file = 'model_2.png'
   try:
     vis_utils.plot_model(model, to_file=dot_img_file, show_shapes=True)
     self.assertTrue(file_io.file_exists(dot_img_file))
     file_io.delete_file(dot_img_file)
   except ImportError:
     pass
Example #14
 def test_plot_model_cnn(self):
   model = keras.Sequential()
   model.add(
       keras.layers.Conv2D(
           filters=2, kernel_size=(2, 3), input_shape=(3, 5, 5), name='conv'))
   model.add(keras.layers.Flatten(name='flat'))
   model.add(keras.layers.Dense(5, name='dense'))
   dot_img_file = 'model_1.png'
   try:
     vis_utils.plot_model(model, to_file=dot_img_file, show_shapes=True)
     self.assertTrue(file_io.file_exists(dot_img_file))
     file_io.delete_file(dot_img_file)
   except ImportError:
     pass
Example #16
def save_nn_model(model, output_dir='.', model_name='model.h5'):
    """Save keras model's object, weights, architecture and graph image.

    Parameters
    ----------
    model : Instance of `keras Model`
        The neural network model you want to save.
    output_dir : str
        The directory where the model is saved.
    model_name : str, default 'model.h5'
        Model name with an .h5 (hdf5) extension.

    Returns
    -------
    None
    """

    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)

    # split model_name and extension name
    name, extension = os.path.splitext(model_name)
    # check that the extension is .h5 or .hdf5 (dot escaped, full match anchored)
    pattern = re.compile(r'\.(h5|hdf5)$')
    if not re.match(pattern, extension):
        extension = '.h5'
    # extension = '.h5' if not extension else extension

    # save complete model
    fullname = os.path.join(output_dir, model_name)
    model.save(fullname)
    print(model.name + ' saved as ' + fullname)

    # save model architecture
    fullname = os.path.join(output_dir, name + '_architecture.json')
    model_json = model.to_json()
    with open(fullname, "w") as json_file:
        json_file.write(model_json)
    print(model.name + ' saved as ' + fullname)

    # save model weights
    fullname = os.path.join(output_dir, name + '_weights' + extension)
    model.save_weights(fullname)
    print(model.name + ' saved as ' + fullname)

    # save simple graph image
    plot_model(model,
               to_file=os.path.join(output_dir, 'model_graph.png'),
               show_shapes=True)
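For example (assumed paths), saving a compiled model produces four artifacts:

# assumed call, for illustration only
save_nn_model(model, output_dir='./artifacts', model_name='cnn.h5')
# ./artifacts/cnn.h5                  complete model
# ./artifacts/cnn_architecture.json   architecture only
# ./artifacts/cnn_weights.h5          weights only
# ./artifacts/model_graph.png         plot_model graph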
Example #17
    def plot_model(self, show_shapes=True, show_layer_names=True):
        """
        Creates a file with a plot of the model/network architecture,
        optionally showing tensor shapes and layer names.
        :param show_shapes: whether shape information should be shown
        :param show_layer_names: whether layer names should be shown
        """
        path = \
            os.path.join(self.fig_path, '{}_model_architecture_{}.png'.format(
                self.name, datetime.now().strftime('%Y%m%d%H%M%S')))
        plot_model(self.model,
                   to_file=path,
                   show_shapes=show_shapes,
                   show_layer_names=show_layer_names)
Example #18
    def test_plot_model_rnn(self):
        model = keras.Sequential()
        model.add(
            keras.layers.LSTM(16,
                              return_sequences=True,
                              input_shape=(2, 3),
                              name='lstm'))
        model.add(
            keras.layers.TimeDistributed(keras.layers.Dense(5, name='dense')))
        dot_img_file = 'model_2.png'
        try:
            vis_utils.plot_model(model, to_file=dot_img_file, show_shapes=True)
            self.assertTrue(file_io.file_exists(dot_img_file))
            file_io.delete_file(dot_img_file)
        except ImportError:
            pass
Example #19
    def __init__(self, game):

        self.game = game
        self.shape = (game._base_board.height, game._base_board.width)
        self.input_board = Input(self.shape, dtype=float)
        self.possible_moves_size = self.shape[1]
        self.checkpoint_dir = "checkpoints"
        create_dir(self.checkpoint_dir)

        X = Reshape((self.shape[0], self.shape[1], 1))(self.input_board)
        h_conv1 = ReLU()(BatchNormalization(axis=3)(Conv2D(config.num_channels,
                                                           3,
                                                           padding='same',
                                                           use_bias=False)(X)))
        h_conv2 = ReLU()(BatchNormalization(axis=3)(Conv2D(
            config.num_channels, 3, padding='same', use_bias=False)(h_conv1)))
        h_conv3 = ReLU()(BatchNormalization(axis=3)(Conv2D(
            config.num_channels, 3, padding='valid', use_bias=False)(h_conv2)))
        h_conv4 = ReLU()(BatchNormalization(axis=3)(Conv2D(
            config.num_channels, 3, padding='valid', use_bias=False)(h_conv3)))
        h_conv4_flat = Reshape((config.num_channels * (self.shape[0] - 4) *
                                (self.shape[1] - 4), ))(h_conv4)
        s_fc1 = Dropout(config.dropout)(ReLU()(BatchNormalization(axis=1)(
            Dense(1024, use_bias=False)(h_conv4_flat))))
        s_fc2 = Dropout(config.dropout)(ReLU()(BatchNormalization(axis=1)(
            Dense(512, use_bias=False)(s_fc1))))

        self.pi = Dense(self.possible_moves_size,
                        activation='softmax',
                        name='pi')(s_fc2)
        self.v = Dense(1, activation='tanh', name='v')(s_fc2)

        self.target_pi = Input([None, self.possible_moves_size], dtype=float)
        self.target_v = Input([None], dtype=float)

        model = Model(inputs=self.input_board, outputs=(self.pi, self.v))
        model.compile(loss=total_loss, optimizer=Adam(config.lr))
        model.summary()  # summary() prints itself; print() would add a stray "None"
        plot_model(model,
                   "modelplots/model.png",
                   show_shapes=True,
                   show_layer_names=True)
        self.model = model
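`total_loss` is defined elsewhere; since the model has two named outputs ('pi' and 'v'), a per-output formulation is one plausible (assumed) equivalent, in the AlphaZero style:

# a sketch, not the author's `total_loss`: per-head losses keyed by output name
model.compile(loss={'pi': 'categorical_crossentropy',
                    'v': 'mean_squared_error'},
              optimizer=Adam(config.lr))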
Example #20
    def model(self):
        down1 = self.encoder(self.input_image, 64, batch_norm=False)
        down2 = self.encoder(down1, 128)
        down3 = self.encoder(down2, 256)
        down4 = self.encoder(down3, 512)
        down5 = self.encoder(down4, 512)
        down6 = self.encoder(down5, 512)
        down7 = self.encoder(down6, 512)
        # No batch normalization on the bottleneck layer
        bottleneck = Conv2D(512, (4, 4),
                            strides=(2, 2),
                            padding='same',
                            kernel_initializer=self.init)(down7)
        bottleneck = Activation('relu')(bottleneck)
        # decoder model
        up1 = self.decoder(bottleneck, down7, 512)
        up2 = self.decoder(up1, down6, 512)
        up3 = self.decoder(up2, down5, 512)
        up4 = self.decoder(up3, down4, 512, dropout=False)
        up5 = self.decoder(up4, down3, 256, dropout=False)
        up6 = self.decoder(up5, down2, 128, dropout=False)
        up7 = self.decoder(up6, down1, 64, dropout=False)
        # output
        out = Conv2DTranspose(3, (4, 4),
                              strides=(2, 2),
                              padding='same',
                              kernel_initializer=self.init)(up7)
        out_image = Activation('tanh')(out)
        # define model
        model = Model(self.input_image, out_image)
        print(
            "\n**************************************** Generator Model *****************************************"
        )
        model.summary()
        plot_model(model,
                   "modelplots/pix2pix/generator_model.png",
                   show_shapes=True,
                   show_layer_names=True)
        return model
Example #21
def get_simple_cnn_model():
    '''
    Builds a CNN classification model. The input is a full image containing 7
    license-plate characters; the output for each character is a one-hot
    classification encoding.
    '''
    input_tensor = models.Input((image_height, image_width, 3))
    x = input_tensor

    for i in range(3):
        x = layers.Conv2D(32 * 2**i, (3, 3), activation='relu')(x)
        x = layers.Conv2D(32 * 2**i, (3, 3), activation='relu')(x)
        x = layers.MaxPool2D(pool_size=(2, 2))(x)

    x = layers.Flatten()(x)
    x = layers.Dropout(0.25)(x)

    x_list = []
    # After the fully connected layers, collect all outputs into a list; the 7 elements are the predictions for the 7 characters
    #xx = [layers.Reshape((-1,65))(layers.Dense(n_class ,activation='softmax', name='c%d'%(i+1))(x)) for i in range(plate_str_length)]
    #print("#### ", type(xx))
    for i in range(plate_str_length):
        xi = layers.Dense(n_class, activation='softmax',
                          name='c%d' % (i + 1))(x)
        xi = layers.Reshape((1, 65))(xi)  # shape (1, 65); Reshape ignores the batch dimension

        #print('### xi shape: ', xi.output_shape)
        x_list.append(xi)

    # Concatenate the list along axis 1 (axis 0 is the batch dimension)
    concated = layers.concatenate(x_list, axis=1)
    #print("#### output shape: ", K.shape(concated))

    model = models.Model(inputs=input_tensor, outputs=concated)
    print("#### model output shape: ", model.output_shape)

    # Plot the model architecture
    print("### plot model")
    plot_model(model, to_file='simple_cnn.png', show_shapes=True)
    return model
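The function relies on module-level globals; with assumed values the output shape can be sanity-checked:

# assumed globals, for illustration only
image_height, image_width = 80, 240
n_class, plate_str_length = 65, 7

model = get_simple_cnn_model()
assert model.output_shape == (None, 7, 65)  # one 65-way softmax per character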
Example #22
    def base_model(self):
        '''
        Combines the outputs of the microaneurysm and the
        hemorrhage/hard-exudates sub-models into the final classifier.
        '''

        # create the input to our final set of layers as the *output* of both
        # the MLP and CNN
        combinedInput = concatenate([
            self.__model_microaneurysm.output,
            self.__model_hemorrhage_and_hard_exudates.output
        ])
        # our final FC layer head will have two dense layers, the final one
        # being our regression head
        out = Flatten()(combinedInput)
        x = Dense(64, activation="relu")(out)
        x = Dense(16, activation="relu")(x)
        x = Dense(5, activation="softmax")(x)
        # our final model accepts the inputs of both sub-models and outputs a
        # 5-way softmax prediction
        self.model = Model(inputs=[
            self.__model_microaneurysm.input,
            self.__model_hemorrhage_and_hard_exudates.input
        ],
                           outputs=x)

        opt = Adam(learning_rate=0.001)
        self.model.compile(optimizer=opt,
                           loss="categorical_crossentropy",
                           metrics=['accuracy'])
        # out = Dense(1, activation='sigmoid')(concatenated)
        # model = Model([digit_a, digit_b], out)
        plot_model(self.model,
                   to_file='model_plot.png',
                   show_shapes=True,
                   show_layer_names=True)
        self.model.summary()
Example #23
    def model(self):
        out = LeakyReLU(0.2)(self.layer_1(self.merged_image))
        out = LeakyReLU(0.2)(BatchNormalization()(self.layer_2(out)))
        out = LeakyReLU(0.2)(BatchNormalization()(self.layer_3(out)))
        out = LeakyReLU(0.2)(BatchNormalization()(self.layer_4(out)))
        out = LeakyReLU(0.2)(BatchNormalization()(self.layer_5(out)))

        patch_out = Activation('sigmoid')(self.layer_6(out))
        model = Model([self.input_image, self.target_image], patch_out)

        # Using Adam optimizer
        optimizer = Adam(learning_rate=0.0002, beta_1=0.5)
        model.compile(loss='binary_crossentropy',
                      optimizer=optimizer,
                      loss_weights=[0.5])
        print(
            "\n************************************* Discriminator Model ****************************************"
        )
        model.summary()
        plot_model(model,
                   "modelplots/pix2pix/discriminator_model.png",
                   show_shapes=True,
                   show_layer_names=True)
        return model
Example #24
def cnn(train_dir, test_dir):
    # Constant definitions
    NUM_CLASSES = 4  # number of classes to recognize
    IMAGE_SIZE = 256  # image size used for training [px]
    IMAGE_PIXELS = IMAGE_SIZE * IMAGE_SIZE * 3  # dimensionality of an image
    LABEL_ANNOTATION_PATH = './label_annotation.txt'
    LOG_TRAINING_ACCURACY_GRAPH_PATH = './log/cnn/training_accuracy.png'
    LOG_TRAINING_LOSS_GRAPH_PATH = './log/cnn/training_loss.png'
    LOG_TRAINING_MODEL_PATH = './log/cnn/model.png'
    # NOTE: this report label says SGD, but the model below is compiled with Adam
    TRAINING_OPTIMIZER = "SGD (stochastic gradient descent)"
    ACTIVATION_FUNC = "relu"  # activation function
    # Import the training dataset
    train_image, train_label, train_path = import_dataset(
        train_dir, IMAGE_SIZE, IMAGE_SIZE)

    # Build the Keras model
    model = Sequential()
    # Convolutional layers
    model.add(
        Conv2D(3,
               kernel_size=3,
               activation=ACTIVATION_FUNC,
               input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(
        Conv2D(3,
               kernel_size=3,
               activation=ACTIVATION_FUNC,
               input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(
        Conv2D(3,
               kernel_size=3,
               activation=ACTIVATION_FUNC,
               input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Activation(ACTIVATION_FUNC))
    model.add(Dropout(0.2))
    # Fully connected layers
    model.add(Dense(200))
    model.add(Activation(ACTIVATION_FUNC))
    model.add(Dropout(0.2))
    model.add(Dense(200))
    model.add(Activation(ACTIVATION_FUNC))
    model.add(Dropout(0.2))
    model.add(Dense(200))
    model.add(Activation(ACTIVATION_FUNC))
    model.add(Dropout(0.2))
    model.add(Dense(200))
    model.add(Activation(ACTIVATION_FUNC))
    model.add(Dropout(0.2))
    model.add(Dense(NUM_CLASSES))
    model.add(Activation("softmax"))

    # Use Adam as the optimizer
    opt = Adam(learning_rate=0.001)
    # Compile the model
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])
    # Run training (`nb_epoch` was renamed `epochs` in Keras 2)
    Y = to_categorical(train_label, NUM_CLASSES)
    history = model.fit(train_image,
                        Y,
                        epochs=40,
                        batch_size=100,
                        validation_split=0.1)

    export.plot(history)

    # Import the test dataset
    test_image, test_label, test_path = import_dataset(test_dir, IMAGE_SIZE,
                                                       IMAGE_SIZE)

    result = model.predict_classes(test_image)
    result_prob = model.predict_proba(test_image)
    sum_accuracy = 0.0
    for i in range(test_image.shape[0]):
        print("label:", test_label[i], "result:", result[i], "prob: ",
              result_prob[i])
        if test_label[i] == result[i]:
            sum_accuracy += 1
    sum_accuracy /= test_image.shape[0]
    print("accuracy: ", sum_accuracy)

    plot_model(model, show_shapes=True, to_file=LOG_TRAINING_MODEL_PATH)

    # Intermediate layer outputs
    imm_layer = ['conv2d', 'conv2d_1', 'conv2d_2']
    for layer_name in imm_layer:
        # Build a sub-model up to the intermediate layer
        intermediate_layer_model = Model(
            inputs=model.input, outputs=model.get_layer(layer_name).output)
        # Inspect the output via model.predict
        intermediate_output = intermediate_layer_model.predict(test_image)
        path = os.getcwd() + "/log/cnn/" + layer_name
        if not os.path.exists(path):  # create the output directory if missing
            os.mkdir(path)
        for i in range(intermediate_output.shape[0]):
            cv2.imwrite(path + '/immidiate_' + str(i) + '.png',
                        intermediate_output[i] * 255)

    # Export the results to an HTML file
    result_dict = {'acc': 0, 'n_img': 0, 'opt': "", 'act_func': ""}
    result_dict['acc'] = sum_accuracy
    result_dict['n_img'] = train_image.shape[0]
    result_dict['opt'] = TRAINING_OPTIMIZER
    result_dict['act_func'] = ACTIVATION_FUNC
    export.cnn_html(result_dict, test_image, test_label, test_path, result,
                    result_prob, imm_layer)
Example #25
#
# Example of using plot_model()
#
import tensorflow as tf
from tensorflow import keras
from tensorflow.python.keras.applications import VGG16
from tensorflow.python.keras.utils.vis_utils import plot_model
model = VGG16()
# As PNG
plot_model(model, to_file='model_output.png', show_shapes=True,
           show_layer_names=True, rankdir="TB")
# As SVG
plot_model(model, to_file='model_output.svg', show_shapes=True,
           show_layer_names=True, rankdir="TB")
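plot_model infers the output format from the file extension; for direct control over the rendering, one can also go through model_to_dot (a sketch, assuming pydot and Graphviz are installed):

from tensorflow.python.keras.utils.vis_utils import model_to_dot

dot = model_to_dot(model, show_shapes=True, rankdir="TB")
dot.write('model_output.svg', format='svg')  # pydot renders any Graphviz-supported format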
Example #26
def createCNN1model(dataType):
    def getDataWithLabel():
        data = pd.read_pickle("DataSets/" + dataType + "_data.pkl")
        data = data.sample(frac=1).reset_index(drop=True)
        #
        # put labels into y_train variable
        labels = data['Category']
        # Drop 'label' column
        data = data.drop(labels=['Category'], axis=1)
        return data, labels

    def labelEncode(i):
        if 'blues' == i:
            return 0
        elif 'classical' == i:
            return 1
        elif 'country' == i:
            return 2
        elif 'disco' == i:
            return 3
        elif 'hiphop' == i:
            return 4
        elif 'jazz' == i:
            return 5
        elif 'metal' == i:
            return 6
        elif 'pop' == i:
            return 7
        elif 'reggae' == i:
            return 8
        else:
            return 9

    def labelDecode(i):
        if 0 == i:
            return 'blues'
        elif 1 == i:
            return "classical"
        elif 2 == i:
            return "country"
        elif 3 == i:
            return "disco"
        elif 4 == i:
            return "hiphop"
        elif 5 == i:
            return "jazz"
        elif 6 == i:
            return "metal"
        elif 7 == i:
            return "pop"
        elif 8 == i:
            return "reggae"
        else:
            return "rock"

    def fitLabelEncoder(labels):
        labelsEncode = []
        for i in range(labels.shape[0]):
            labelsEncode.append(labelEncode(labels[i]))
        labelsEncode = np.array(labelsEncode)
        return labelsEncode

    def fitLabelDecoder(labels):
        labelsDecode = []
        for i in range(labels.shape[0]):
            labelsDecode.append(labelDecode(labels[i]))
        labelsDecode = np.array(labelsDecode)
        return labelsDecode

    def createTestAndTrain():
        X_train, Y_train = getDataWithLabel()
        # Normalize the data
        X_train = X_train.astype('float16')
        X_train = X_train / 255.0
        print("Data was normalized..")
        print("Data shape: ", X_train.shape)
        #Reshape to matrix
        X_train = X_train.values.reshape(-1, 240, 240, 3)
        print("Data was reshaped..")
        #LabelEncode
        #labels = preprocessing.LabelEncoder().fit_transform(labels)
        Y_train = fitLabelEncoder(Y_train)
        print("Data was encoded..")
        #int to vector
        Y_train = to_categorical(Y_train, num_classes=10)
        #train and test data split

        #X_train, X_test, Y_train, Y_test= train_test_split(X_train, Y_train, test_size=0.1, random_state=42)
        #return X_train, X_test, Y_train, Y_test;
        return X_train, Y_train

    def createModel(X_train):
        model = Sequential()
        #
        model.add(
            Conv2D(filters=16,
                   kernel_size=(3, 3),
                   padding='Same',
                   activation='relu',
                   input_shape=(X_train.shape[1], X_train.shape[2],
                                X_train.shape[3])))
        model.add(MaxPool2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))
        #
        model.add(
            Conv2D(filters=32,
                   kernel_size=(3, 3),
                   padding='Same',
                   activation='relu'))
        model.add(MaxPool2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))
        #
        model.add(Flatten())
        model.add(Dense(256, activation="relu"))
        model.add(Dropout(0.5))
        model.add(Dense(10, activation="softmax"))

        # Define the optimizer
        #optimizer = tf.compat.v1.train.AdamOptimizer(1e-3, epsilon=1e-4)
        optimizer = tf.keras.optimizers.Adam(learning_rate=0.001,
                                             beta_1=0.9,
                                             beta_2=0.999)

        model.compile(optimizer=optimizer,
                      loss="categorical_crossentropy",
                      metrics=["accuracy"])

        return model

    #set early stopping criteria
    pat = 10  # number of epochs with no improvement after which training will stop
    early_stopping = EarlyStopping(monitor='val_loss', patience=pat, verbose=1)

    #define the model checkpoint callback -> this will keep on saving the model as a physical file
    checkpointPath = dataType + '_CNN1_checkpoint.h5'
    model_checkpoint = ModelCheckpoint(checkpointPath,
                                       verbose=1,
                                       save_best_only=True)

    def plotAccLossGraphics(history):
        plt.title('Accuracies vs Epochs')
        plt.plot(history.history["val_accuracy"], label='Validation Acc')
        plt.plot(history.history["accuracy"], label='Training Acc')
        plt.legend()
        plt.show()
        # Plot the loss and accuracy curves for training and validation
        plt.plot(history.history['val_loss'], label="validation loss ")
        plt.plot(history.history['loss'], label="train loss ")
        plt.title("Test Loss")
        plt.xlabel("Number of Epochs")
        plt.ylabel("Loss")
        plt.legend()
        plt.show()

    def plotCategories(y_train, val_y):
        Y_train_classes = np.argmax(y_train, axis=1)
        Y_train_classes = fitLabelDecoder(Y_train_classes)

        plt.figure(figsize=(15, 7))
        g = sns.countplot(Y_train_classes, palette="icefire")
        plt.title("Train Number of digit classes")
        plt.show()

        Y_val_classes = np.argmax(val_y, axis=1)
        Y_val_classes = fitLabelDecoder(Y_val_classes)
        plt.figure(figsize=(15, 7))
        g = sns.countplot(Y_val_classes, palette="icefire")
        plt.title("Validation Number of digit classes")
        plt.show()
        gc.collect()

    def fit_and_evaluate(train_x, val_x, train_y, val_y):
        model = None
        gc.collect()
        model = createModel(train_x)
        batch_size = 32
        epochs = 30
        gc.collect()
        datagen = ImageDataGenerator(zoom_range=0.2, horizontal_flip=False)
        print("DataGen Started..")
        datagen.fit(train_x)
        print("DataGen Finished..")
        gc.collect()
        # fit_generator is deprecated; model.fit accepts generators directly
        results = model.fit(
            datagen.flow(train_x, train_y, batch_size=batch_size),
            epochs=epochs,
            callbacks=[early_stopping, model_checkpoint],
            verbose=1,
            validation_data=(val_x, val_y))
        gc.collect()
        print("Val Score: ", model.evaluate(val_x, val_y))
        return model, results

    def fitWithKfold(X, Y):
        n_folds = 5
        cv = model_selection.KFold(n_splits=n_folds, shuffle=True)
        t0 = time.time()
        i = 0
        maxAcc = 0
        accuracies = []
        for train_index, test_index in cv.split(X):
            i = i + 1
            xx_train, xx_test = X[train_index], X[test_index]
            yy_train, yy_test = Y[train_index], Y[test_index]
            gc.collect()
            print("xx_train data shape: ", xx_train.shape)
            print("xx_test data shape: ", xx_test.shape)
            t_x, val_x, t_y, val_y = train_test_split(
                xx_train,
                yy_train,
                test_size=0.1,
                random_state=np.random.randint(1, 1000, 1)[0])
            gc.collect()
            print("t_x data shape: ", t_x.shape)
            plotCategories(t_y, val_y)
            model, history = fit_and_evaluate(t_x, val_x, t_y, val_y)
            plotAccLossGraphics(history)
            gc.collect()
            print("Ended Fold: ", i)
            acc = predictTest(xx_test, yy_test, model, i)
            accuracies.append(acc)
            if acc > maxAcc:
                maxAcc = acc
                maxAccFold = i
                bestX_test = xx_test
                bestY_test = yy_test
        print("max accuracy: ", maxAcc, " on fold:", maxAccFold)
        return accuracies, bestX_test, bestY_test

    def predictTest(X_test, Y_test, lmodel, fold):
        Y_pred = lmodel.predict(X_test)
        # Convert predictions classes to one hot vectors
        Y_pred_classes = np.argmax(Y_pred, axis=1)
        Y_pred_classes = fitLabelDecoder(Y_pred_classes)
        Y_test_label = np.argmax(Y_test, axis=1)
        Y_test_label = fitLabelDecoder(Y_test_label)
        # compute the confusion matrix
        labels = [
            "blues", "classical", "country", "disco", "hiphop", "jazz",
            "metal", "pop", "reggae", "rock"
        ]
        confusion_mtx = confusion_matrix(Y_test_label, Y_pred_classes)
        acc = metrics.accuracy_score(Y_test_label, Y_pred_classes) * 100
        print(fold, 'th Accuracy percentage:', acc)

        return acc

    # confusion matrix / precision-recall
    def drawConfusionMatrix(X_test, Y_test, lmodel):

        Y_pred = lmodel.predict(X_test)
        # Convert predictions classes to one hot vectors
        Y_pred_classes = np.argmax(Y_pred, axis=1)
        Y_pred_classes = fitLabelDecoder(Y_pred_classes)
        Y_test_label = np.argmax(Y_test, axis=1)
        Y_test_label = fitLabelDecoder(Y_test_label)
        # compute the confusion matrix
        labels = [
            "blues", "classical", "country", "disco", "hiphop", "jazz",
            "metal", "pop", "reggae", "rock"
        ]
        confusion_mtx = confusion_matrix(Y_test_label, Y_pred_classes)
        # plot the confusion matrix
        f, ax = plt.subplots(figsize=(8, 8))
        sns.heatmap(confusion_mtx,
                    annot=True,
                    linewidths=0.01,
                    cmap="Greens",
                    linecolor="gray",
                    fmt='.1f',
                    ax=ax,
                    xticklabels=labels,
                    yticklabels=labels)
        plt.yticks(rotation=0)
        plt.xlabel("Predicted Label")
        plt.ylabel("True Label")
        plt.title("Confusion Matrix")
        plt.show()
        acc = metrics.accuracy_score(Y_test_label, Y_pred_classes) * 100

        print(classification_report(Y_test_label, Y_pred_classes))
        score = lmodel.evaluate(X_test, Y_test, verbose=0)
        print('Test loss:', score[0])
        print('Test accuracy:', score[1])

    X_train, Y_train = createTestAndTrain()
    #Visualize Model
    from tensorflow.python.keras.utils.vis_utils import plot_model
    model = createModel(X_train)
    plot_model(model,
               to_file='model_plot_Cnn1.png',
               show_shapes=True,
               show_layer_names=True)

    accuraciesList, bestX_test, bestY_test = fitWithKfold(X_train, Y_train)
    accuracies = np.array(accuraciesList)
    meanAccuracy = np.mean(accuracies)
    print(meanAccuracy)
    bestModel = tf.keras.models.load_model(dataType + '_CNN1_checkpoint.h5')
    drawConfusionMatrix(bestX_test, bestY_test, bestModel)
Example #27

    print('(3) split data set...')
    p1 = int(len(data) * (1 - VALIDATION_SPLIT - TEST_SPLIT))
    p2 = int(len(data) * (1 - TEST_SPLIT))
    x_train = data[:p1]
    y_train = labels[:p1]
    x_val = data[p1:p2]
    y_val = labels[p1:p2]
    x_test = data[p2:]
    y_test = labels[p2:]
    print('train docs: ' + str(len(x_train)), 'val docs: ' + str(len(x_val)), 'test docs: ' + str(len(x_test)))


    print('(4) training model...')
    model = Sequential()
    model.add(Embedding(len(word_index) + 1, EMBEDDING_DIM, input_length=MAX_SEQUENCE_LENGTH))
    model.add(LSTM(200, dropout=0.2, recurrent_dropout=0.2))
    model.add(Dropout(0.2))
    model.add(Dense(labels.shape[1], activation='softmax'))
    model.summary()
    plot_model(model, to_file=os.path.join(ckpt_path, 'lstm_model.png'), show_shapes=True)

    model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
    print(model.metrics_names)
    model.fit(x_train, y_train, validation_data=(x_val, y_val), epochs=2, batch_size=128)
    model.save(os.path.join(ckpt_path, 'lstm.h5'))

    print('(5) testing model...')
    print(model.evaluate(x_test, y_test))
Example #28
                                weights=[embedding_matrix],
                                input_length=MAX_SEQUENCE_LENGTH,
                                trainable=False)

    print('(5) training model...')
    model = Sequential()
    model.add(embedding_layer)
    model.add(Dropout(0.2))
    model.add(Conv1D(250, 3, padding='valid', activation='relu', strides=1))
    model.add(MaxPooling1D(3))
    model.add(Flatten())
    model.add(Dense(EMBEDDING_DIM, activation='relu'))
    model.add(Dense(labels.shape[1], activation='softmax'))
    model.summary()
    plot_model(model,
               to_file=os.path.join(ckpt_path, 'word_vector_cnn_model.png'),
               show_shapes=True)

    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['acc'])
    print(model.metrics_names)
    model.fit(x_train,
              y_train,
              validation_data=(x_val, y_val),
              epochs=2,
              batch_size=128)
    model.save(os.path.join(ckpt_path, 'word_vector_cnn.h5'))

    print('(6) testing model...')
    print(model.evaluate(x_test, y_test))
Example #29
    def __init__(self,
                 inputs=None,
                 targets=None,
                 loss_func="mse",
                 optimizer="adam",
                 load_weights_from=None,
                 plot_to_file=None,
                 **kwargs):
        # strictly check for inputs to be of type variable.
        inputs = to_list(inputs)
        if not all([is_variable(x) for x in inputs]):
            raise ValueError(
                'Please provide a `list` of `Variable` or `RadialBasis` objects for inputs. '
            )
        # prepare input tensors.
        input_vars = []
        for var in inputs:
            input_vars += var.inputs
        # check outputs if of correct type.
        if targets is None:
            if 'constraints' in kwargs:
                targets = kwargs.get('constraints')
            elif 'conditions' in kwargs:
                targets = kwargs.get('conditions')
        else:
            if 'conditions' in kwargs or 'constraints' in kwargs:
                raise TypeError(
                    'Inconsistent inputs: `constraints`, `conditions`, and `targets` are all equivalent keywords '
                    '- pass all targets as a list to `SciModel`. ')
        # setup constraints.
        targets = to_list(targets)
        for i, y in enumerate(targets):
            if not is_constraint(y):
                if is_functional(y):
                    # Case of Data-type constraint.
                    # By default, targets are initialized with Data.
                    targets[i] = Data(y)
                elif isinstance(y, tuple) and \
                        len(y) == 2 and \
                        is_functional(y[0]) and is_functional(y[1]):
                    # Case of Tie-type constraint.
                    targets[i] = Tie(y[0], y[1])
                else:
                    # Not recognised.
                    raise ValueError(
                        'The {}th target entry is not of type `Constraint` or `Functional` - '
                        'received \n ++++++ {} '.format(i, y))
        # prepare network outputs.
        output_vars = []
        for cond in targets:
            output_vars += cond().outputs
        # prepare loss_functions.
        if isinstance(loss_func, str):
            loss_func = SciModel.loss_functions(loss_func)
        elif not callable(loss_func):
            raise TypeError(
                'Please provide a valid loss function from ("mse", "mae") ' +
                "or a callable function for input of tensor types. ")
        # Initialize the Model form super class.
        model = Model(inputs=input_vars, outputs=output_vars, **kwargs)
        # compile the model.
        loss_weights = [K.variable(1.0) for v in output_vars]
        if isinstance(optimizer, str) and \
                len(optimizer.lower().split("scipy-")) > 1:
            model.compile(loss=loss_func,
                          optimizer=GradientObserver(method=optimizer),
                          loss_weights=loss_weights)
        else:
            model.compile(loss=loss_func,
                          optimizer=optimizer,
                          loss_weights=loss_weights)
        # model.train_function = True

        # set initial state of the model.
        if load_weights_from is not None:
            if os.path.exists(load_weights_from):
                model.load_weights(load_weights_from)
            else:
                raise Warning("File not found - load_weights_from: {}".format(
                    load_weights_from))
        # Set the variables.
        self._model = model
        self._inputs = inputs
        self._constraints = targets
        self._loss_func = loss_func
        self._optimizer = optimizer
        self._loss_weights = loss_weights
        self._callbacks = {}
        # Plot to file if requested.
        if plot_to_file is not None:
            plot_model(self._model, to_file=plot_to_file)
Example #30
    def plot_model(self, *args, **kwargs):
        """ Keras plot_model functionality.
            Refer to the Keras documentation for help.
        """
        plot_model(self._model, *args, **kwargs)
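Because the wrapper forwards *args and **kwargs untouched, the usual Keras options apply (hypothetical instance name `net`):

net.plot_model(to_file='net.png', show_shapes=True, expand_nested=True)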
Example #31
def conv_nn(conv_depth):
    x_train, y_train, x_test, y_test, x_valid, y_valid = data_preparation()

    # Reshape input data from (28, 28) to (28, 28, 1)
    x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
    x_valid = x_valid.reshape(x_valid.shape[0], 28, 28, 1)
    x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)

    # One-hot encode the labels
    y_train = to_categorical(y_train, 10)
    y_valid = to_categorical(y_valid, 10)
    y_test = to_categorical(y_test, 10)

    # Print training set shape
    print("x_train shape:", x_train.shape, "y_train shape:", y_train.shape)

    # Print the number of training, validation, and test datasets
    print(x_train.shape[0], 'train set')
    print(x_valid.shape[0], 'validation set')
    print(x_test.shape[0], 'test set')

    # Model Definition
    # One hidden CONV layer
    model = Sequential()
    model.add(
        Conv2D(filters=32,
               kernel_size=2,
               padding='same',
               activation='relu',
               input_shape=(28, 28, 1)))
    model.add(MaxPooling2D(pool_size=2))
    model.add(Dropout(0.3))

    # Add a second hidden layer
    if conv_depth > 1:
        model.add(
            Conv2D(filters=64,
                   kernel_size=2,
                   padding='same',
                   activation='relu'))
        model.add(MaxPooling2D(pool_size=2))
        model.add(Dropout(0.3))

    # Add a third hidden layer
    if conv_depth > 2:
        model.add(
            Conv2D(filters=128,
                   kernel_size=2,
                   padding='same',
                   activation='relu'))
        model.add(Dropout(0.3))

    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(10, activation='softmax'))
    model.summary()
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    history = model.fit(x_train,
                        y_train,
                        batch_size=64,
                        epochs=30,
                        validation_data=(x_valid, y_valid),
                        callbacks=callbacks,  # `callbacks` is assumed defined at module level
                        verbose=1)

    test_loss, test_acc = model.evaluate(x_test, y_test, verbose=2)
    print('\nTest accuracy:', test_acc)

    # Training history plotting
    Visualisations.plot_history(history, 'CNN')

    # From one-hot-encoded representation -> back to categorical values
    rounded_labels = np.argmax(y_test, axis=1)

    # Model evaluation for prediction performance on the Test Set
    model_eval(model, x_test, rounded_labels)

    # model.save('saved/C-NN Model')
    plot_model(model,
               to_file='CNN_plot.png',
               show_shapes=True,
               show_layer_names=True)
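`callbacks` is referenced above but defined outside the snippet; a plausible (assumed) module-level definition, not the original:

from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint

# assumed module-level callbacks used by conv_nn above
callbacks = [
    EarlyStopping(monitor='val_loss', patience=5, verbose=1),
    ModelCheckpoint('best_cnn.h5', save_best_only=True, verbose=1),
]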
Example #32
    args = parser.parse_args()

    np.random.seed(args.seed)
    tf.random.set_seed(args.seed)
    random.seed(args.seed)

    model = ResUNet(input_shape=(128, 128, 1),
                    classes=2,
                    filters_root=16,
                    depth=3)
    model.summary()

    if args.plot_model:
        from tensorflow.python.keras.utils.vis_utils import plot_model
        plot_model(model, show_shapes=True)

    model.compile(loss="categorical_crossentropy",
                  optimizer="adam",
                  metrics=["categorical_accuracy"])

    train_dataset = list(
        zip(*list(SimpleDataset(args.train_dataset_dir_path)())))
    train_dataset = (np.array(train_dataset[0]), np.array(train_dataset[1]))
    x = np.array(train_dataset[0])
    y = np.array(train_dataset[1])

    validation_dataset = list(
        zip(*list(SimpleDataset(args.validation_dataset_dir_path)())))
    validation_dataset = (np.array(validation_dataset[0]),
                          np.array(validation_dataset[1]))