Example #1
    def _build_model(self):
        model = Sequential()

        model.add(InputLayer(self.input_shape))
        model.add(
            Conv2D(filters=64, kernel_size=1, data_format='channels_first'))
        model.add(self.activation())
        model.add(
            Conv2D(filters=64, kernel_size=1, data_format='channels_first'))
        model.add(self.activation())
        model.add(
            Conv2D(filters=16, kernel_size=1, data_format='channels_first'))
        model.add(self.activation())

        model.add(Flatten())

        model.add(Dense(units=256))
        model.add(self.activation())

        if self.dropout_rate > 0:
            model.add(Dropout(self.dropout_rate))

        model.add(Dense(units=256))
        model.add(self.activation())

        if self.dropout_rate > 0:
            model.add(Dropout(self.dropout_rate))

        model.add(Dense(units=128))
        model.add(self.activation())

        model.add(Dense(units=3))
        return model
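The method above reads `self.input_shape`, `self.activation` (a factory returning a fresh activation layer), and `self.dropout_rate` from its host object. A minimal sketch of such a host, with hypothetical names and values, might look like:

from keras.layers import Activation

class ConvNetBuilder:
    """Hypothetical host for the _build_model method shown above."""

    def __init__(self, input_shape=(16, 8, 8), dropout_rate=0.3):
        self.input_shape = input_shape  # channels-first, per data_format above
        # A callable returning a *new* layer on each invocation, since a
        # Keras layer instance cannot be added to a model twice.
        self.activation = lambda: Activation('relu')
        self.dropout_rate = dropout_rate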
Example #2
    def create_layer(self, layer):
        layer_type = layer.type

        if layer_type == Layers.CONV:
            return Conv2D(filters=layer.filters,
                          kernel_size=layer.kernel_size,
                          strides=layer.strides,
                          padding='same')
        elif layer_type == Layers.ACTIVATION:
            return Activation(layer.func)
        elif layer_type == Layers.INPUT:
            return InputLayer(input_shape=layer.shape)
        elif layer_type == Layers.DENSE:
            return Dense(units=layer.nodes)
        elif layer_type == Layers.FLATTEN:
            return Flatten()
        elif layer_type == Layers.DROPOUT:
            return Dropout(rate=layer.rate)
        elif layer_type == Layers.POOLING:
            if layer.operation == 'max':
                return MaxPooling2D(pool_size=layer.pool_size,
                                    strides=layer.strides,
                                    padding='same')
            elif layer.operation == 'average':
                return AveragePooling2D(pool_size=layer.pool_size,
                                        strides=layer.strides,
                                        padding='same')
        elif layer_type == Layers.BATCH_NORM:
            return BatchNormalization(momentum=layer.momentum)
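The factory assumes a `Layers` enum and spec objects exposing per-type attributes; neither is shown in the snippet. A self-contained sketch of those assumed definitions (illustrative only) could be:

from enum import Enum, auto
from dataclasses import dataclass
from typing import Tuple

class Layers(Enum):
    CONV = auto()
    ACTIVATION = auto()
    INPUT = auto()
    DENSE = auto()
    FLATTEN = auto()
    DROPOUT = auto()
    POOLING = auto()
    BATCH_NORM = auto()

@dataclass
class LayerSpec:
    """Only the fields relevant to a given layer type need sensible values."""
    type: Layers
    filters: int = 32
    kernel_size: Tuple[int, int] = (3, 3)
    strides: Tuple[int, int] = (1, 1)
    func: str = 'relu'
    shape: Tuple[int, ...] = (28, 28, 1)
    nodes: int = 64
    rate: float = 0.25
    operation: str = 'max'
    pool_size: Tuple[int, int] = (2, 2)
    momentum: float = 0.99

# e.g. create_layer(LayerSpec(type=Layers.DENSE, nodes=128))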
Example #3
def lstm(x_dim) -> Model:
    model = Sequential()
    model.add(InputLayer(input_shape=(x_dim, 1)))
    model.add(LSTM(256, return_sequences=True))
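    # SeqWeightedAttention presumably comes from the keras-self-attention
    # package (an assumption; the snippet does not show its import).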
    model.add(SeqWeightedAttention())

    return model
Example #4
    def handle(self, *args, **kwargs):
        os.makedirs("tmp", exist_ok=True)

        version = kwargs['mlversion']
        path = ccp.weights_for("density", version)
        output = kwargs['output']
        print("Converting {0} to {1}".format(path, output))

        model = load_model(path)

        # Create a new input layer to replace the (None,None,3) input layer
        input_layer = InputLayer(input_shape=(675, 900, 3), name="input_1")

        # Save
        intermediary_path = "tmp/reshaped_model.h5"
        model.layers[0] = input_layer
        model.save(intermediary_path)

        # Convert
        coreml_model = coremltools.converters.keras.convert(
            intermediary_path,
            input_names=['input_1'],
            image_input_names=['input_1'],
            output_names=['density_map'])

        # Set model metadata
        coreml_model.author = 'Dimitri Roche'
        coreml_model.short_description = 'Generates a density map with the crowd estimate as the sum of pixels'
        coreml_model.input_description[
            'input_1'] = 'Image to calculate density map'
        coreml_model.output_description[
            'density_map'] = 'Density map where the sum of pixels is crowd count'

        coreml_model.save(output)
Example #5
    def __init__(self, props) -> None:
        print(props.dense)
        self.props = props
        self.model = Sequential()
        self.model.add(InputLayer(input_shape=props.input_shape))
        self.model.add(BatchNormalization())
        self.model.add(
            Conv2D(props.output_classes,
                   padding="same",
                   strides=(props.stride_x, props.stride_y),
                   kernel_size=(props.kernel_x, props.kernel_y)))
        self.model.add(MaxPool2D(padding="same"))
        self.model.add(Flatten())
        self.model.add(Dense(props.dense))
        self.model.add(Dense(props.output_classes))
        self.tf_cb = callbacks.TensorBoard(
            log_dir='./logs/{}'.format(props.id()),
            batch_size=props.batch_size,
            write_graph=False,
        )

        self.model.compile(
            loss=categorical_crossentropy,
            optimizer=props.optimizer,
            metrics=props.metrics,
        )
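`props` is whatever configuration object the caller supplies; a hypothetical stand-in carrying the attributes read above (values purely illustrative) would be:

from types import SimpleNamespace

props = SimpleNamespace(
    input_shape=(28, 28, 1),
    output_classes=10,
    stride_x=1, stride_y=1,
    kernel_x=3, kernel_y=3,
    dense=128,
    batch_size=32,
    optimizer='adam',
    metrics=['accuracy'],
    id=lambda: 'run-0',  # used to name the TensorBoard log directory
)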
Example #6
def trainV2(X, Y, imageShape):
    model = Sequential()
    model.add(InputLayer(input_shape=(3, )))
    model.add(Dense(4, activation='relu'))
    model.add(Dense(5, activation='softmax'))

    model.summary()

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    tensor_board = TensorBoard(
        'model/fullyConnectedLayers/tensorboardFullyConnectedModel')

    model.fit(X,
              Y,
              batch_size=8,
              epochs=10000,
              verbose=1,
              validation_split=0.2,
              shuffle=True,
              callbacks=[tensor_board])

    model.save('model/fullyConnectedLayers/fullyConnectedSavedModel/annV2.h5',
               include_optimizer=True)
Example #7
def create_empty_model():
    """ Creation of empty model """
    model = Sequential()
    model.add(InputLayer(input_shape=(106,)))
    model.add(Dense(53))
    model.add(Dense(2))
    model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
    return model
Example #8
    def create_model(self):
        model = Sequential()
        model.add(
            InputLayer(batch_input_shape=(1, self.maze.size[0] *
                                          self.maze.size[1])))
        model.add(Dense(4, activation='sigmoid'))
        model.add(Dense(len(self.map2str), activation='linear'))
        model.compile(loss='mse', optimizer='adam', metrics=['mae'])
        model.summary()
        return model
Example #9
def _build_encoder():
    model = Sequential(name="encoder")
    model.add(InputLayer(input_shape=(None, None, 1)))
    model.add(Conv2D(64, (3, 3), activation="relu", padding="same", strides=2))
    model.add(Conv2D(128, (3, 3), activation="relu", padding="same"))
    model.add(Conv2D(128, (3, 3), activation="relu", padding="same", strides=2))
    model.add(Conv2D(256, (3, 3), activation="relu", padding="same"))
    model.add(Conv2D(256, (3, 3), activation="relu", padding="same", strides=2))
    model.add(Conv2D(512, (3, 3), activation="relu", padding="same"))
    model.add(Conv2D(512, (3, 3), activation="relu", padding="same"))
    model.add(Conv2D(256, (3, 3), activation="relu", padding="same"))
    return model
Example #10
def _build_encoder():
    model = Sequential(name='encoder')
    model.add(InputLayer(input_shape=(None, None, 1)))
    model.add(Conv2D(64, (3, 3), activation='relu', padding='same', strides=2))
    model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(128, (3, 3), activation='relu', padding='same', strides=2))
    model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(256, (3, 3), activation='relu', padding='same', strides=2))
    model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
    return model
Example #11
def _build_decoder(encoding_depth):
    model = Sequential(name='decoder')
    model.add(InputLayer(input_shape=(None, None, encoding_depth)))
    model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
    model.add(UpSampling2D((2, 2)))
    model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
    model.add(UpSampling2D((2, 2)))
    model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(2, (3, 3), activation='tanh', padding='same'))
    model.add(UpSampling2D((2, 2)))
    return model
Example #12
def _build_decoder(encoding_depth):
    model = Sequential(name="decoder")
    model.add(InputLayer(input_shape=(None, None, encoding_depth)))
    model.add(Conv2D(128, (3, 3), activation="relu", padding="same"))
    model.add(UpSampling2D((2, 2)))
    model.add(Conv2D(64, (3, 3), activation="relu", padding="same"))
    model.add(Conv2D(64, (3, 3), activation="relu", padding="same"))
    model.add(UpSampling2D((2, 2)))
    model.add(Conv2D(32, (3, 3), activation="relu", padding="same"))
    model.add(Conv2D(2, (3, 3), activation="tanh", padding="same"))
    model.add(UpSampling2D((2, 2)))
    return model
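The encoder ends in 256 feature maps and the decoder expects `encoding_depth` input channels, so the two builders above can be chained into one model. A plausible composition (a sketch; the original wiring code is not shown here) is:

from keras import Sequential

def build_autoencoder(encoding_depth=256):
    # A Keras Model can itself be added as a layer of a Sequential model.
    model = Sequential(name='autoencoder')
    model.add(_build_encoder())
    model.add(_build_decoder(encoding_depth))
    model.compile(optimizer='adam', loss='mse')  # assumed training setup
    return model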
Example #13
def main(batch_size=8, epochs=300, images_per_epoch=8192, validation_images=1024, image_size=224, color_space='yuv',
         train_data_dir='/mnt/bolbol/raw-data/train', valid_data_dir='/mnt/bolbol/raw-data/validation',
         model_save_dir='finetune_models'):
    """ FineTune VGG16 to work on black and white images that are passed as inputs to colorizer """
    data_mapper = get_mapper(color_space=color_space, classifier=False)

    ''' Modify VGG16 to work with greyscale images '''
    vgg = VGG16()
    for layer in vgg.layers:
        layer.trainable = False
    vgg.get_layer(name='block1_conv1').trainable = True
    vgg.get_layer(name='block1_conv2').trainable = True
    vgg.get_layer(name='block2_conv1').trainable = True
    vgg.get_layer(name='block2_conv2').trainable = True

    needed_layers = vgg.layers[2:]
    model = Sequential()
    model.add(InputLayer(input_shape=(image_size, image_size, 1), name='gray'))
    model.add(Conv2D(filters=64, kernel_size=3, padding='same'))
    for layer in needed_layers:
        model.add(layer)
    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    model.summary()
    train_generator = ImageDataGenerator().flow_from_directory(directory=train_data_dir,
                                                               interpolation='bilinear',
                                                               target_size=(image_size, image_size),
                                                               batch_size=batch_size,
                                                               color_mode='rgb', class_mode='sparse')
    valid_generator = ImageDataGenerator().flow_from_directory(directory=valid_data_dir,
                                                               interpolation='bilinear',
                                                               target_size=(image_size, image_size),
                                                               batch_size=batch_size,
                                                               color_mode='rgb', class_mode='sparse')
    train_generator = ImageGenerator(rgb_generator=train_generator, workers=4, input_processing_function=data_mapper.rgb_to_colorizer_input)
    valid_generator = ImageGenerator(rgb_generator=valid_generator, workers=4, input_processing_function=data_mapper.rgb_to_colorizer_input)

    # Configure model checkpoints
    model_save_path = os.path.join(model_save_dir, 'vgg-{epoch:02d}-{val_acc:.2f}-{val_loss:.2f}.hdf5')
    if not os.path.exists(model_save_dir):
        os.mkdir(model_save_dir)

    ''' FineTune VGG '''
    model.fit_generator(generator=train_generator,
                        steps_per_epoch=images_per_epoch // batch_size,
                        epochs=epochs,
                        validation_data=valid_generator,
                        validation_steps=validation_images // batch_size,
                        callbacks=[EarlyStopping(patience=5),
                                   ModelCheckpoint(filepath=model_save_path, monitor='val_acc', save_best_only=True)])
Example #14
def trainV1(X, Y, imageShape, model_name):
    if model_name is None or model_name == "":
        model = Sequential()
        model.add(
            InputLayer(input_shape=(imageShape['rows'], imageShape['columns'],
                                    imageShape['channels'])))
        model.add(Flatten())
        model.add(Dense(
            4096,
            activation='relu',
        ))
        model.add(Dropout(0.5))
        model.add(Dense(4096, activation='relu'))
        model.add(Dense(
            4096,
            activation='relu',
        ))
        model.add(Dropout(0.5))
        model.add(Dense(4096, activation='relu'))

        model.add(Dense(16, activation='softmax'))

        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])
    else:
        print("loading the saved model")
        model = keras.models.load_model(
            'model/fullyConnectedLayers/fullyConnectedSavedModel/' +
            model_name)

    model.summary()
    tensor_board = TensorBoard(
        'model/fullyConnectedLayers/tensorboardFullyConnectedModel')

    model.fit(X,
              Y,
              batch_size=32,
              epochs=1,
              verbose=1,
              validation_split=0.1,
              shuffle=True,
              callbacks=[tensor_board])

    model.save('model/fullyConnectedLayers/fullyConnectedSavedModel/' +
               "annV1.h5",
               include_optimizer=True)
    print("model saved")
Example #15
def to_fully_conv(model):
    """ This methods converts a sequential model to a fully convolutional model."""

    new_model = Sequential()

    input_layer = InputLayer(input_shape=(None, None, 3), name="input_new")

    new_model.add(input_layer)

    flattened_ipt = False  # set to True once a Flatten layer has been seen
    for layer in model.layers:

        if "Flatten" in str(layer):
            flattened_ipt = True
            f_dim = layer.input_shape

        elif "Dense" in str(layer):

            input_shape = layer.input_shape
            output_dim = layer.get_weights()[1].shape[0]
            W, b = layer.get_weights()

            if flattened_ipt:
                shape = (f_dim[1], f_dim[2], f_dim[3], output_dim)
                new_W = W.reshape(shape)
                new_layer = Convolution2D(output_dim,
                                          (f_dim[1], f_dim[2]),
                                          strides=(1, 1),
                                          activation=layer.activation,
                                          padding='valid',
                                          weights=[new_W, b])
                flattened_ipt = False

            else:
                shape = (1, 1, input_shape[1], output_dim)
                new_W = W.reshape(shape)
                new_layer = Convolution2D(output_dim,
                                          (1, 1),
                                          strides=(1, 1),
                                          activation=layer.activation,
                                          padding='valid',
                                          weights=[new_W, b])

        else:
            new_layer = layer

        new_model.add(new_layer)

    return new_model
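A hypothetical round trip through the converter: build (or train) a fixed-size classifier, then let it slide over inputs of any spatial size:

from keras.models import Sequential
from keras.layers import InputLayer, Conv2D, Flatten, Dense

cnn = Sequential([
    InputLayer(input_shape=(32, 32, 3)),
    Conv2D(8, (3, 3), activation='relu'),
    Flatten(),
    Dense(10, activation='softmax'),
])
fcn = to_fully_conv(cnn)  # Dense layers become full-extent / 1x1 convolutions
fcn.summary()             # input shape is now (None, None, 3)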
Example #16
    def build(input_shape, classes):
        model = Sequential()
        model.add(InputLayer(input_shape=input_shape))
        model.add(Conv2D(16, (8, 20), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))
        model.add(Conv2D(32, (4, 10), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))
        # model.add(Conv2D(64, (4, 10), activation='relu'))
        # model.add(MaxPooling2D(pool_size=(2, 2)))
        # model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(128, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(classes, activation='softmax'))
        return model
Example #17
    def __call__(self, *args, **kwargs):
        # Share weights of character-level embedding
        embedding_layer = TimeDistributed(Sequential([
            InputLayer(input_shape=(self.chars_per_word, )),
            Embedding(input_dim=self.input_dim,
                      output_dim=self.embedding_size,
                      input_length=self.chars_per_word,
                      mask_zero=True),
            Bidirectional(GRU(units=24))
        ]),
                                          name='CharEmbedding')

        inputs = [
            Input(shape=(shape, self.chars_per_word)) for shape in self.shapes
        ]
        embeddings = [embedding_layer(char_input) for char_input in inputs]
        return inputs, embeddings
Example #18
def nn(shape, no_classes):
    """neural network model"""

    model = Sequential()

    model.add(InputLayer(input_shape=shape))
    model.add(Dense(100))
    model.add(Activation('relu'))

    model.add(Dense(20))
    model.add(Activation('relu'))

    model.add(Dense(no_classes))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer=sgd(lr=0.01),
                  metrics=['categorical_accuracy'])

    return model
Example #19
def create_model():
    model = Sequential()
    model.add(
        InputLayer(batch_input_shape=(1, NUMBER_OF_ROWS * NUMBER_OF_COLUMNS),
                   name='input'))
    model.add(
        Dense(NUMBER_OF_COLUMNS * NUMBER_OF_ROWS * 2,
              input_shape=(NUMBER_OF_ROWS * NUMBER_OF_COLUMNS, ),
              activation=sigmoid,
              name='hidden1'))
    model.add(
        Dense(NUMBER_OF_COLUMNS * 2,
              input_shape=(NUMBER_OF_COLUMNS * NUMBER_OF_ROWS * 2, ),
              activation=sigmoid,
              name='hidden2'))
    model.add(
        Dense(NUMBER_OF_COLUMNS,
              input_shape=(NUMBER_OF_COLUMNS * 2, ),
              activation=linear,
              name='output'))
    model.compile(loss=mean_squared_error,
                  optimizer=adam(),
                  metrics=[mean_absolute_error])
    return model
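`NUMBER_OF_ROWS` and `NUMBER_OF_COLUMNS` are module-level constants not shown in the snippet. Assuming a 6x7 board, a single-state prediction consistent with the fixed batch size of 1 would look like:

import numpy as np

NUMBER_OF_ROWS, NUMBER_OF_COLUMNS = 6, 7  # assumed board size

model = create_model()
state = np.zeros((1, NUMBER_OF_ROWS * NUMBER_OF_COLUMNS))
q_values = model.predict(state)  # shape (1, NUMBER_OF_COLUMNS)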
Example #20
    def transform_layer(layer, next_layer, queue_ctr, flattened):
        print("transform {} (next = {})".format(layer, next_layer))
        new_layers = []
        skip_next = False

        if isinstance(layer, InputLayer):
            new_layers.append(InputLayer.from_config(layer.get_config()))

        elif isinstance(layer, Conv2D) and not isinstance(layer, DepthwiseConv2D):
            conf = layer.get_config()

            act = conf['activation']

            # if the next layer is a pooling layer, create a fused activation
            maxpool_params = None
            if slalom and isinstance(next_layer, MaxPooling2D):
                mp = next_layer
                assert (layer.activation == relu)
                maxpool_params = mp.get_config()
                skip_next = True

            act_layer = None
            if act != "linear":
                conf['activation'] = "linear"

                if slalom and isinstance(next_layer, GlobalAveragePooling2D):
                    assert layer.activation in [relu, relu6]
                    act = "avgpool" + act
                    skip_next = True

                act_layer = ActivationQ(act, bits_w, bits_x, maxpool_params=maxpool_params, log=log,
                                        quantize=quantize, slalom=slalom, slalom_integrity=slalom_integrity,
                                        slalom_privacy=slalom_privacy, sgxutils=sgxutils,
                                        queue=None if queues is None else queues[queue_ctr])
                queue_ctr += 1

            conf['bits_w'] = bits_w
            conf['bits_x'] = bits_x
            conf['log'] = log
            conf['quantize'] = quantize
            conf['slalom'] = slalom
            conf['slalom_integrity'] = slalom_integrity
            conf['slalom_privacy'] = slalom_privacy
            conf['sgxutils'] = sgxutils

            new_layer = Conv2DQ.from_config(conf)
            new_layers.append(new_layer)
            layer_map[new_layer] = layer

            if act_layer is not None:
                new_layers.append(act_layer)

        elif isinstance(layer, DepthwiseConv2D):
            conf = layer.get_config()

            assert conf['activation'] == "linear"

            conf['bits_w'] = bits_w
            conf['bits_x'] = bits_x
            conf['log'] = log
            conf['quantize'] = quantize
            conf['slalom'] = slalom
            conf['slalom_integrity'] = slalom_integrity
            conf['slalom_privacy'] = slalom_privacy
            conf['sgxutils'] = sgxutils

            new_layer = DepthwiseConv2DQ.from_config(conf)
            new_layers.append(new_layer)
            layer_map[new_layer] = layer

        elif isinstance(layer, Dense):
            conf = layer.get_config()

            act = conf['activation']

            act_layer = None
            if act != "linear":
                conf['activation'] = "linear"
                act_layer = ActivationQ(act, bits_w, bits_x, log=log,
                                        quantize=quantize, slalom=slalom, slalom_integrity=slalom_integrity,
                                        slalom_privacy=slalom_privacy, sgxutils=sgxutils,
                                        queue=None if queues is None else queues[queue_ctr])
                queue_ctr += 1

            conf['bits_w'] = bits_w
            conf['bits_x'] = bits_x
            conf['log'] = log
            conf['quantize'] = quantize
            conf['slalom'] = slalom
            conf['slalom_integrity'] = slalom_integrity
            conf['slalom_privacy'] = slalom_privacy
            conf['sgxutils'] = sgxutils

            # replace the dense layer by a pointwise convolution
            if verif_preproc:
                del conf['units']
                conf['filters'] = layer.units
                conf['kernel_size'] = 1
                if not flattened:
                    h_in = int(layer.input_spec.axes[-1])
                    new_layers.append(Reshape((1, 1, h_in)))
                    flattened = True
                new_layer = Conv2DQ.from_config(conf)
                new_layers.append(new_layer)
                layer_map[new_layer] = layer

            else:
                new_layer = DenseQ.from_config(conf)
                new_layers.append(new_layer)
                layer_map[new_layer] = layer

            if act_layer is not None:
                new_layers.append(act_layer)

        elif isinstance(layer, BatchNormalization):
            pass

        elif isinstance(layer, MaxPooling2D):
            assert (not slalom or not slalom_privacy)
            new_layers.append(MaxPooling2D.from_config(layer.get_config()))

        elif isinstance(layer, AveragePooling2D):
            assert (not slalom or not slalom_privacy)
            new_layers.append(AveragePooling2D.from_config(layer.get_config()))
            new_layers.append(Lambda(lambda x: K.round(x)))

        elif isinstance(layer, Activation):
            assert layer.activation in [relu6, relu, softmax]

            queue = None if queues is None else queues[queue_ctr]
            queue_ctr += 1

            act_func = "relu6" if layer.activation == relu6 else "relu" if layer.activation == relu else "softmax"
            if slalom and isinstance(next_layer, GlobalAveragePooling2D):
                #assert layer.activation == relu6
                act_func = "avgpoolrelu6"
                skip_next = True

            maxpool_params = None
            if slalom and (isinstance(next_layer, MaxPooling2D) or isinstance(next_layer, AveragePooling2D)):
                mp = next_layer
                assert (layer.activation == relu)
                maxpool_params = mp.get_config()
                skip_next = True

            new_layers.append(ActivationQ(act_func, bits_w, bits_x, log=log,
                                      maxpool_params=maxpool_params,
                                      quantize=quantize, slalom=slalom,
                                      slalom_integrity=slalom_integrity,
                                      slalom_privacy=slalom_privacy,
                                      sgxutils=sgxutils, queue=queue))

        elif isinstance(layer, ZeroPadding2D):
            if quantize:
                # merge with next layer
                conv = next_layer 
                assert isinstance(conv, Conv2D) or isinstance(conv, DepthwiseConv2D)
                assert conv.padding == 'valid'
                conv.padding = 'same'
            else:
                new_layers.append(ZeroPadding2D.from_config(layer.get_config()))

        elif isinstance(layer, Flatten):
            if not verif_preproc:
                new_layers.append(Flatten.from_config(layer.get_config()))

        elif isinstance(layer, GlobalAveragePooling2D):
            assert not slalom
            conf = layer.get_config()
            conf['bits_w'] = bits_w
            conf['bits_x'] = bits_x
            conf['log'] = log
            conf['quantize'] = quantize
            new_layers.append(GlobalAveragePooling2DQ.from_config(conf))

        elif isinstance(layer, Reshape):
            new_layers.append(Reshape.from_config(layer.get_config()))

        elif isinstance(layer, Dropout):
            pass

        elif isinstance(layer, ResNetBlock):
            #assert not slalom

            path1 = []
            path2 = []
            for l in layer.path1:
                lq, queue_ctr, _, _ = transform_layer(l, None, queue_ctr, flattened)
                path1.extend(lq)

            for l in layer.path2:
                lq, queue_ctr, _, _ = transform_layer(l, None, queue_ctr, flattened)
                path2.extend(lq)

            [actq], queue_ctr, flattened, skip_next = transform_layer(layer.merge_act, next_layer, queue_ctr, flattened)
            new_layer = ResNetBlock(layer.kernel_size, layer.filters, layer.stage, layer.block, layer.identity,
                                    layer.strides, path1=path1, path2=path2, merge_act=actq, 
                                    quantize=quantize, bits_w=bits_w, bits_x=bits_x,
                                    slalom=slalom, slalom_integrity=slalom_integrity, slalom_privacy=slalom_privacy)

            new_layers.append(new_layer)
        else:
            raise AttributeError("Don't know how to handle layer {}".format(layer))

        return new_layers, queue_ctr, flattened, skip_next
Example #21
File: keras.py  Project: tedil/lyner
def autoencode(pipe: Pipe,
               layer_config: List[Dict],
               from_file: str,
               store_model: str,
               loss: str,
               optimiser: str,
               epochs: int,
               batch_size: int,
               shuffle: bool,
               validation_split: float,
               adjust_weights: float,
               mode: str):
    """Build and train an autoencoder."""
    import keras
    from keras import regularizers, Sequential, Input, Model
    from keras.callbacks import EarlyStopping, TensorBoard
    from keras.engine import InputLayer
    from keras.engine.saving import model_from_yaml, model_from_json
    from keras.layers import Dense
    from numpy.random.mtrand import seed
    from tensorflow import set_random_seed
    from lyner.keras_extras import SignalHandler
    seed(1)
    set_random_seed(2)
    matrix = pipe.matrix.copy()
    if matrix.isnull().values.any():
        LOGGER.warning("Dropping rows containing nan values")
        matrix.dropna(how='any', inplace=True)

    def parse_layout(layer_conf):
        get_layer_type = lambda t: getattr(keras.layers, t, None)
        regdict = {'l1_l2': regularizers.l1_l2, 'l1': regularizers.l1, 'l2': regularizers.l2}
        lc = layer_conf.copy()
        layer_type = lc.get('type', None)
        if layer_type:
            lc['type'] = get_layer_type(layer_type)

        # TODO parse regularizers
        kernel_reg_type = lc.get('kernel_regularizer', None)
        if kernel_reg_type:
            params = []  # default: construct the regularizer with no arguments
            if '(' in kernel_reg_type and ')' in kernel_reg_type:
                params = kernel_reg_type[kernel_reg_type.index('(') + 1:kernel_reg_type.index(')')]
                if '+' in params:
                    params = params.split('+')
                else:
                    params = [params]
                params = [float(p) for p in params]
                kernel_reg_type = kernel_reg_type[:kernel_reg_type.index('(')]
            lc['kernel_regularizer'] = regdict[kernel_reg_type](*params)
        return lc.pop('type'), int(lc.pop('n')), lc

    layout = [parse_layout(layer_conf) for layer_conf in layer_config]
    labels = matrix.columns.values.tolist()
    data = matrix.values
    shape = (data.shape[0],)
    data = data.transpose()
    if layout:
        encoding_dim = layout[-1][1]
        encoder = Sequential(name="encoder")
        encoder.add(InputLayer(shape, name="encoder_input"))
        for layer_num, (Layer, n_nodes, extra_args) in enumerate(layout):
            encoder.add(Layer(n_nodes, name=f"encoder_{layer_num}_{n_nodes}", **extra_args))
            # kernel_regularizer=regularizers.l1_l2(0.001, 0.001),
            # kernel_regularizer=regularizers.l1(0.0001),

        decoder = Sequential(name="decoder")
        decoder.add(InputLayer((encoding_dim,), name="decoder_input"))
        for layer_num, (Layer, n_nodes, _) in enumerate(layout[::-1][1:]):
            decoder.add(Layer(n_nodes, name=f"decoder_{layer_num}_{n_nodes}"))
        decoder.add(Dense(shape[0], activation='linear', name="decoder_output"))

        input_layer = Input(shape=shape, name="autoencoder_input")
        encode_layer = encoder(input_layer)
        decode_layer = decoder(encode_layer)

        autoencoder = Model(input_layer, decode_layer)
        if store_model:
            if store_model.endswith('.yaml'):
                model_string = autoencoder.to_yaml()
            elif store_model.endswith('.json'):
                model_string = autoencoder.to_json()
            else:
                model_string = autoencoder.to_yaml()
            with open(store_model, 'wt') as writer:
                writer.write(model_string)
    elif from_file:
        with open(from_file, 'rt') as reader:
            model_string = '\n'.join(reader.readlines())
        if from_file.endswith('.yaml'):
            autoencoder = model_from_yaml(model_string)
        elif from_file.endswith('.json'):
            autoencoder = model_from_json(model_string)
        # TODO set encoder and decoder correctly
    else:
        raise ValueError("No model specified. Use either of --layer-config or --from-file.")
    # from pprint import pprint
    # pprint(autoencoder.get_config())
    autoencoder.compile(optimizer=optimiser, loss=loss, metrics=['mse'], )

    early_stopping = EarlyStopping(monitor='val_loss', min_delta=0.0000001, patience=50)

    sh = SignalHandler()
    autoencoder.fit(np.vsplit(data, 1), np.vsplit(data, 1),
                    callbacks=[TensorBoard(log_dir='/tmp/autoencoder'), sh, early_stopping],
                    epochs=epochs,
                    batch_size=batch_size,
                    validation_split=validation_split,
                    shuffle=shuffle
                    )
    sh.uninit()

    class Autoencoder:
        def __init__(self, encoder=None, decoder=None):
            self._encoder = encoder
            self._decoder = decoder

        def inverse_transform(self, data):
            return self._decoder.predict(data).transpose()

        def transform(self, data):
            return self._encoder.predict(data).transpose()

    pipe.decomposition = Autoencoder(encoder, decoder)

    encoded_data = pipe.decomposition.transform(data)
    decoded_data = pipe.decomposition.inverse_transform(encoded_data.T)
    pre_error = ((data.T - decoded_data) ** 2).mean(axis=None)
    print(f"MSE: {pre_error}")

    pipe._index = pipe.matrix.index
    pipe._columns = pipe.matrix.columns
    if adjust_weights:
        quant = float(adjust_weights)
        for i, layer in enumerate(encoder.layers):
            W, b = layer.get_weights()
            low, median, high = np.quantile(W.flatten(), [quant, 0.5, 1 - quant])
            W_low = W * (W < low)
            W_high = W * (W > high)
            selected_weights = W_low + W_high
            # oplot([Histogram(x=W.flatten()), Histogram(x=W[W < low].flatten()), Histogram(x=W[W > high].flatten())])
            layer.set_weights([selected_weights, b])
            break
        encoded_data = pipe.decomposition.transform(data)
        decoded_data = pipe.decomposition.inverse_transform(encoded_data.T)
        post_error = ((data.T - decoded_data) ** 2).mean(axis=None)
        print(f"MSE: {post_error}")
    if 'weights' == mode:
        layer = 0
        layer_weights = encoder.layers[layer].get_weights()
        layer = encoder.layers[layer]
        if len(layer_weights) == 0:
            layer_weights = encoder.layers[0].get_weights()
        if len(layer_weights) >= 2:
            layer_weights = layer_weights[:-1]  # last one is bias
        new_data = layer_weights[0]
        index = [f'Weight_{i}' for i in range(new_data.shape[0])]
        num_nodes = new_data.shape[1]
        columns = [f"{layer.name}_{i}" for i in range(num_nodes)]
    elif 'nodes' == mode:
        new_data = encoder.predict(np.vsplit(data, 1)).transpose()
        columns = labels
        index = [f"{mode}_{i}" for i in range(encoding_dim)]
    elif 'discard' == mode:
        W, b = encoder.layers[0].get_weights()
        W = np.sum(np.abs(W), axis=1)
        W[W != 0] = 1
        print(f"Kept {np.sum(W)} weights")
        v: np.array = pipe.matrix.values
        new_data = (v.T * W).T
        columns = pipe.matrix.columns
        index = pipe.matrix.index
    else:
        raise ValueError(f"Unknown mode {mode}")
    pipe.matrix = pd.DataFrame(data=new_data,
                               columns=columns,
                               index=index,
                               )
    return
Example #22
import keras
from keras.models import Sequential
from keras.layers import InputLayer, Conv2D, MaxPooling2D, Activation, Flatten, Dense
from keras.optimizers import SGD

from examples.custom_optimiser import LRMultiplierSGD
from examples.custom_weight_decay import add_weight_decay
from examples.utils import getImageData

num_classes = 10
x_train, y_train, x_test, y_test = getImageData('cifar10', num_classes)

shape = x_train.shape[1:]

kernel_init = keras.initializers.RandomNormal(stddev=0.05)

model = Sequential()
model.add(InputLayer(input_shape=(32, 32, 3)))
# Unit 1
model.add(
    Conv2D(filters=32, kernel_size=(5, 5), kernel_initializer=kernel_init))
model.add(MaxPooling2D(padding='same'))
model.add(Activation('relu'))
# Unit 2
model.add(
    Conv2D(filters=32, kernel_size=(5, 5), kernel_initializer=kernel_init))
model.add(MaxPooling2D(padding='same'))
model.add(Activation('relu'))
# Unit 3
model.add(
    Conv2D(filters=32, kernel_size=(5, 5), kernel_initializer=kernel_init))
model.add(MaxPooling2D(padding='same'))
model.add(Activation('relu'))
Example #23
from pathlib import Path

import numpy as np
from PIL import Image
from keras import Sequential, Input, Model
from keras.engine import InputLayer
from keras.layers import Flatten, Dense, Activation, Conv2D, MaxPooling2D, concatenate, Reshape
from keras.optimizers import Adam

import phpcode_data

model = Sequential()
model.add(
    InputLayer(input_shape=(phpcode_data.IMAGE_HEIGHT,
                            phpcode_data.IMAGE_WIDTH)))
model.add(Reshape((phpcode_data.IMAGE_HEIGHT, phpcode_data.IMAGE_WIDTH, 1)))

# Three groups of convolutions, each with two convolutional layers and one pooling layer

model.add(
    Conv2D(filters=32,
           kernel_size=5,
           strides=(1, 1),
           padding='same',
           use_bias=True,
           input_shape=(phpcode_data.IMAGE_HEIGHT, phpcode_data.IMAGE_WIDTH,
                        1)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=2, strides=2, padding='same'))

model.add(
Example #24
    preprocessor = prep.StandardScaler().fit(
        dataset)  # appears to standardize the data around 0
    dataset = preprocessor.transform(dataset)  # the actual preprocessing step
    dataset = dataset.reshape((dataset_samples, dataset_nx,
                               dataset_ny))  # reshape back to features x sequence length

    tmp = tmp.reshape((dataset_samples * dataset_nx * dataset_ny, 1))
    send = prep.StandardScaler().fit(tmp)

    return dataset, send  # return the preprocessed data


real_data, send = preprocess_data(loaded_dataset, window)

input_layer = InputLayer(input_shape=(None, None), name="input_1")

model = load_model('my_model(binary).h5')
model.layers[0] = input_layer
model.summary()

Predicted_visitors = []
for pred in range(predict_period):  # TODO: take this count (7) as an input later
    Predicted_visitors.append(
        model.predict(real_data[[pred]], batch_size=2,
                      verbose=1))  # store each prediction in a new list
    tmp = Predicted_visitors[pred]  # extract the predicted value
    if pred < predict_period - 1:  # repeat until the end of the file is reached
        real_data[pred + 1, -1, -1] = tmp[0][
            0]  # feed the predicted Y into the next day's page_impressions
Example #25
def main():
    training_images = load_data(train_data)

    tr_img_data = np.array([i[0]
                            for i in training_images]).reshape(-1, 64, 64, 1)
    tr_lbl_data = np.array([i[1] for i in training_images])

    model = Sequential()

    model.add(InputLayer(input_shape=[64, 64, 1]))
    model.add(
        Conv2D(filters=4,
               kernel_size=5,
               strides=1,
               padding='same',
               activation='relu'))
    model.add(MaxPool2D(pool_size=5, padding='same'))

    model.add(
        Conv2D(filters=8,
               kernel_size=5,
               strides=1,
               padding='same',
               activation='relu'))
    model.add(MaxPool2D(pool_size=5, padding='same'))

    model.add(
        Conv2D(filters=10,
               kernel_size=5,
               strides=1,
               padding='same',
               activation='relu'))
    model.add(MaxPool2D(pool_size=5, padding='same'))

    model.add(
        Conv2D(filters=20,
               kernel_size=5,
               strides=1,
               padding='same',
               activation='relu'))
    model.add(MaxPool2D(pool_size=5, padding='same'))

    model.add(Dropout(rate=0.25))
    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(rate=0.5))
    model.add(Dense(2, activation='softmax'))

    model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    datagen = preprocessing.image.ImageDataGenerator(rotation_range=10,
                                                     width_shift_range=0.2,
                                                     height_shift_range=0.2,
                                                     zoom_range=0.2,
                                                     shear_range=0.2)

    model.fit_generator(datagen.flow(tr_img_data, tr_lbl_data, batch_size=256),
                        epochs=500)
    model.summary()

    # Save the weights
    model.save_weights('trained_model/model_weights.h5')

    # Save the model architecture
    with open('trained_model/model_architecture.json', 'w') as f:
        f.write(model.to_json())

    test()
Example #26
    for i in range(kernels.shape[0]):
        for j in range(kernels.shape[1]):

            new_kernels[i][j] = np.append(kernels[i][j], 0)

    return utils.get_weights(new_kernels)


if __name__ == '__main__':

    model = utils.load_model('Models/Model_12KHz_98%_meanPooling.yaml',
                             'Models/Model_12KHz_98%_meanPooling.h5')

    new_model = Sequential()

    input_layer = InputLayer(input_shape=(None, 1), name="input_new")

    new_model.add(input_layer)

    for layer in model.layers:

        if "Flatten" in str(layer):
            flattened_ipt = True
            f_dim = layer.input_shape

        elif "Dense" in str(layer):

            input_shape = layer.input_shape
            output_dim = layer.get_weights()[1].shape[0]
            W, b = layer.get_weights()
            Wshape = W.shape
Example #27
# freeze(mfcc_dnn.model)
#
# mbh_dnn=Dnn(dataset.sources['mbh'][1], 239, name='DNN_mbh')
# mbh_dnn.load_weights(path="weights/")
# freeze(mbh_dnn.model)
#
# hog_dnn=Dnn(dataset.sources['hog'][1], 239, name='DNN_hog')
# hog_dnn.load_weights(path="weights/")
# freeze(hog_dnn.model)
#
# traj_dnn=Dnn(dataset.sources['traj'][1], 239, name='DNN_traj')
# traj_dnn.load_weights(path="weights/")
# freeze(traj_dnn.model)

cnn_dnn = Sequential()
cnn_dnn.add(InputLayer(input_shape=(239, )))
cnn_scalar = VectorLayer()
cnn_dnn.add(cnn_scalar)
# cnn_dnn.add(Activation('sigmoid'))

mfcc_dnn = Sequential()
mfcc_dnn.add(InputLayer(input_shape=(239, )))
mfcc_scalar = VectorLayer()
mfcc_dnn.add(mfcc_scalar)
# mfcc_dnn.add(Activation('sigmoid'))

mbh_dnn = Sequential()
mbh_dnn.add(InputLayer(input_shape=(239, )))
mbh_scalar = VectorLayer()
mbh_dnn.add(mbh_scalar)
# mbh_dnn.add(Activation('sigmoid'))
Example #28
def run_lstm(Xtr,
             Xte,
             ytr,
             yte,
             max_features,
             max_features2,
             out_size,
             embedding_size,
             hidden_size,
             batch_size,
             epochs=50,
             verbose=0,
             maxsent=0):

    print('Training and testing tensor shapes:', Xtr.shape, Xte.shape,
          ytr.shape, yte.shape)

    mf = max(max_features, max_features2)

    model1 = Sequential()
    model1.add(
        Embedding(input_dim=mf,
                  output_dim=embedding_size,
                  input_length=maxsent,
                  mask_zero=True))

    model2 = Sequential()
    model2.add(InputLayer(input_shape=(maxsent, Xtr.shape[2] - 1)))

    model = Sequential()
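    # NB: `Merge` is the legacy Keras 1.x layer API; Keras 2 replaced it with
    # the functional `concatenate`, so this snippet assumes an old Keras version.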
    model.add(Merge([model1, model2], mode='concat'))
    model.add(Dense(1))

    model.add(
        LSTM(hidden_size,
             return_sequences=True,
             input_shape=(maxsent, Xtr.shape[2] - 1)))
    model.add(TimeDistributed(Dense(out_size)))
    model.add(Activation('softmax'))
    print('compile...')
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    #print(model.summary())
    print('train...')

    model.fit([Xtr[:, :, 0], Xtr[:, :, 1:Xtr.shape[2]]],
              ytr,
              epochs=epochs,
              verbose=verbose,
              batch_size=batch_size,
              validation_data=([Xte[:, :, 0], Xte[:, :, 1:Xtr.shape[2]]], yte))
    score = model.evaluate([Xte[:, :, 0], Xte[:, :, 1:Xtr.shape[2]]],
                           yte,
                           batch_size=batch_size,
                           verbose=verbose)

    print('Raw test score:', score)
    pr = model.predict_classes([Xtr[:, :, 0], Xtr[:, :, 1:Xtr.shape[2]]],
                               verbose=verbose)
    yh = ytr.argmax(2)  # no encoding
    fyh, fpr = score2(yh, pr)
    print('Training...')
    print(' - accuracy:', accuracy_score(fyh, fpr))
    print(' - confusion matrix:')
    print(confusion_matrix(fyh, fpr))
    print(' - precision, recall, f1, support:')
    print(precision_recall_fscore_support(fyh, fpr))

    pr = model.predict_classes([Xte[:, :, 0], Xte[:, :, 1:Xte.shape[2]]],
                               verbose=verbose)
    yh = yte.argmax(2)
    fyh, fpr = score2(yh, pr)
    print('Testing...')
    print(' - accuracy:', accuracy_score(fyh, fpr))
    print(' - confusion matrix:')
    print(confusion_matrix(fyh, fpr))
    print(' - precision, recall, f1, support:')
    print(precision_recall_fscore_support(fyh, fpr))
    print(
        '----------------------------------------------------------------------------------'
    )
Example #29
from keras.models import load_model
from keras.layers import Input, Dense
from tensorflow import Tensor
from keras import backend as K
from keras.engine import InputLayer

model = load_model('model_checkpoint.hdf5')

for layer in model.layers:
    print(layer)

input_layer1 = InputLayer(input_shape=(51, 68, 3), name="input_1")
input_layer2 = InputLayer(input_shape=(51, 68, 3), name="input_2")
print("input shape:", input_layer1.input_shape)
print("input tensor:", input_layer1.input)
print("name:", input_layer1.name)
print("sparse:", input_layer1.sparse)
print("dtype:", input_layer1.dtype)

model.layers[0] = input_layer1
model.layers[1] = input_layer2
model.save("reshaped-model.h5")

import coremltools
coreml_model = coremltools.converters.keras.convert(
    'reshaped-model.h5',
    is_bgr=True,
    input_names=['image1', 'image2'],
    image_input_names=['image1', 'image2'],
    output_names=['output'],
    blue_bias=-103.939,
Example #30
def _build_encoder():
    model = Sequential(name='encoder')
    model.add(InputLayer(input_shape=(None, None, 1)))