Example #1
    def __disc_img_first_part(self):
        model = Sequential()
        #model.add(Reshape((self.width*self.height*self.channels,), input_shape=(self.height, self.width, self.channels)))
        model.add(InputLayer(input_shape=(self.width, self.height, self.channels)))
        model.add(Conv2D(filters=32, kernel_size=3, padding='same'))
        model.add(BatchNormalization(momentum=0.9))
        model.add(Activation('relu'))

        model.add(Conv2D(filters=64, kernel_size=3, padding='same'))
        model.add(BatchNormalization(momentum=0.9))
        model.add(Activation('relu'))

        model.add(MaxPooling2D(pool_size=2))

        model.add(Conv2D(filters=128, kernel_size=3, padding='same'))
        model.add(BatchNormalization(momentum=0.9))
        model.add(Activation('relu'))

        model.add(MaxPooling2D(pool_size=2))

        model.add(Flatten())

        return model
Example #2
def create_cnn_model(img_size):
    input_num_units = (img_size, img_size, 3)
    pool_size = (2, 2)
    hidden_num_units = 500
    output_num_units = 3
    model = Sequential([
        InputLayer(input_shape=input_num_units),
        Conv2D(25, (5, 5), activation='relu'),
        MaxPooling2D(pool_size=pool_size),
        Conv2D(25, (5, 5), activation='relu'),
        MaxPooling2D(pool_size=pool_size),
        Conv2D(25, (4, 4), activation='relu'),
        Flatten(),
        Dense(units=hidden_num_units, activation='relu'),
        Dense(units=output_num_units, activation='softmax'),
    ])
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    return model
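A minimal usage sketch (dummy data and the 64-pixel size are hypothetical; assumes the keras imports the snippet itself relies on):

import numpy as np

model = create_cnn_model(64)
model.summary()

# dummy batch: 8 RGB images, 3 one-hot classes
x = np.random.rand(8, 64, 64, 3).astype("float32")
y = np.eye(3)[np.random.randint(0, 3, size=8)]
model.fit(x, y, epochs=1, verbose=0)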
Example #3
    def net(self):
        model = Sequential()
        # fully convolutional: accepts any H x W single-channel input
        model.add(InputLayer(input_shape=(None, None, 1)))
        # encoder: three stride-2 convolutions downsample by 8
        model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
        model.add(Conv2D(64, (3, 3), activation='relu', padding='same', strides=2))
        model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
        model.add(Conv2D(128, (3, 3), activation='relu', padding='same', strides=2))
        model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
        model.add(Conv2D(256, (3, 3), activation='relu', padding='same', strides=2))
        model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
        # decoder: three UpSampling2D layers restore the input resolution
        model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
        model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
        model.add(UpSampling2D((2, 2)))
        model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
        model.add(UpSampling2D((2, 2)))
        model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
        # two output channels, squashed to [-1, 1] by tanh
        model.add(Conv2D(2, (3, 3), activation='tanh', padding='same'))
        model.add(UpSampling2D((2, 2)))
        model.compile(optimizer='rmsprop', loss='mse')
        return model
Example #4
def merge_with_taken(model, x_without_first, rc0 = 10):
  """ Create a new model taking rc0-shaped inputs which are then merged with x_without_first from @see split_x_rc0 """
  # input layer of size rc0
  input_tensor = InputLayer(input_shape = (rc0,))

  # side of the original image
  n = int(model.inputs[0].shape[1])

  # creating a new model
  model_upscale = Sequential()
  model_upscale.add(input_tensor)

  # adding all x but first red column
  model_upscale.add(Lambda(lambda y : tf.pad(tf.reshape(y, shape = (-1, rc0, 1, 1)),
                                           paddings = ([0, 0], [0, n - rc0], [0, n - 1], [0, 2])) + x_without_first))

  # adding the rest of the base model (VGG)
  model_upscale.add(model)

  # sanity check: when fed x_taken, will reconstruct the input correctly
  #assert np.all(K.function([model_upscale.input], [model_upscale.layers[0].output])([x_taken])[0] == x_orig), "Input is handled in a wrong way"

  return model_upscale
Example #5
def stack1(data, target_shape, l2=1e-3, shared=True):
    model = Sequential()
    model.add(
        InputLayer(name='numeric_columns__',
                   input_shape=[len(data.numeric_columns)]))
    model.add(Reshape([-1, 6]))
    model.add(Permute((2, 1)))
    if shared:
        model.add(
            Conv1D(1,
                   1,
                   kernel_regularizer=regularizers.l2(l2),
                   kernel_initializer='zero',
                   activation='sigmoid'))
    else:
        model.add(
            LocallyConnected1D(1,
                               1,
                               kernel_regularizer=regularizers.l2(l2),
                               kernel_initializer='zero',
                               activation='sigmoid'))
    model.add(Reshape([6]))
    return model
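A minimal usage sketch; the data object with 36 numeric columns is hypothetical (the Reshape([-1, 6]) needs the column count to be a multiple of 6), and regularizers is assumed to come from keras:

from types import SimpleNamespace

data = SimpleNamespace(numeric_columns=['c%d' % i for i in range(36)])
model = stack1(data, target_shape=None)  # target_shape is unused here
print(model.output_shape)  # (None, 6)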
Example #6
def get_model():
    return Sequential([
        # encoder starts here
        InputLayer(input_shape=(200, 200, 3)),
        Conv2D(16, (4, 4), activation='relu', padding='same'),
        MaxPooling2D((2, 2), padding='same'),
        Conv2D(8, (3, 3), activation='relu', padding='same'),
        MaxPooling2D((2, 2), padding='same'),
        Conv2D(8, (3, 3), activation='relu', padding='same'),
        MaxPooling2D((2, 2), padding='same'),
        Flatten(),
        # bottleneck
        Dense(1000, activation='relu'),
        # decoder starts here
        Dense(5000, activation='relu'),
        Reshape((25, 25, 8)),
        Conv2D(8, (4, 4), activation='relu', padding='same'),
        UpSampling2D((2, 2)),
        Conv2D(8, (3, 3), activation='relu', padding='same'),
        UpSampling2D((2, 2)),
        Conv2D(16, (3, 3), activation='relu', padding='same'),
        UpSampling2D((2, 2)),
        Conv2D(3, (3, 3), activation='relu', padding='same')
    ])
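A quick shape sanity check for the autoencoder above (dummy data; assumes the same keras imports):

import numpy as np

ae = get_model()
ae.compile(optimizer='adam', loss='mse')
x = np.random.rand(4, 200, 200, 3).astype('float32')
ae.fit(x, x, epochs=1, verbose=0)   # autoencoder: the input is its own target
print(ae.output_shape)              # (None, 200, 200, 3)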
Example #7
    def train(self, training_data):
        self.n = len(max(training_data, key=len))
        self.N = len(training_data)
        # encode each tagged sentence as padded word/tag index vectors
        train_x = np.zeros((self.N, self.n), dtype='int32')
        train_y = np.zeros((self.N, self.n), dtype='int32')

        for idx, tagged_sentence in enumerate(training_data):
            train_sentences_num, train_tags_num = [], []
            for word, tag in tagged_sentence:
                try:
                    train_sentences_num.append(w2i[word])
                except KeyError:
                    # fall back to the unknown-word token
                    train_sentences_num.append(w2i[b'UNW123'])

                train_tags_num.append(t2i[tag])
            train_sentences_num_padded = sequence.pad_sequences(
                [train_sentences_num], maxlen=self.n)
            train_tags_num_padded = sequence.pad_sequences([train_tags_num],
                                                           maxlen=self.n)
            train_x[idx, :] = train_sentences_num_padded
            train_y[idx, :] = train_tags_num_padded

        train_y = keras.utils.to_categorical(train_y, num_classes=len(t2i))

        self.model.add(InputLayer(input_shape=(self.n, )))
        self.model.add(Embedding(len(w2i), 100, mask_zero=True))
        self.model.add(Bidirectional(LSTM(50, return_sequences=True)))
        self.model.add(Dense(len(t2i), activation='softmax'))
        self.model.compile(loss='categorical_crossentropy',
                           optimizer='adam',
                           metrics=['accuracy'])
        self.model.summary()
        self.model.fit(train_x, train_y, batch_size=32, epochs=4)
Example #8
    def add_noise_layer(self, layer_name):
        #        previous_layer_name = "name of the previous layer"
        #        before_noise_layer = self.model.layers[layer_name]
        print("before modeling: self.model.get_layer(block1_conv2).output = ",
              self.model.get_layer("block1_conv2").output)

        #        model_layers = self.model.layers[1:]
        model_copy = Sequential()
        model_copy.add(InputLayer(input_shape=self.model.input_shape[1:]))
        model_layers = []
        for layer in self.model.layers[1:]:
            try:
                # flatten nested sub-models into their constituent layers
                model_layers.extend(layer.layers)
            except AttributeError:
                model_layers.append(layer)
        print("model_layers")
        #        for i, layer in enumerate(self.model.layers[1:]):
        for i, layer in enumerate(model_layers):
            layer_config = layer.get_config()
            #            if type(layer_config) == list:
            #                print(len(layer_config))
            #                print(layer.layers)
            #            print("type(layer) =", type(layer))
            #            print("layer_config =", layer_config)
            #            model_copy.add(type(layer)(**layer_config))
            #            model_copy.add(type(layer)(**{'filters':layer_config["filters"], 'kernel_size':layer_config["kernel_size"]}))
            #            model_copy.add(type(layer)(filters=layer_config["filters"], kernel_size=layer_config["kernel_size"]))
            model_copy.add(
                type(layer)(**layer_config, weights=layer.get_weights()))
            if layer.name == layer_name:
                model_copy.add(
                    noise_layer(noiselevel=self.noiselevel, name="noise"))
        if int(self.nb_gpus) > 1:
            self.model_multiple_gpu = multi_gpu_model(model_copy,
                                                      gpus=self.nb_gpus)
        else:
            self.model_multiple_gpu = model_copy
Example #9
def construct_hrs_model(dataset,
                        model_indicator,
                        blocks_definition,
                        load_weights=True):
    # get structure from model_indicator
    structure = [int(ss[:-1]) for ss in model_indicator.split('[')[1:]]
    nb_block = len(structure)

    # assert nb blocks
    assert len(structure) == len(
        blocks_definition
    ), 'arg structure and block_definition need to have the same length'

    # assert weights exist
    weights_dir = './Model/%s_Models/%s' % (dataset, model_indicator)
    assert os.path.exists(weights_dir), '%s does not exist' % weights_dir

    # input
    img_rows, img_cols, img_channels = get_dimensions(dataset)
    model_input = InputLayer(input_shape=(img_rows, img_cols, img_channels))
    save_dir = './Model/%s_models/' % dataset + model_indicator + '/'

    # loop over block
    block_input = model_input.output
    for i in range(nb_block):
        weight_dir = save_dir + '%d_' % i + '%d'
        block_output = construct_switching_block(
            input=block_input,
            nb_channels=structure[i],
            channel_definition=blocks_definition[i],
            weights=weight_dir)
        block_input = block_output

    # construct Model object
    model = Model(inputs=model_input.input, outputs=block_output)

    return model
Example #10
def q_learning_keras(env, num_episodes=2000):
    # create the keras model
    model = Sequential()
    model.add(InputLayer(batch_input_shape=(1, 4)))
    model.add(Dense(30, activation='sigmoid'))
    model.add(Dense(3, activation='linear'))
    model.compile(loss='mse', optimizer='adam', metrics=['mae'])
    # now execute the q learning
    y = 0.95              # discount factor
    eps = 0.5             # initial exploration rate
    decay_factor = 0.999  # per-episode epsilon decay
    r_avg_list = []
    for i in range(num_episodes):
        s = env.reset()
        eps *= decay_factor
        if i % 100 == 0:
            print("Episode {} of {}".format(i + 1, num_episodes))
        done = False
        r_sum = 0
        while not done:
            if np.random.random() < eps:
                a = np.random.randint(0, 3)
            else:
                a = np.argmax(model.predict(np.array([s])))
            new_s, r, done = env.DO(a)
            target = r + y * np.amax(model.predict(np.array([new_s])))
            target_vec = model.predict(np.array([s]))[0]
            target_vec[a] = target
            model.fit(np.array([s]), target_vec.reshape(-1, 3), epochs=1, verbose=0)
            s = new_s
            r_sum += r
        r_avg_list.append(r_sum)
    plt.plot(r_avg_list)
    plt.ylabel('Average reward per game')
    plt.xlabel('Number of games')
    plt.show()
    return model
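The env here is not a standard gym environment: it exposes DO(a) returning (new_state, reward, done) and reset() returning a 4-dim state. A hypothetical stub satisfying that contract, just to show the call:

import numpy as np

class StubEnv:
    """Toy environment matching the reset()/DO() contract above."""
    def reset(self):
        self.t = 0
        return np.zeros(4, dtype='float32')

    def DO(self, a):
        self.t += 1
        new_s = np.random.rand(4).astype('float32')
        return new_s, float(a == 1), self.t >= 20

model = q_learning_keras(StubEnv(), num_episodes=5)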
Example #11
    def basic_transferlearning(self, train, train_label, val, val_label,
                               input_shape, model_name):

        vgg_model = self.import_vgg_model(input_shape)

        train_features = self.get_bottleneck_features(
            vgg_model, self.scale_bottleneck(train))
        val_features = self.get_bottleneck_features(vgg_model,
                                                    self.scale_bottleneck(val))

        input_shape = vgg_model.output_shape[1]

        model = Sequential()
        model.add(InputLayer(input_shape=(input_shape, )))
        model.add(Dense(512, activation='relu'))
        model.add(Dropout(0.3))
        model.add(Dense(512, activation='relu'))
        model.add(Dropout(0.3))

        if self.total_classes:
            model.add(Dense(self.total_classes, activation="softmax"))
            loss = 'categorical_crossentropy'
        else:
            model.add(Dense(1, activation='sigmoid'))
            loss = 'binary_crossentropy'

        model.compile(loss=loss,
                      optimizer=optimizers.RMSprop(lr=1e-4),
                      metrics=['accuracy'])

        model.summary()
        history = model.fit(x=train_features,
                            y=train_label,
                            validation_data=(val_features, val_label),
                            batch_size=self.batch_size,
                            epochs=100,
                            verbose=1)
        model.save("basic_transferlearning_" + model_name + ".h5")
        return history
Example #12
    def createNetwork(self, input_layer=None):
        model = Sequential()

        model.add(input_layer or InputLayer(Config.input_shape))

        # convolutional part
        for l in self.conv_layers:
            if type(l) is ConvLayer:
                model.add(
                    Conv2D(l.filters, (l.kernel_size, l.kernel_size),
                           padding='same'))
                model.add(Activation(l.activation))
            elif type(l) is MaxPoolLayer:
                # check if pooling is possible
                if (model.output_shape[1] >= l.pool_size
                        and model.output_shape[2] >= l.pool_size):
                    model.add(
                        MaxPooling2D(pool_size=(l.pool_size, l.pool_size)))
            else:
                raise TypeError("unknown type of layer")

        # dense part
        model.add(Flatten())
        for l in self.dense_layers:
            model.add(Dense(l.size))
            model.add(Activation(l.activation))
            if l.dropout > 0:
                model.add(Dropout(l.dropout))

        # final part
        model.add(Dense(self.noutputs))
        if Config.task_type == "classification":
            model.add(Activation('softmax'))

        self.nparams = model.count_params()

        return model
Example #13
    def build(self):

        model = Sequential()
        model.add(InputLayer(input_shape=(INPUT_COUNT, 1), name="in"))

        encoder = Sequential(name="encoder")
        add_pool_convolution(encoder, 4)
        add_pool_convolution(encoder, 4)
        add_pool_convolution(encoder, 8)
        add_pool_convolution(encoder, 8)
        add_pool_convolution(encoder, 32)
        add_pool_convolution(encoder, 4)
        add_pool_convolution(encoder, 4)
        add_pool_convolution(encoder, 8)
        add_pool_convolution(encoder, 8)
        add_pool_convolution(encoder, 128)
        model.add(encoder)

        decoder = Sequential(name="decoder")
        add_upsampling_convolution(decoder, 128)
        add_upsampling_convolution(decoder, 8)
        add_upsampling_convolution(decoder, 8)
        add_upsampling_convolution(decoder, 4)
        add_upsampling_convolution(decoder, 4)
        add_upsampling_convolution(decoder, 32)
        add_upsampling_convolution(decoder, 8)
        add_upsampling_convolution(decoder, 8)
        add_upsampling_convolution(decoder, 4)
        add_upsampling_convolution(decoder, 4)
        add_convolution(decoder, 1)
        model.add(decoder)

        model.compile(optimizer="adam", loss="mse", metrics=["acc"])
        self.model = model

        self.build_encoder()
        self.build_decoder()
Example #14
    def create_model_1d(cfg):
        pool_size = cfg['pool_size']  # size of pooling area for max pooling
        kernel_size = cfg['kernel_size']  # convolution kernel size
        input_shape = (23, 3 * (2 * cfg['feature_context'] + 1))
        # Keras Model
        model = Sequential()

        model.add(InputLayer(batch_input_shape=(None, input_shape[0], input_shape[1]), name='input'))
        model.add(GaussianNoise(stddev=cfg['gaussian_noise']))

        for i in range(cfg['num_cnn_layers']):
            model.add(Conv1D(filters=cfg['filters'], kernel_size=kernel_size,
                             padding=cfg['padding'],
                             kernel_initializer='he_normal',
                             kernel_regularizer=regularizers.l2(cfg['weight_decay'])))
            model.add(Activation(activation=cfg['activation']))
            if cfg['batch_normalization']:
                model.add(BatchNormalization())
            model.add(MaxPooling1D(pool_size=pool_size))
            model.add(Dropout(cfg['dropout']))

        model.add(Flatten())

        for i in range(cfg['num_ff_layers']):
            model.add(Dense(cfg['ff_layer_size']))
            model.add(Activation(cfg['activation']))
            if cfg['batch_normalization']:
                model.add(BatchNormalization())
            model.add(Dropout(cfg['dropout']))

        model.add(Dense(cfg['output_dim']))
        model.add(BatchNormalization())
        # Optional softmax layer
        if cfg['task'] == 'classification':
            model.add(Softmax())

        return model
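The builder reads everything from cfg; a hypothetical configuration covering every key it touches (values are illustrative, not from the source, and the method is shown called as a plain function):

cfg = {
    'pool_size': 2, 'kernel_size': 3, 'feature_context': 2,
    'gaussian_noise': 0.1, 'num_cnn_layers': 2, 'filters': 32,
    'padding': 'same', 'weight_decay': 1e-4, 'activation': 'relu',
    'batch_normalization': True, 'dropout': 0.3,
    'num_ff_layers': 1, 'ff_layer_size': 64,
    'output_dim': 10, 'task': 'classification',
}
model = create_model_1d(cfg)
model.summary()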
Example #15
def createCDNet(summary=False, output_layers=8):
    print("Start Initialzing Neural Network!")
    model = Sequential()

    model.add(InputLayer(input_shape=(2, 75)))

    model.add(LSTM(256, return_sequences=True))
    model.add(Dropout(0.25))
    model.add(BatchNormalization())

    model.add(Dense(256))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())

    model.add(LSTM(128, return_sequences=True))
    model.add(Dropout(0.25))
    # model.add(BatchNormalization())

    model.add(Dense(128))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())

    model.add(Dense(64, activation='softmax'))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())

    model.add(Dense(32, activation='softmax'))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())

    model.add(Flatten())

    model.add(Dense(output_layers, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
    if summary:
        model.summary()
    return model
Example #16
    def create_model(self, input_shape):
        model = Sequential()
        model.add(InputLayer(input_shape=input_shape))
        model.add(
            Conv2D(64, (3, 3), activation='relu',
                   data_format='channels_first'))
        model.add(MaxPooling2D(pool_size=(2, 2), data_format='channels_first'))
        model.add(BatchNormalization(axis=1))  # channels_first: feature axis is 1
        model.add(Dropout(0.2))

        model.add(
            Conv2D(128, (3, 3),
                   activation='relu',
                   data_format='channels_first'))
        model.add(MaxPooling2D(pool_size=(2, 2), data_format='channels_first'))
        model.add(BatchNormalization(axis=1))
        model.add(Dropout(0.2))

        model.add(
            Conv2D(256, (3, 3),
                   activation='relu',
                   data_format='channels_first'))
        model.add(MaxPooling2D(pool_size=(2, 2), data_format='channels_first'))
        model.add(BatchNormalization(axis=1))
        model.add(Dropout(0.2))

        model.add(Flatten())

        model.add(Dense(256, activation='relu'))
        model.add(Dropout(0.2))
        model.add(Dense(7, activation='linear'))
        model.summary()
        model.compile(loss='mse',
                      optimizer=Adam(lr=1e-4),
                      metrics=['accuracy'])

        return model
Example #17
def Bi_RNN2(word_index, embedding_matrix, embedding_dim=50):
    model = Sequential()
    model.add(InputLayer(input_shape=(49, )))
    model.add(
        Embedding(len(word_index) + 1,
                  embedding_dim,
                  weights=[embedding_matrix],
                  input_length=49,
                  name="embedding",
                  trainable=True))
    model.add(SpatialDropout1D(0.4))
    model.add(Bidirectional(CuDNNLSTM(64, return_sequences=True)))
    model.add(Bidirectional(CuDNNLSTM(32)))
    model.add(Dropout(0.5))
    model.add(Dense(1, activation='sigmoid'))
    # try using different optimizers and different optimizer configs
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['acc'])
    return model
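A minimal call sketch with a dummy vocabulary and embedding matrix (hypothetical values; CuDNNLSTM requires a GPU-enabled TensorFlow backend):

import numpy as np

word_index = {'hello': 1, 'world': 2}
embedding_matrix = np.random.rand(len(word_index) + 1, 50)
model = Bi_RNN2(word_index, embedding_matrix, embedding_dim=50)
model.summary()  # expects padded input sequences of length 49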
Example #18
    def __init__(self, name, state_shape, n_actions, epsilon=0, reuse=False):
        """A simple DQN agent"""
        self.n_actions = n_actions
        self.name = name
        self.filename = '%s.h5' % self.name

        with tf.variable_scope(name, reuse=reuse):
            self.model = Sequential()
            self.model.add(InputLayer(state_shape))
            self.model.add(Dense(256, activation='relu'))
            # self.model.add(Dense(192, activation='relu'))
            self.model.add(Dense(128, activation='relu'))
            self.model.add(Dense(64, activation='relu'))
            self.model.add(Dense(n_actions, activation='linear'))

            # prepare a graph for agent step
            self.state_t = tf.placeholder('float32', [
                None,
            ] + list(state_shape))
            self.qvalues_t = self.get_symbolic_qvalues(self.state_t)

        self.weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                         scope=name)
        self.epsilon = epsilon
Example #19
def create_model():

    from keras.layers import (ELU, Activation, BatchNormalization, Dense,
                              Dropout, InputLayer, Flatten, Reshape, LSTM,
                              TimeDistributed)
    from keras.models import Sequential
    from keras.utils import plot_model

    from PIL import Image
    """ ----------------------------- VANILLA ARTIFICIAL NEURAL NETWORK ----------------------------- """
    model = Sequential()
    model.add(InputLayer(input_shape=(1, )))
    model.add(Dense(10, activation='relu'))
    model.add(Dense(60, activation='relu'))
    model.add(Reshape((10, 6)))
    model.add(Dense(10, activation='softmax'))

    # model.compile(loss='mse', optimizer='adam', metrics=['mae'])

    # model = Sequential()
    # model.add(LSTM(100, input_shape=(10,6), return_sequences=True))
    # model.add(LSTM(500, return_sequences=True))
    # model.add(LSTM(250, return_sequences=True))
    # model.add(LSTM(125, return_sequences=True))
    # model.add(LSTM(10))
    # model.add(Dropout(rate=0.25))
    # model.add(Dense(1))
    # model.add(Activation('softmax'))

    model.compile(loss='mean_squared_error',
                  optimizer='adam',
                  metrics=['accuracy'])

    plot_model(model, 'model.png', show_layer_names=True, show_shapes=True)
    Image.open('model.png').show()
    return model
Example #20
def main(verbose):
    (x_train, y_train), _ = mnist.load_data()
    x_train = x_train / 255
    x_train = x_train[..., None]
    y_train = to_categorical(y_train)

    model = Sequential([
        InputLayer([28, 28, 1]),
        Conv2D(32, 5, padding="same", activation="relu"),
        MaxPool2D(),
        Conv2D(64, 5, padding="same", activation="relu"),
        MaxPool2D(),
        Flatten(),
        Dense(512, activation="relu"),
        Dropout(0.5),
        Dense(10, activation="softmax")
    ])

    model.compile("adam", "categorical_crossentropy")

    # measure

    model.fit(x_train,
              y_train,
              batch_size=32,
              epochs=1,
              verbose=verbose,
              shuffle=False)
    start = time.time()
    model.fit(x_train,
              y_train,
              batch_size=32,
              epochs=1,
              verbose=verbose,
              shuffle=False)
    print(f"Elapsed time: {time.time()-start}")
Example #21
def modeloDNN(args,
              mode="regression",
              num_hidden_dense_layers=2,
              hidden_dense_layers_units=20,
              length=50,
              nfeatures=7):
    if mode == "classification":
        last_activation = "softmax"
        last_units = 3
    else:
        last_activation = "linear"
        last_units = 1
    # print(f'modeloCNNBasico... conv_size: {conv_size}')
    model = Sequential()
    model.add(InputLayer(input_shape=(length, nfeatures)))

    for i in range(num_hidden_dense_layers):
        model.add(Dense(hidden_dense_layers_units, activation=args.activation))
        model.add(BatchNormalization())
    model.add(Dense(hidden_dense_layers_units, activation=args.activation))
    model.add(Flatten())
    model.add(Dense(last_units, activation=last_activation))

    return model
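A usage sketch; args only needs an activation attribute here, so a hypothetical namespace object is enough:

from types import SimpleNamespace

args = SimpleNamespace(activation='relu')
model = modeloDNN(args, mode='classification')
model.summary()  # ends in Dense(3, activation='softmax')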
Example #22
def nsfsrcnn(x, d=56, s=12, m=4, scale=3, pos=1):
    """Build an FSRCNN model, but change deconv position.

    See https://arxiv.org/abs/1608.00367
    """
    model = Sequential()
    model.add(InputLayer(input_shape=x.shape[-3:]))
    c = x.shape[-1]
    f1 = [5, 1] + [3] * pos
    n1 = [d, s] + [s] * pos
    f2 = [3] * (m - pos - 1) + [1]
    n2 = [s] * (m - pos - 1) + [d]
    f3 = 9
    n3 = c
    for ni, fi in zip(n1, f1):
        model.add(
            Conv2D(ni,
                   fi,
                   padding='same',
                   kernel_initializer='he_normal',
                   activation='relu'))
    model.add(
        Conv2DTranspose(s,
                        3,
                        strides=scale,
                        padding='same',
                        kernel_initializer='he_normal'))
    for ni, fi in zip(n2, f2):
        model.add(
            Conv2D(ni,
                   fi,
                   padding='same',
                   kernel_initializer='he_normal',
                   activation='relu'))
    model.add(Conv2D(n3, f3, padding='same', kernel_initializer='he_normal'))
    return model
Example #23
def fsrcnn(x, d=56, s=12, m=4, scale=3):
    """Build an FSRCNN model.

    See https://arxiv.org/abs/1608.00367
    """
    model = Sequential()
    model.add(InputLayer(input_shape=x.shape[-3:]))
    c = x.shape[-1]
    f = [5, 1] + [3] * m + [1]
    n = [d, s] + [s] * m + [d]
    for ni, fi in zip(n, f):
        model.add(
            Conv2D(ni,
                   fi,
                   padding='same',
                   kernel_initializer='he_normal',
                   activation='relu'))
    model.add(
        Conv2DTranspose(c,
                        9,
                        strides=scale,
                        padding='same',
                        kernel_initializer='he_normal'))
    return model
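x is only used for its shape; a sketch with a hypothetical batch of 32x32 single-channel low-resolution patches (the same call pattern works for nsfsrcnn above):

import numpy as np

x = np.zeros((1, 32, 32, 1), dtype='float32')
model = fsrcnn(x, d=56, s=12, m=4, scale=3)
print(model.output_shape)  # (None, 96, 96, 1): 3x upscaling via the deconv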
Example #24
def create_cnn_model():
    cnn = Sequential()

    # Add input layer
    cnn.add(InputLayer(input_shape=input_shape))

    # Normalization
    cnn.add(BatchNormalization())

    # Conv + Maxpooling
    cnn.add(Conv2D(conv_kernels_1, (kernel_size, kernel_size),
                   padding="same", activation="relu"))
    cnn.add(MaxPooling2D((pool_size, pool_size)))
    # Dropout
    cnn.add(Dropout(drop_prop_1))

    cnn.add(Conv2D(conv_kernels_2, (kernel_size, kernel_size)))
    cnn.add(MaxPooling2D((pool_size, pool_size)))

    # Dropout
    cnn.add(Dropout(drop_prop_2))

    cnn.add(Conv2D(conv_kernels_2, (kernel_size, kernel_size)))
    cnn.add(MaxPooling2D((pool_size, pool_size)))

    # Cannot add more layers as the size is less than kernel_size
    # cnn.add(Conv2D(conv_kernels_2, (kernel_size, kernel_size)))
    # cnn.add(MaxPooling2D((pool_size, pool_size)))

    cnn.add(Flatten())
    cnn.add(Dense(dense_size, activation='relu'))
    cnn.add(Dropout(drop_prop_2))
    cnn.add(Dense(dense_size // 2, activation='relu'))
    cnn.add(Dropout(drop_prop_2))
    cnn.add(Dense(n_classes, activation='softmax'))
    cnn.compile(loss='categorical_crossentropy', metrics=['accuracy'],
                optimizer="Adam")
    return cnn
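This builder relies on module-level globals it never defines; a hypothetical set that makes the layer arithmetic work out (64-pixel inputs survive the three pooling stages):

input_shape = (64, 64, 3)
conv_kernels_1, conv_kernels_2 = 32, 64
kernel_size, pool_size = 3, 2
drop_prop_1, drop_prop_2 = 0.25, 0.5
dense_size, n_classes = 256, 10

cnn = create_cnn_model()
cnn.summary()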
Example #25
def vgg9CNN(input_shape, outclass, sigma='sigmoid'):

    model = Sequential()
    model.add(InputLayer(input_shape=input_shape))

    for i in [64, 96, 128]:
        model.add(Conv2D(i, (3, 3), padding='valid'))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(Conv2D(i, (3, 3), padding='valid'))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))

    model.add(Flatten())
    for i in range(2):
        model.add(Dense(1024, activation='relu'))
        model.add(Dropout(0.5))

    model.add(Dense(outclass))
    model.add(Activation(sigma))

    return model
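A usage sketch; the 96x96 RGB size is hypothetical, but large enough to survive three valid-padded conv pairs plus stride-2 pooling:

model = vgg9CNN((96, 96, 3), outclass=10, sigma='softmax')
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])
model.summary()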
Example #26
def train(train_generator, train_size, input_num, dims_num):
    print("Start Train Job!")
    start = time.time()
    inputs = InputLayer(input_shape=(input_num, dims_num), batch_size=batch_size)
    layer1 = Dense(100, activation="relu")
    layer2 = Dense(20, activation="relu")
    flatten = Flatten()
    layer3 = Dense(2, activation="softmax", name="Output")
    optimizer = Adam()
    model = Sequential()
    model.add(inputs)
    model.add(layer1)
    model.add(Dropout(0.5))
    model.add(layer2)
    model.add(Dropout(0.5))
    model.add(flatten)
    model.add(layer3)
    call = TensorBoard(log_dir=log_dir, write_grads=True, histogram_freq=1)
    model.compile(optimizer, loss="categorical_crossentropy", metrics=["accuracy"])
    model.fit_generator(train_generator,
                        steps_per_epoch=train_size // batch_size,
                        epochs=epochs_num,
                        callbacks=[call])
    # model.fit_generator(train_generator, steps_per_epoch=5, epochs=5, callbacks=[call])
    model.save(model_dir)
    end = time.time()
    print("Finished train job in %f s" % (end - start))
Example #27
def build_model():
    model = Sequential()

    # the input layer must come first in a Sequential model;
    # the augmentation layers follow it
    model.add(InputLayer(input_shape=(224, 224, 1)))

    model.add(RandomRotation(factor=0.45))
    model.add(RandomFlip(mode='horizontal'))

    model.add(Conv2D(1, (2, 2), activation='relu', padding='same'))

    model.add(Conv2D(32, (2, 2), strides=2, activation='relu', padding='same'))

    model.add(Conv2D(64, (2, 2), strides=2, activation='relu', padding='same'))

    model.add(Conv2D(128, (2, 2), strides=2, activation='relu',
                     padding='same'))

    model.add(
        Conv2DTranspose(128, (2, 2),
                        strides=2,
                        activation='relu',
                        padding='same'))

    model.add(
        Conv2DTranspose(64, (2, 2),
                        strides=2,
                        activation='relu',
                        padding='same'))

    model.add(
        Conv2DTranspose(2, (2, 2),
                        strides=2,
                        activation='relu',
                        padding='same'))

    return model
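A shape check for the (reordered) model above: three stride-2 convolutions downsample 224 to 28 and three transposed convolutions bring it back (dummy input; the augmentation layers are inactive at inference):

import numpy as np

m = build_model()
print(m.predict(np.zeros((1, 224, 224, 1), dtype='float32')).shape)
# (1, 224, 224, 2)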
Example #28
    def init_model(self):

        self.model = Sequential()
        self.model.add(InputLayer(input_shape=(self.state_num,
                                               *self.env_size)))
        # Keras 1 API: Convolution2D/border_mode/subsample correspond to
        # Conv2D/padding/strides in Keras 2
        self.model.add(
            Convolution2D(16,
                          4,
                          4,
                          border_mode='same',
                          activation='relu',
                          subsample=(2, 2)))
        self.model.add(
            Convolution2D(32,
                          2,
                          2,
                          border_mode='same',
                          activation='relu',
                          subsample=(1, 1)))
        self.model.add(
            Convolution2D(32,
                          2,
                          2,
                          border_mode='same',
                          activation='relu',
                          subsample=(1, 1)))
        self.model.add(Flatten())
        self.model.add(Dense(128, activation='relu'))
        self.model.add(Dense(self.n_actions, activation='linear'))

        optimizer = RMSprop if not self.use_graves else RMSpropGraves
        self.model.compile(loss=loss_func,
                           optimizer=optimizer(lr=self.learning_rate),
                           metrics=['accuracy'])

        # note: copy.copy is shallow, so this target network shares weights
        # with self.model; keras.models.clone_model would give an independent copy
        self.target_model = copy.copy(self.model)
Example #29
def q_learning_with_keras(env, num_episodes=500):
    model = Sequential()
    model.add(InputLayer(batch_input_shape=(1, 5)))
    model.add(Dense(10, activation='sigmoid'))
    model.add(Dense(2, activation='linear'))
    model.compile(loss='mse', optimizer='adam', metrics=['mae'])

    y = 0.95
    eps = 0.5
    decay_factor = 0.999
    r_avg_list = []
    for i in range(num_episodes):
        s = env.reset()
        eps *= decay_factor
        if i % 100 == 0:
            print("Episode {} of {}".format(i + 1, num_episodes))
        done = False
        r_sum = 0
        while not done:
            if np.random.random() < eps:
                a = np.random.randint(0, 2)
            else:
                a = np.argmax(model.predict(np.identity(5)[s:s + 1]))
            new_s, r, done, _ = env.step(a)
            target = r + y * np.max(
                model.predict(np.identity(5)[new_s:new_s + 1]))
            target_vec = model.predict(np.identity(5)[s:s + 1])[0]
            target_vec[a] = target
            model.fit(np.identity(5)[s:s + 1],
                      target_vec.reshape(-1, 2),
                      epochs=1,
                      verbose=0)
            s = new_s
            r_sum += r
        r_avg_list.append(r_sum / 1000)
    return model
Example #30
    def build(self):
        print("############### BUILDING MODEL ####################")
        model = Sequential()
        model.add(InputLayer(input_shape=(LAYERS, self.height, self.width)))

        if self.width > 5 or self.height > 5:
            model.add(Conv2D(32, (3, 3), strides=(1, 1), padding='same'))
            model.add(Activation('relu'))

        model.add(Flatten())
        model.add(Dense(512))
        model.add(Activation('relu'))
        model.add(Dense(ACTIONS))

        filename = self.filebase + ".hd5"
        if os.path.isfile(filename):
            print("loading prior weights")
            model.load_weights(filename)

        adam = Adam(lr=LEARNING_RATE)
        model.compile(loss='mse', optimizer=adam)

        self.model = model
        print("###############     DONE      ####################")