Example #1
    def build_discriminator(self):

        model = Sequential()

        model.add(
            Conv2D(32,
                   kernel_size=3,
                   strides=2,
                   input_shape=self.img_shape,
                   padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
        model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(256, kernel_size=3, strides=1, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(1, activation='sigmoid'))

        # model.summary()

        img = Input(shape=self.img_shape)
        validity = model(img)

        return Model(img, validity)
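
For context, a minimal usage sketch: a discriminator like the one above is typically compiled for binary real/fake classification before GAN training. Everything below (the SimpleNamespace stand-in for self, the image size, and the Adam settings) is an assumption for illustration, not part of the original class.

import numpy as np
from types import SimpleNamespace
from keras.optimizers import Adam

fake_self = SimpleNamespace(img_shape=(28, 28, 1))    # assumed image size
discriminator = build_discriminator(fake_self)        # method called as a plain function
discriminator.compile(loss='binary_crossentropy',
                      optimizer=Adam(0.0002, 0.5),    # common DCGAN settings (assumed)
                      metrics=['accuracy'])
print(discriminator.predict(np.random.rand(16, 28, 28, 1)).shape)  # (16, 1) validity scores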
Example #2
    def __init__(self, restore=None, session=None, use_softmax=False):
        self.num_channels = 1
        self.image_size = 28
        self.num_labels = 10
        self.shape = [None, 28, 28, self.num_channels]
        model = Sequential()
        kernel_size = (5, 5)
        drop_rate = 0.3
        model.add(Conv2D(32, kernel_size, activation='relu', padding='same',
                         name='block1_conv1', input_shape=(28, 28, 1)))  # 1
        model.add(MaxPooling2D(pool_size=(2, 2), name='block1_pool1'))  # 2
        model.add(Dropout(drop_rate))

        # block2
        model.add(Conv2D(64, kernel_size, activation='relu', padding='same', name='block2_conv1'))  # 4
        model.add(MaxPooling2D(pool_size=(2, 2), name='block2_pool1'))  # 5
        model.add(Dropout(drop_rate))

        model.add(Flatten(name='flatten'))

        model.add(Dense(120, activation='relu', name='fc1'))  # -5
        model.add(Dropout(drop_rate))
        model.add(Dense(84, activation='relu', name='fc2'))  # -3
        model.add(Dense(10, name='before_softmax'))  # -2
        model.add(Activation('softmax', name='predictions'))  #
        if restore:
            model.load_weights(restore)

        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(K.function([model.layers[0].input], [layer.output]))

        self.layer_outputs = layer_outputs
        self.model = model
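
Each entry in layer_outputs above is a K.function that maps the network input to one Conv2D/Dense layer's activations. A hedged sketch of how such probes are usually invoked; net stands for an instance of the class, and the batch is a random placeholder:

import numpy as np

x_batch = np.random.rand(4, 28, 28, 1).astype('float32')
for probe in net.layer_outputs:
    activations = probe([x_batch])[0]   # K.function returns a list of output arrays
    print(activations.shape)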
Example #3
def build_generator_dense():
    
    model = Sequential()

    # Add arbitrary layers
    first = True
    for size in generator_layers.split(":"):
        size = int(size)
        if first:
            model.add(Dense(size, input_shape=noise_shape, activation=generator_activation))
        else:
            model.add(Dense(size, activation=generator_activation))

        model.add(Dropout(dropout_value))
        first = False

    # Add the final layer
    model.add(Dense(np.prod(url_shape), activation="tanh"))
    model.add(Dropout(dropout_value))
    model.add(Reshape(url_shape))
    model.summary()

    # Build the model
    noise = Input(shape=noise_shape)
    gen = model(noise)

    return Model(noise, gen)
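
build_generator_dense reads module-level configuration rather than taking arguments. A hypothetical set of values, purely illustrative (the original project defines its own):

import numpy as np

noise_shape = (100,)                  # latent vector size (assumed)
url_shape = (80, 64)                  # output tensor shape (assumed)
generator_layers = "128:256:512"      # colon-separated hidden sizes (assumed)
generator_activation = "relu"         # hidden activation (assumed)
dropout_value = 0.2                   # dropout rate (assumed)

generator = build_generator_dense()
sample = generator.predict(np.random.normal(size=(1,) + noise_shape))
print(sample.shape)                   # (1,) + url_shape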
Example #4
    def conv_3d(self):
        """
        Build a 3D convolutional network, based loosely on C3D.
            https://arxiv.org/pdf/1412.0767.pdf
        """
        # Model.
        model = Sequential()
        model.add(
            Conv3D(32, (3, 3, 3),
                   activation='relu',
                   input_shape=self.input_shape))
        model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2)))
        model.add(Conv3D(64, (3, 3, 3), activation='relu'))
        model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2)))
        model.add(Conv3D(128, (3, 3, 3), activation='relu'))
        model.add(Conv3D(128, (3, 3, 3), activation='relu'))
        model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2)))
        model.add(Conv3D(256, (2, 2, 2), activation='relu'))
        model.add(Conv3D(256, (2, 2, 2), activation='relu'))
        model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2)))

        model.add(Flatten())
        model.add(Dense(1024))
        model.add(Dropout(0.5))
        model.add(Dense(1024))
        model.add(Dropout(0.5))
        model.add(Dense(self.nb_classes, activation='softmax'))

        return model
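
C3D-style networks consume 5-D batches shaped (samples, frames, height, width, channels). A hedged instantiation sketch; the clip size, the class count, and the SimpleNamespace stand-in for self are assumptions:

from types import SimpleNamespace

cfg = SimpleNamespace(input_shape=(16, 112, 112, 3),  # 16-frame RGB clips (assumed)
                      nb_classes=101)                 # e.g. UCF101-sized label set (assumed)
model = conv_3d(cfg)                  # method called as a plain function
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])
print(model.output_shape)             # (None, 101)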
Example #5
    def add_conv_layer(self, img_size=(32, 32), img_channels=3):
        self.classifier.add(
            BatchNormalization(input_shape=(img_size[0], img_size[1],
                                            img_channels)))

        self.classifier.add(
            Conv2D(32, (3, 3), padding='same', activation='relu'))
        self.classifier.add(Conv2D(32, (3, 3), activation='relu'))
        self.classifier.add(MaxPooling2D(pool_size=2))
        self.classifier.add(Dropout(0.25))

        self.classifier.add(
            Conv2D(64, (3, 3), padding='same', activation='relu'))
        self.classifier.add(Conv2D(64, (3, 3), activation='relu'))
        self.classifier.add(MaxPooling2D(pool_size=2))
        self.classifier.add(Dropout(0.25))

        self.classifier.add(
            Conv2D(128, (3, 3), padding='same', activation='relu'))
        self.classifier.add(Conv2D(128, (3, 3), activation='relu'))
        self.classifier.add(MaxPooling2D(pool_size=2))
        self.classifier.add(Dropout(0.25))

        self.classifier.add(
            Conv2D(256, (3, 3), padding='same', activation='relu'))
        self.classifier.add(Conv2D(256, (3, 3), activation='relu'))
        self.classifier.add(MaxPooling2D(pool_size=2))
        self.classifier.add(Dropout(0.25))
Example #6
    def __init__(self, restore=None, session=None, use_softmax=False):
        self.num_channels = 1
        self.image_size = 28
        self.num_labels = 10
        self.shape = [None, 28 * 28]
        model = Sequential()
        model.add(
            Dense(512,
                  activation='relu',
                  input_shape=(28 * 28, ),
                  name='dense_1'))
        model.add(Dropout(0.2, name='d1'))
        model.add(Dense(512, activation='relu', name='dense_2'))
        model.add(Dropout(0.2, name='d2'))
        model.add(Dense(10, activation='softmax', name='dense_3'))
        if restore:
            model.load_weights(restore, by_name=True)

        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(
                    K.function([model.layers[0].input], [layer.output]))

        self.layer_outputs = layer_outputs
        self.model = model
Example #7
 def __init__(self, nn_type="resnet50", restore=None, session=None, use_imagenet_pretrain=False, use_softmax=True):
     self.image_size = 224
     self.num_channels = 3
     self.num_labels = 8
 
     input_layer = Input(shape=(self.image_size, self.image_size, self.num_channels))
     weights = "imagenet" if use_imagenet_pretrain else None
     if nn_type == "resnet50":
         base_model = ResNet50(weights=weights, input_tensor=input_layer)
     elif nn_type == "vgg16":
         base_model = VGG16(weights=weights, input_tensor=input_layer)
         # base_model = VGG16(weights=None, input_tensor=input_layer)
     x = base_model.output
     x = LeakyReLU()(x)
     x = Dense(1024)(x)
     x = Dropout(0.2)(x)
     x = LeakyReLU()(x)
     x = Dropout(0.3)(x)
     x = Dense(8)(x)
     if use_softmax:
         x = Activation("softmax")(x)
     model = Model(inputs=base_model.input, outputs=x)
 
     # for layer in base_model.layers:
     # 	layer.trainable = False
 
 
     if restore:
         print("Load: {}".format(restore))
         model.load_weights(restore)
 
     self.model = model
Example #8
 def classifier(self, x):
     x = Dropout(0.5)(x)  #             nn.Dropout(),
     x = Dense(4096)(x)  #nn.Linear(256 * 6 * 6, 4096),
     x = Activation('relu')(x)  #nn.ReLU(inplace=True),
     x = Dropout(0.5)(x)  #nn.Dropout(),
     x = Dense(4096)(x)  #nn.Linear(4096, 4096),
     x = Activation('relu')(x)  #nn.ReLU(inplace=True),
     x = Dense(self.num_classes)(x)  #nn.Linear(4096, num_classes),
     return x
Example #9
 def add_conv_layer(self, img_size=(32, 32), img_channels=3):
     self.classifier.add(BatchNormalization(input_shape=(*img_size, img_channels)))
     self.classifier.add(Conv2D(32, (3, 3), activation='relu'))
     self.classifier.add(MaxPooling2D(pool_size=(2, 2)))
     self.classifier.add(Dropout(0.25))
     self.classifier.add(Conv2D(64, (3, 3), activation='relu'))
     self.classifier.add(MaxPooling2D(pool_size=(2, 2)))
     self.classifier.add(Dropout(0.25))
     self.classifier.add(Conv2D(16, (2, 2), activation='relu'))
     self.classifier.add(MaxPooling2D(pool_size=(2, 2)))
     self.classifier.add(Dropout(0.25))
Example #10
 def train(self,
           x_train,
           x_test,
           y_train,
           y_test,
           embedding_matrix,
           num_classes,
           seq_length=200,
           emb_dim=100,
           train_emb=True,
           windows=(3, 4, 5, 6),
           dropouts=(0.2, 0.4),
           filter_sz=100,
           hid_dim=100,
           bch_siz=50,
           epoch=8):
      # set up and train the neural net
     from tensorflow.contrib.keras.api.keras.models import Model
     from tensorflow.contrib.keras.api.keras.layers import Activation, Dense, Dropout, Embedding, Flatten, Input, Concatenate, Conv1D, MaxPool1D
     inp = Input(shape=(seq_length, ))
     out = Embedding(input_dim=len(embedding_matrix[:, 1]),
                     output_dim=emb_dim,
                     input_length=seq_length,
                     weights=[embedding_matrix],
                     trainable=train_emb)(inp)
     out = Dropout(dropouts[0])(out)
     convs = []
     for w in windows:
         conv = Conv1D(filters=filter_sz,
                       kernel_size=w,
                       padding='valid',
                       activation='relu',
                       strides=1)(out)
         conv = MaxPool1D(pool_size=2)(conv)
         conv = Flatten()(conv)
         convs.append(conv)
     out = Concatenate()(convs)
     out = Dense(hid_dim, activation='relu')(out)
     out = Dropout(dropouts[1])(out)
     out = Activation('relu')(out)
     out = Dense(num_classes, activation='softmax')(out)
     model = Model(inp, out)
     model.compile(loss='categorical_crossentropy',
                   optimizer='nadam',
                   metrics=['accuracy'])
     model.fit(x_train,
               y_train,
               batch_size=bch_siz,
               epochs=epoch,
               verbose=2,
               validation_data=(x_test, y_test))
     return model
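
A toy invocation with random data, shown only to make the expected shapes concrete; the vocabulary size, class count, and the trainer instance are placeholders, not values from the original project:

import numpy as np

vocab, emb_dim, seq_len, n_cls = 5000, 100, 200, 4
emb = np.random.rand(vocab, emb_dim).astype('float32')   # stand-in embedding matrix
x_tr = np.random.randint(0, vocab, size=(500, seq_len))
x_te = np.random.randint(0, vocab, size=(100, seq_len))
y_tr = np.eye(n_cls)[np.random.randint(0, n_cls, 500)]   # one-hot labels
y_te = np.eye(n_cls)[np.random.randint(0, n_cls, 100)]

# trainer stands for an instance of the enclosing class
model = trainer.train(x_tr, x_te, y_tr, y_te, emb, n_cls,
                      seq_length=seq_len, emb_dim=emb_dim, epoch=1)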
Example #11
    def mlp(self):
        """Build a simple MLP. It uses extracted features as the input
        because of the otherwise too-high dimensionality."""
        # Model.
        model = Sequential()
        model.add(Flatten(input_shape=self.input_shape))
        model.add(Dense(512))
        model.add(Dropout(0.5))
        model.add(Dense(512))
        model.add(Dropout(0.5))
        model.add(Dense(self.nb_classes, activation='softmax'))

        return model
Example #12
def decoder_block(x, y, scope, size=None, upconv=True, ksize=(3, 3), upsize=(2, 2), upstrides=(2, 2), act_fn='relu',
                  ep_collection='end_points', reuse=None, batch_norm=True, dropout=0.0):
    if size is None:
        base_size = x.get_shape().as_list()[-1]
        size = int(base_size / 2)
    with tf.variable_scope(scope, scope, [x], reuse=reuse) as sc:
        x = ThresholdedReLU(theta=0.0)(x)
        uped = Conv2DTranspose(size, upsize, strides=upstrides, padding='same')(x) if upconv else x

        uped, y = reconcile_feature_size(uped, y)
        up = concatenate([uped, y], axis=3)
        tf.add_to_collection(ep_collection, up)

        conv = Conv2D(size, ksize, activation=act_fn, padding='same')(up)
        tf.add_to_collection(ep_collection, conv)

        conv = Conv2D(size, ksize, activation=act_fn, padding='same')(conv)
        tf.add_to_collection(ep_collection, conv)

        if batch_norm:
            conv = BatchNormalization()(conv, training=True)
            tf.add_to_collection(ep_collection, conv)
        if dropout > 0.0:
            conv = Dropout(dropout)(conv)
            tf.add_to_collection(ep_collection, conv)
    return conv
Example #13
 def forward(self, x):
     if self.transform_input:
         x = x.clone()
         x[:, 0] = x[:, 0] * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
         x[:, 1] = x[:, 1] * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
         x[:, 2] = x[:, 2] * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
     # 299 x 299 x 3
     x = self.Conv2d_1a_3x3.forward(x)  #x = self.Conv2d_1a_3x3(x)
     # 149 x 149 x 32
     x = self.Conv2d_2a_3x3.forward(x)  #x = self.Conv2d_2a_3x3(x)
     # 147 x 147 x 32
     x = self.Conv2d_2b_3x3.forward(x)  #x = self.Conv2d_2b_3x3(x)
     # 147 x 147 x 64
     x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(x)  # x = F.max_pool2d(x, kernel_size=3, stride=2)
     # 73 x 73 x 64
     x = self.Conv2d_3b_1x1.forward(x)  #x = self.Conv2d_3b_1x1(x)
     # 73 x 73 x 80
     x = self.Conv2d_4a_3x3.forward(x)  #x = self.Conv2d_4a_3x3(x)
     # 71 x 71 x 192
     x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(x)  # x = F.max_pool2d(x, kernel_size=3, stride=2)
     # 35 x 35 x 192
     x = self.Mixed_5b.forward(x)  #x = self.Mixed_5b(x)
     # 35 x 35 x 256
     x = self.Mixed_5c.forward(x)  #x = self.Mixed_5c(x)
     # 35 x 35 x 288
     x = self.Mixed_5d.forward(x)  #x = self.Mixed_5d(x)
     # 35 x 35 x 288
     x = self.Mixed_6a.forward(x)  #x = self.Mixed_6a(x)
     # 17 x 17 x 768
     x = self.Mixed_6b.forward(x)  #x = self.Mixed_6b(x)
     # 17 x 17 x 768
     x = self.Mixed_6c.forward(x)  #x = self.Mixed_6c(x)
     # 17 x 17 x 768
     x = self.Mixed_6d.forward(x)  #x = self.Mixed_6d(x)
     # 17 x 17 x 768
     x = self.Mixed_6e.forward(x)  #x = self.Mixed_6e(x)
     # 17 x 17 x 768
     #         if self.aux_logits:
     #             aux = self.AuxLogits.forward(x)                  #aux = self.AuxLogits(x)
     # 17 x 17 x 768
     x = self.Mixed_7a.forward(x)  #x = self.Mixed_7a(x)
     # 8 x 8 x 1280
     x = self.Mixed_7b.forward(x)  #x = self.Mixed_7b(x)
     # 8 x 8 x 2048
     x = self.Mixed_7c.forward(x)  #x = self.Mixed_7c(x)
     # 8 x 8 x 2048
     x = AveragePooling2D(pool_size=(8, 8))(x)  # x = F.avg_pool2d(x, kernel_size=8)
     # 1 x 1 x 2048
     x = Dropout(0.5)(x)  #x = F.dropout(x, training=self.training)
     # 1 x 1 x 2048
     x = Flatten()(x)  #x = x.view(x.size(0), -1)
     # 2048
     x = self.fc(x)
     # 1000 (num_classes)
     #         if self.aux_logits:
     #             return x, aux
     return x
Example #14
def get_clf():
    """
    Build the MNIST CNN classifier and load its pretrained weights.
    """
    model = Sequential()

    model.add(Conv2D(32, (3, 3), input_shape=(28, 28, 1)))
    model.add(Activation('relu'))
    model.add(Conv2D(32, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(200))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(200))
    model.add(Activation('relu'))
    model.add(Dense(10))

    model.load_weights("./models/mnist")

    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct, logits=predicted)

    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss=fn, optimizer=sgd, metrics=['accuracy'])

    return model
Example #15
def build_classifier(optimizer):
    classifier = Sequential()
    classifier.add(
        Dense(units=6,
              kernel_initializer='uniform',
              activation='relu',
              input_dim=11))
    classifier.add(Dropout(0.1))
    classifier.add(
        Dense(units=6, kernel_initializer='uniform', activation='relu'))
    classifier.add(Dropout(0.1))
    classifier.add(
        Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))
    classifier.compile(optimizer=optimizer,
                       loss='binary_crossentropy',
                       metrics=['accuracy'])
    return classifier
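
This build_fn signature matches the classic scikit-learn wrapper pattern. A hedged sketch of the usual pairing; the grid values and the 11-feature dataset X_train/y_train are assumptions:

from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import GridSearchCV

clf = KerasClassifier(build_fn=build_classifier)
param_grid = {'batch_size': [25, 32],
              'epochs': [100, 500],
              'optimizer': ['adam', 'rmsprop']}   # routed to build_classifier(optimizer)
grid = GridSearchCV(estimator=clf, param_grid=param_grid,
                    scoring='accuracy', cv=10)
# grid_result = grid.fit(X_train, y_train)   # X_train: (n, 11), y_train: (n,)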
Example #16
def train(data,
          file_name,
          params,
          num_epochs=50,
          batch_size=128,
          train_temp=1,
          init=None):
    """
    Standard neural network training procedure.
    """
    model = Sequential()

    print(data.train_data.shape)

    model.add(Conv2D(params[0], (3, 3), input_shape=data.train_data.shape[1:]))
    model.add(Activation('relu'))
    model.add(Conv2D(params[1], (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(params[2], (3, 3)))
    model.add(Activation('relu'))
    model.add(Conv2D(params[3], (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(params[4]))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(params[5]))
    model.add(Activation('relu'))
    model.add(Dense(10))

    if init is not None:
        model.load_weights(init)

    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
                                                       logits=predicted /
                                                       train_temp)

    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)

    model.compile(loss=fn, optimizer=sgd, metrics=['accuracy'])

    model.fit(data.train_data,
              data.train_labels,
              batch_size=batch_size,
              validation_data=(data.validation_data, data.validation_labels),
              epochs=num_epochs,
              shuffle=True)

    if file_name is not None:
        model.save(file_name)

    return model
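
A minimal smoke-test call with a random stand-in dataset shaped like MNIST; the SimpleNamespace fields mirror the attributes the function reads, and the layer widths echo the conv/dense sizes used elsewhere on this page:

import numpy as np
from types import SimpleNamespace

data = SimpleNamespace(
    train_data=np.random.rand(256, 28, 28, 1).astype('float32'),
    train_labels=np.eye(10)[np.random.randint(0, 10, 256)],
    validation_data=np.random.rand(64, 28, 28, 1).astype('float32'),
    validation_labels=np.eye(10)[np.random.randint(0, 10, 64)])
model = train(data, None, [32, 32, 64, 64, 200, 200], num_epochs=1)  # file_name=None skips saving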
Example #17
def get_dae_clf():
    model1 = Sequential()

    model1.add(Lambda(lambda x_: x_ + 0.5, input_shape=(28, 28, 1)))

    # Encoder
    model1.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))
    model1.add(AveragePooling2D((2, 2), padding="same"))
    model1.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))

    # Decoder
    model1.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))
    model1.add(UpSampling2D((2, 2)))
    model1.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))
    model1.add(Conv2D(1, (3, 3), activation='sigmoid', padding='same', activity_regularizer=regs.l2(1e-9)))

    model1.add(Lambda(lambda x_: x_ - 0.5))

    model1.load_weights("./dae/mnist")
    model1.compile(loss='mean_squared_error', metrics=['mean_squared_error'], optimizer='adam')

    model2 = Sequential()

    model2.add(Conv2D(32, (3, 3), input_shape=(28, 28, 1)))
    model2.add(Activation('relu'))
    model2.add(Conv2D(32, (3, 3)))
    model2.add(Activation('relu'))
    model2.add(MaxPooling2D(pool_size=(2, 2)))

    model2.add(Conv2D(64, (3, 3)))
    model2.add(Activation('relu'))
    model2.add(Conv2D(64, (3, 3)))
    model2.add(Activation('relu'))
    model2.add(MaxPooling2D(pool_size=(2, 2)))

    model2.add(Flatten())
    model2.add(Dense(200))
    model2.add(Activation('relu'))
    model2.add(Dropout(0.5))
    model2.add(Dense(200))
    model2.add(Activation('relu'))
    model2.add(Dense(10))

    model2.load_weights("./models/mnist")

    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct, logits=predicted)

    model2.compile(loss=fn, optimizer=SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True), metrics=['accuracy'])

    model = Sequential()
    model.add(model1)
    model.add(model2)
    model.compile(loss=fn, optimizer=SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True), metrics=['accuracy'])

    return model
Example #18
File: ann1.py Project: KSR4599/ANN
def build_classifier(optimizer):
    classifier = Sequential()
    classifier.add(Dense(units=6, kernel_initializer='uniform', activation='relu', input_dim=11))
    # Improving the ANN
    # Dropout Regularization to reduce overfitting if needed
    classifier.add(Dropout(0.1))
    classifier.add(Dense(units=6, kernel_initializer='uniform', activation='relu'))
    classifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))
    classifier.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])
    return classifier
Example #19
def build_regressor(optimizer):
    regressor = Sequential()
    regressor.add(Dense(units=6, kernel_initializer='uniform', activation='relu', input_dim=11))
    # Improving the ANN
    # Dropout Regularization to reduce overfitting if needed
    regressor.add(Dropout(0.1))
    regressor.add(Dense(units=6, kernel_initializer='uniform', activation='relu'))
    regressor.add(Dense(units=1, kernel_initializer='uniform', activation='linear'))
    regressor.compile(optimizer=optimizer, loss='mean_squared_error')
    return regressor
Example #20
def get_model():
    """
    get model
    """

    checkpoint = ModelCheckpoint(MODEL_NAME,
                                 monitor='val_acc',
                                 verbose=1,
                                 save_best_only=True)

    model = Sequential()
    input_shape = (IMAGE_SIZE, IMAGE_SIZE, 3)

    model.add(BatchNormalization(input_shape=input_shape))

    model.add(Conv2D(32, (3, 3), padding='same', activation='relu'))
    model.add(Conv2D(32, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=2))
    model.add(Dropout(0.25))

    model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=2))
    model.add(Dropout(0.25))

    model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
    model.add(Conv2D(128, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=2))
    model.add(Dropout(0.25))

    model.add(Conv2D(256, (3, 3), padding='same', activation='relu'))
    model.add(Conv2D(256, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=2))
    model.add(Dropout(0.25))

    model.add(Flatten())

    model.add(Dense(512, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(N_CLASSES, activation='sigmoid'))

    return model
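
Note that the checkpoint created inside get_model is never attached to training; only the model is returned. A hedged training sketch that recreates the callback and uses binary_crossentropy, the natural loss for the sigmoid multi-label head; MODEL_NAME, X_train, and y_train are assumed to exist:

from keras.callbacks import ModelCheckpoint

model = get_model()
model.compile(loss='binary_crossentropy', optimizer='adam',
              metrics=['accuracy'])
checkpoint = ModelCheckpoint(MODEL_NAME, monitor='val_acc',
                             verbose=1, save_best_only=True)
model.fit(X_train, y_train, batch_size=32, epochs=20,      # batch/epoch counts assumed
          validation_split=0.1, callbacks=[checkpoint])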
Example #21
    def default_model(input_shape, classes):

        from tensorflow.contrib.keras.api.keras.layers import LSTM, Dense, LeakyReLU, Dropout

        model = tf.keras.models.Sequential()

        model.add(LSTM(256, input_shape=input_shape, return_sequences=True))
        model.add(LeakyReLU(alpha=0.05))
        model.add(Dropout(0.2))

        model.add(LSTM(128))
        model.add(LeakyReLU(alpha=0.02))
        model.add(Dropout(0.1))

        model.add(Dense(32, activation="tanh"))
        model.add(Dropout(0.1))

        model.add(Dense(classes, activation="softmax"))

        return model
Example #22
    def init_model(self):
        with tf.variable_scope('cnn', reuse=(self.gpu_id != 0)):
            x = self.input_img
            x = tf.reshape(x, (-1, 28, 28, 1))
            x = Conv2D(20, (5, 5),
                       padding='same',
                       activation='relu',
                       name='conv1')(x)
            x = MaxPooling2D()(x)
            x = Conv2D(50, (5, 5),
                       padding='same',
                       activation='relu',
                       name='conv2')(x)
            x = MaxPooling2D()(x)
            x = Dropout(self.input_droprate)(x)
            x = Flatten()(x)
            x = Dense(500, activation='relu', name='fc1')(x)
            x = Dropout(self.input_droprate)(x)
            x = Dense(args.classes, activation='softmax', name='fc2')(x)

            self.output = x
Example #23
 def forward(self, x):
     if K.image_data_format() == 'channels_first':
         channel_axis = 1
     else:
         channel_axis = -1
     x = self.BN1(x) 
     x = self.relu(x)
     x = self.conv(x)
     x = self.BN2(x)
     x = self.relu2(x)
     new_features = self.conv2(x)                     #new_features = super(_DenseLayer, self).forward(x)
     if self.drop_rate > 0:
         new_features = Dropout(self.drop_rate)(new_features)     #new_features = F.dropout(new_features, p=self.drop_rate, training=self.training)
     return concatenate([x, new_features], axis=channel_axis)     #torch.cat([x, new_features], 1)
Example #24
    def lstm(self):
        """Build a simple LSTM network. We pass the extracted features from
        our CNN to this model predomenently."""
        # Model.
        model = Sequential()
        model.add(
            LSTM(2048,
                 return_sequences=False,
                 input_shape=self.input_shape,
                 dropout=0.5))
        model.add(Dense(512, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(self.nb_classes, activation='softmax'))

        return model
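
A hedged instantiation sketch: the docstring says the inputs are CNN feature sequences, so input_shape is (timesteps, feature_dim). The 40 x 2048 shape, the class count, and the SimpleNamespace stand-in for self are assumptions:

from types import SimpleNamespace

cfg = SimpleNamespace(input_shape=(40, 2048),   # 40 timesteps of 2048-d features (assumed)
                      nb_classes=10)
model = lstm(cfg)                     # method called as a plain function
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])
print(model.output_shape)             # (None, 10)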
Example #25
def build_classifier(optimizer):
    # The optimizer is passed in explicitly; the remaining hyperparameters are
    # supplied later to the fit method.
    Classifier = Sequential()
    Classifier.add(
        Dense(units=6,
              kernel_initializer='uniform',
              activation='relu',
              input_dim=11))
    Classifier.add(Dropout(0.1))
    Classifier.add(
        Dense(units=6, kernel_initializer='uniform', activation='relu'))
    Classifier.add(
        Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))
    Classifier.compile(optimizer=optimizer,
                       loss='binary_crossentropy',
                       metrics=['accuracy'])
    return Classifier
Example #26
def build_discriminator_dense():

    model = Sequential()
    model.add(Flatten(input_shape=url_shape))

    # Add arbitrary layers
    for size in discriminator_layers.split(":"):
        size = int(size)
        model.add(Dense(size, activation=discriminator_activation))
        model.add(Dropout(dropout_value))

    # Add the final layer, with a single output
    model.add(Dense(1, activation='sigmoid'))
    model.summary()

    # Build the model
    gen = Input(shape=url_shape)
    validity = model(gen)
    return Model(gen, validity)
Example #27
    def __init__(self):
        self.model = Sequential()
        self.model.add(
            Conv2D(32, (3, 3), input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3)))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))

        self.model.add(Conv2D(32, (3, 3)))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))

        self.model.add(Conv2D(64, (3, 3)))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))

        self.model.add(Flatten())
        self.model.add(Dense(64))
        self.model.add(Activation('relu'))
        self.model.add(Dropout(0.5))
        self.model.add(Dense(1))
        self.model.add(Activation('sigmoid'))
Example #28
def binary_conv(input, ksize, o_ch, padding, strides, name, dropout=0):
    with tf.variable_scope(name):
        x = input
        x = BatchNormalization(axis=3,
                               epsilon=1e-4,
                               momentum=0.9,
                               name='bn',
                               gamma_initializer=tf.random_uniform_initializer(
                                   0, 1))(x)
        xb = binary(x)

        if dropout > 0:
            xb = Dropout(dropout)(xb)

        i_ch = xb.shape.as_list()[-1]
        w = tf.get_variable('conv/kernel',
                            shape=[ksize, ksize, i_ch, o_ch],
                            initializer=tf.random_normal_initializer(
                                mean=0.0, stddev=0.05))
        b = tf.get_variable('conv/bias',
                            shape=[o_ch],
                            initializer=tf.constant_initializer(0))
        wb = binary2(w)
        if padding > 0:
            xb = tf.pad(
                xb, [[0, 0], [padding, padding], [padding, padding], [0, 0]])
        s = tf.nn.conv2d(xb,
                         wb,
                         strides=[1, strides, strides, 1],
                         padding='VALID')
        x = s + b

        x = myrelu(x, 'prelu')

    norm_op = tf.assign(w, w - tf.reduce_mean(w, axis=2, keep_dims=True))
    tf.add_to_collection('norm_op', norm_op)
    clip_op = tf.assign(w, tf.clip_by_value(w, -1, 1))
    tf.add_to_collection('clip_op', clip_op)
    return x
Example #29
File: dmnn.py Project: tobytoy/MotionGAN
    def classifier(self, x):
        scope = Scoping.get_global_scope()
        with scope.name_scope('classifier'):
            if self.data_set == 'NTURGBD':
                blocks = [{'size': 128, 'bneck': 32,  'groups': 16, 'strides': 1},
                          {'size': 256, 'bneck': 64,  'groups': 16, 'strides': 2},
                          {'size': 512, 'bneck': 128, 'groups': 16, 'strides': 2}]
                n_reps = 3
            else:
                blocks = [{'size': 64,  'bneck': 32, 'groups': 8, 'strides': 3},
                          {'size': 128, 'bneck': 64, 'groups': 8, 'strides': 3}]
                n_reps = 3

            def _data_augmentation(x):
                return K.in_train_phase(_sim_occlusions(_jitter_height(x)), x)

            x = Lambda(_data_augmentation, name=scope+"data_augmentation")(x)

            x = CombMatrix(self.njoints, name=scope+'comb_matrix')(x)

            x = EDM(name=scope+'edms')(x)
            x = Reshape((self.njoints * self.njoints, self.seq_len, 1), name=scope+'resh_in')(x)

            x = BatchNormalization(axis=-1, name=scope+'bn_in')(x)
            x = Conv2D(blocks[0]['bneck'], 1, 1, name=scope+'conv_in', **CONV2D_ARGS)(x)
            for i in range(len(blocks)):
                for j in range(n_reps):
                    with scope.name_scope('block_%d_%d' % (i, j)):
                        x = _conv_block(x, blocks[i]['size'], blocks[i]['bneck'],
                                        blocks[i]['groups'], 3, blocks[i]['strides'] if j == 0 else 1)

            x = Lambda(lambda args: K.mean(args, axis=(1, 2)), name=scope+'mean_pool')(x)
            x = BatchNormalization(axis=-1, name=scope + 'bn_out')(x)
            x = Activation('relu', name=scope + 'relu_out')(x)

            x = Dropout(self.dropout, name=scope+'dropout')(x)
            x = Dense(self.num_actions, activation='softmax', name=scope+'label')(x)

        return x
Example #30
    def __init__(self, img_size, img_channels=3, output_size=17):
        self.losses = []
        self.model = Sequential()
        self.model.add(
            BatchNormalization(input_shape=(img_size[0], img_size[1],
                                            img_channels)))

        self.model.add(Conv2D(32, (3, 3), padding='same', activation='relu'))
        self.model.add(Conv2D(32, (3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=2))
        self.model.add(Dropout(0.3))

        self.model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
        self.model.add(Conv2D(64, (3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=2))
        self.model.add(Dropout(0.3))

        self.model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
        self.model.add(Conv2D(128, (3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=2))
        self.model.add(Dropout(0.3))

        self.model.add(Conv2D(256, (3, 3), padding='same', activation='relu'))
        self.model.add(Conv2D(256, (3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=2))
        self.model.add(Dropout(0.3))

        self.model.add(Conv2D(512, (3, 3), padding='same', activation='relu'))
        self.model.add(Conv2D(512, (3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=2))
        self.model.add(Dropout(0.3))

        self.model.add(Flatten())

        self.model.add(Dense(512, activation='relu'))
        self.model.add(BatchNormalization())
        self.model.add(Dropout(0.5))

        self.model.add(Dense(output_size, activation='sigmoid'))