Example 1
def define_model2(vocab_size, max_length):
    # feature extractor (encoder)
    inputs1 = Input(shape=(5, 5, 2048))
    fe1 = GlobalMaxPooling2D()(inputs1)
    fe2 = Dense(128, activation='relu')(fe1)
    fe3 = RepeatVector(max_length)(fe2)
    # embedding
    inputs2 = Input(shape=(max_length, ))
    emb2 = Embedding(vocab_size, 50, mask_zero=True)(inputs2)
    emb3 = LSTM(512, return_sequences=True)(emb2)
    emb4 = TimeDistributed(Dense(128, activation='relu'))(emb3)
    # merge inputs
    merged = concatenate([fe3, emb4])
    # language model (decoder)
    lm2 = LSTM(512)(merged)
    lm3 = Dense(512, activation='relu')(lm2)
    outputs = Dense(vocab_size, activation='softmax')(lm3)
    # tie it together [image, seq] [word]
    model = Model(inputs=[inputs1, inputs2], outputs=outputs)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    # print(model.summary())
    # plot_model(model, show_shapes=True, to_file='plot.png')
    return model
Example 2
    def __init__(self,
                 n_comp=2,
                 model=None,
                 layers=[],
                 representation='max',
                 percentage_discard=0.1,
                 face_verif=False):
        if len(layers) == 0:
            # start at 1, since index 0 is the input layer
            self.layers = list(range(1, len(model.layers)))
        else:
            self.layers = layers

        if representation == 'max':
            self.pool = GlobalMaxPooling2D()
        elif representation == 'avg':
            self.pool = GlobalAveragePooling2D()
        else:
            self.pool = representation

        self.n_comp = n_comp
        self.scores = None
        self.score_layer = None
        self.idx_score_layer = []
        self.template_model = model
        self.conv_net = self.custom_model(model=model, layers=self.layers)
        self.percentage_discard = percentage_discard
        self.face_verif = face_verif
Example 3
def createModel():
    model = Sequential()
    model.add(Cropping2D(cropping=((50,20), (0,0)), input_shape=(160,320,3)))
    model.add(Lambda(stack))
    model.add(Lambda(lambda x: (x / 255.0) - 0.5))

    model.add(Conv2D(10, (5,5), activation='relu'))
    model.add(MaxPooling2D())

    model.add(Conv2D(20, (5,5), activation='relu'))
    model.add(MaxPooling2D())

    model.add(Conv2D(40, (5,5), activation='relu'))
    model.add(MaxPooling2D())

    model.add(Conv2D(80, (3,3), activation='relu'))
    model.add(GlobalMaxPooling2D())

    model.add(Dropout(0.5))
    model.add(Dense(1024))
    model.add(Dropout(0.5))
    model.add(Dense(1024))
    model.add(Dense(1))

    return model
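A minimal usage sketch for the network above. It assumes `stack` is a preprocessing helper defined elsewhere in the original project (an identity stand-in is used here just so the graph builds) and that the target is a single scalar such as a steering angle:

def stack(x):
    return x  # identity stand-in for the project's real `stack` helper

model = createModel()
model.compile(loss='mse', optimizer='adam')
# X: (N, 160, 320, 3) images, y: (N,) scalar targets -- placeholder names
# model.fit(X, y, validation_split=0.2, epochs=5)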
Example 4
def define_model(vocab_size, max_length):
    # feature extractor (encoder)
    inputs1 = Input(shape=(7, 7, 512))
    fe1 = GlobalMaxPooling2D()(inputs1)

    fe2 = Dense(128, activation='relu')(fe1)

    fe3 = RepeatVector(max_length)(fe2)

    # embedding encoder
    inputs2 = Input(shape=(max_length, ))
    emb2 = Embedding(vocab_size, 50, mask_zero=True)(inputs2)
    emb3 = LSTM(256, return_sequences=True)(emb2)
    emb4 = TimeDistributed(Dense(128, activation='relu'))(emb3)
    # merge inputs
    merged = concatenate([fe3, emb4])
    # language model (decoder)
    lm2 = LSTM(500)(merged)
    lm3 = Dense(500, activation='relu')(lm2)
    outputs = Dense(vocab_size, activation='softmax')(lm3)
    # Merging the models together [image, seq] [word]
    model = Model(inputs=[inputs1, inputs2], outputs=outputs)

    print("Wights loaded")

    model.load_weights('weghits/weghits.now.h5')
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    print(model.summary())
    return model
Example 5
def extract_layer(base_model, layer, pooling=None):
    # take the requested layer's output and optionally add a global pooling layer
    model = base_model.get_layer(layer).output
    if pooling == 'avg':
        model = GlobalAveragePooling2D()(model)
    elif pooling == 'max':
        model = GlobalMaxPooling2D()(model)
    return Model(inputs=base_model.input, outputs=model)
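A short usage sketch, assuming a pretrained VGG16 base from keras.applications; 'block4_pool' is one of VGG16's standard layer names:

from keras.applications.vgg16 import VGG16

base = VGG16(weights='imagenet', include_top=False)
feat_model = extract_layer(base, 'block4_pool', pooling='max')
# features = feat_model.predict(preprocessed_images)  # (N, H, W, 3) -> (N, 512)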
Example 6
def DivResNet(include_top=True,
              weights='imagenet',
              input_tensor=None,
              pooling='max',
              classes=num_classes):  # weights default to 'imagenet' and can be changed; the original head has 1000 classes
    # Determine proper input shape
    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1

    x = ZeroPadding2D((3, 3))(img_input)  # zero-pad the image to preserve feature-map size
    x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x)  # convolution layer
    x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)  # batch normalization
    x = Activation('relu')(x)  # activation
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)  # max pooling
    # stage2#
    x = conv_block(x, 3, [32, 32, 128], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [32, 32, 128], stage=2, block='b')
    # stage3#
    x = conv_block(x, 3, [64, 64, 256], stage=3, block='a')
    x = identity_block(x, 3, [64, 64, 256], stage=3, block='b')
    # stage4#
    x = conv_block(x, 3, [128, 128, 512], stage=4, block='a')
    x = identity_block(x, 3, [128, 128, 512], stage=4, block='b')
    # stage5#
    x = conv_block(x, 3, [256, 256, 1024], stage=5, block='a')
    x = identity_block(x, 3, [256, 256, 1024], stage=5, block='b')

    x = AveragePooling2D((4, 4), name='avr_pool')(x)  # average pooling

    if include_top:
        x = Flatten()(x)
        x = Dense(classes, activation='sigmoid', name='fc1000')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Model(inputs, x, name='resnet50')
    return model
Example 7
def se_block(blockInput, bottle=4):
    channel_cnt = int(blockInput.shape[-1])

    x = GlobalMaxPooling2D(data_format="channels_last")(blockInput)
    x = Dense(int(channel_cnt / bottle))(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = Dense(channel_cnt)(x)
    x = Activation("sigmoid")(x)
    x = Reshape((1, 1, channel_cnt))(x)
    x = Multiply()([blockInput, x])
    return x
Example 8
def create_model(MAX_QRY_LENGTH=50,
                 MAX_DOC_LENGTH=2900,
                 NUM_OF_FEATS=10,
                 PSGS_SIZE=[(50, 1)],
                 NUM_OF_FILTERS=5,
                 tau=1):
    alpha_size = len(PSGS_SIZE)
    psgMat = Input(shape=(
        MAX_QRY_LENGTH,
        MAX_DOC_LENGTH,
        1,
    ), name="passage")
    homoMat = Input(shape=(NUM_OF_FEATS, ), name="h_feats")
    # Conv2D, mean pooling and max pooling
    M, K, r = [], [], []
    for idx, PSG_SIZE in enumerate(PSGS_SIZE):
        tau = PSG_SIZE[0] // 2
        pool_size = (MAX_QRY_LENGTH - PSG_SIZE[0]) // tau + 1
        # Convolution
        m_1 = Convolution2D(filters=NUM_OF_FILTERS,
                            kernel_size=PSG_SIZE,
                            strides=tau,
                            padding='valid',
                            name="pConv2D_" + str(idx))(psgMat)
        M.append(m_1)
        # Mean pooling
        k_1 = AveragePooling2D(pool_size=(pool_size, 1),
                               strides=1,
                               name="pAvePool_" + str(idx))(M[idx])
        K.append(k_1)
        # Max Pooling
        r_1 = GlobalMaxPooling2D(name="pMaxPool_" + str(idx))(K[idx])
        r.append(r_1)
    concat_r = concatenate(r)
    # Fusion Matrix and predict relevance
    # get h(q, d)
    # MLP(DENSE(len(r(q,d))))
    phi_h = Dense(alpha_size, activation="softmax", name="TrainMat")(homoMat)
    dot_prod = dot([concat_r, phi_h], axes=1, name="rel_dot")
    # tanh(dot(r.transpose * h))
    #pred = Activation("tanh", name="activation_tanh")(dot_prod)
    pred = Dense(1, activation="sigmoid", name="activation_sigmoid")(dot_prod)

    # We now have everything we need to define our model.
    model = Model(inputs=[psgMat, homoMat], outputs=pred)
    model.summary()
    '''
    from keras.utils import plot_model
    plot_model(model, to_file='model.png')
    '''
    return model
Example 9
def scse_block(blockInput, bottle=4, ssigmoid=False, alpha=0.0001):
    channel_cnt = int(blockInput.shape[-1])
    img_size = int(blockInput.shape[1])
    img_size2 = img_size * img_size

    x = GlobalMaxPooling2D(data_format="channels_last")(blockInput)
    x = Dense(int(channel_cnt / bottle))(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = Dense(channel_cnt, kernel_regularizer=l2(alpha))(x)
    x = Activation("sigmoid")(x)
    x = Reshape((1, 1, channel_cnt))(x)
    x = Multiply()([blockInput, x])

    y = Conv2D(1, (1, 1), padding="same")(blockInput)
    if ssigmoid:
        y = Activation("elu")(y)
    if img_size >= 50:
        y_pooling = (4, 4) if img_size % 2 == 1 else (2, 2)
        kernel_size = (5, 5) if img_size % 2 == 1 else (3, 3)
        padding = "valid" if img_size % 2 == 1 else "same"

        y = MaxPooling2D(y_pooling)(y)

        pool_size = int(y.shape[1])
        pool_size2 = pool_size * pool_size

        y = Reshape((pool_size2, ))(y)
        y = Dense(int(pool_size2 / bottle))(y)
        y = BatchNormalization()(y)
        y = Activation("relu")(y)
        y = Dense(pool_size2, kernel_regularizer=l2(alpha))(y)
        y = Activation("sigmoid")(y)
        y = Reshape((pool_size, pool_size, 1))(y)
        y = Conv2DTranspose(1, kernel_size, strides=y_pooling,
                            padding=padding)(y)
    else:
        y = Reshape((img_size2, ))(y)
        y = Dense(int(img_size2 / bottle))(y)
        y = BatchNormalization()(y)
        y = Activation("relu")(y)
        y = Dense(img_size2, kernel_regularizer=l2(alpha))(y)
        y = Activation("sigmoid")(y)
        y = Reshape((img_size, img_size, 1))(y)

    y = Multiply()([blockInput, y])

    z = Add()([x, y])
    return z
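The block preserves the spatial shape of its input, so it can be dropped between any two encoder stages; a minimal sketch, assuming the same layer imports used by the block are in scope:

from keras.layers import Input, Conv2D
from keras.models import Model

inp = Input((64, 64, 3))
feat = Conv2D(32, (3, 3), padding='same', activation='relu')(inp)
out = scse_block(feat, bottle=4)  # spatial shape and channels preserved: (64, 64, 32)
model = Model(inp, out)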
Example 10
def make_critic(noise_dim=model_config["noise_dimension"]):
    # It is important in the WGAN-GP algorithm to NOT use batch normalization on the critic
    model = Sequential(name="critic")

    def add_block(n):
        model.add(Conv2D(n, (3, 3), kernel_initializer='he_normal'))
        model.add(LeakyReLU(alpha=0.2))
        model.add(MaxPooling2D())

    add_block(32)
    add_block(64)
    add_block(128)
    add_block(256)

    model.add(GlobalMaxPooling2D())
    model.add(Dense(1, kernel_initializer='he_normal'))
    return model
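A quick shape check, assuming 64x64 RGB inputs (the noise dimension is used elsewhere in the GAN, not by the critic itself):

critic = make_critic(noise_dim=100)
critic.build(input_shape=(None, 64, 64, 3))
critic.summary()  # ends in a single linear unit: an unbounded critic score, as WGAN-GP requires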
Example 11
    def build(self):

        inputs = Input(self.input_shape, name='inputs')

        layer1 = self._make_init_layer(inputs)
        layer2 = self._make_layer(layer1, layer_index=0)
        layer3 = self._make_layer(layer2, layer_index=1)
        layer4 = self._make_layer(layer3, layer_index=2)
        layer5 = self._make_layer(layer4, layer_index=3)
        gmp = GlobalMaxPooling2D(name='globalmaxpooling')(layer5)

        # if self.dropout:
        #     gmp = Dropout(self.dropout, name='dropout')(gmp)

        self.model = Model(inputs=inputs, outputs=gmp, name=self.name)

        return self
Example 12
    def layer(input_tensor):
        channel_cnt = int(input_tensor.shape[-1])
        x = GlobalMaxPooling2D(data_format="channels_last")(input_tensor)
        x = Dense(int(channel_cnt // re))(x)
        x = BatchNormalization()(x)
        x = Activation("relu")(x)
        x = Dense(channel_cnt)(x)
        x = Activation("sigmoid")(x)
        x = Reshape((1, 1, channel_cnt))(x)
        x = Multiply()([input_tensor, x])

        y = Conv2D(1, (1, 1), padding="same")(input_tensor)
        y = Activation("sigmoid")(y)
        y = Multiply()([input_tensor, y])

        z = Add()([x, y])
        return z
Example 13
    def _network(net):
        f = filter_size
        for l in layers:
            net = Conv2D(l, (f, f), activation='relu',
                         padding='same')(net)
            net = MaxPooling2D(pool_size=[2, 2])(net)
            if norm: net = BatchNormalization(axis=-1)(net)

        if global_pooling:
            net = GlobalMaxPooling2D()(net)
        else:
            net = Flatten()(net)

        for l in fcl:
            net = Dense(l)(net)
            if norm: net = BatchNormalization(axis=-1)(net)
            if dropout: net = Dropout(dropout)(net)

        return net
Example 14
def MobileNetSlim(input_shape, alpha, depth_multiplier=1, output_classes=1, dropout=0.4):
	input = Input(shape=input_shape, name='flow')

	x = _conv_block(input, 32, alpha, strides=(2, 2))
	x = _depthwise_conv_block(x, 64, alpha, depth_multiplier, block_id=1)

	x = _depthwise_conv_block(x, 128, alpha, depth_multiplier, strides=(2, 2), block_id=2)
	x = _depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=3)

	x = _depthwise_conv_block(x, 256, alpha, depth_multiplier, strides=(2, 2), block_id=4)
	x = _depthwise_conv_block(x, 256, alpha, depth_multiplier, block_id=5)

	x = GlobalMaxPooling2D()(x)
	x = Dense(64, kernel_regularizer=regularizers.l2(0.01))(x)
	x = Activation('elu')(x)
	output = Dense(1, name='speed')(x)

	model = Model(inputs=input, outputs=output, name='optical_flow_encoder')
	return model
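A sketch of instantiating the encoder, assuming the _conv_block/_depthwise_conv_block helpers from the stock Keras MobileNet implementation are in scope; the two-channel input shape is a guess matching a dense optical-flow field:

flow_model = MobileNetSlim(input_shape=(160, 320, 2), alpha=1.0)
flow_model.compile(loss='mse', optimizer='adam')  # scalar speed regression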
Example 15
def up_conv_block_seunet(x, x2, f, dropout=False):
    def attention(x):

        feature_map = x[0]
        coef = x[1]
        coef = K.expand_dims(K.expand_dims(coef, axis=-2), axis=-2)

        x = coef * feature_map

        return x

    x = UpSampling2D(size=(2, 2))(x)

    channels_nb = K.int_shape(x2)[-1]

    if channels_nb == 16:
        channels_nb_bottleneck = channels_nb // 16
    else:
        channels_nb_bottleneck = channels_nb // 32

    x3 = GlobalMaxPooling2D()(x2)
    x3 = Dense(channels_nb_bottleneck, activation='relu')(x3)
    x3 = Dense(channels_nb,
               activation='sigmoid',
               activity_regularizer=regularizers.l1(0.01))(x3)

    y = Lambda(attention)([x2, x3])

    x = Concatenate(axis=-1)([x, y])

    f_new = f + channels_nb

    x = Conv2D(f_new, (3, 3), padding="same")(x)
    x = Conv2D(f_new, (3, 3), padding="same")(x)

    x = BatchNormalization(axis=-1)(x)
    if dropout:
        x = Dropout(0.5)(x)

    x = Activation("relu")(x)

    return x
Example 16
def define_model(tokenizer, vocab_size, max_length):
    # feature extractor (encoder)
    inputs1 = Input(shape=(7, 7, 512))
    fe1 = GlobalMaxPooling2D()(inputs1)
    fe2 = Dense(128, activation='relu')(fe1)
    fe3 = RepeatVector(max_length)(fe2)
    # embedding
    inputs2 = Input(shape=(max_length,))
    emb2 = load_embedding(tokenizer, vocab_size, max_length)(inputs2)
    emb3 = LSTM(256, return_sequences=True)(emb2)
    emb4 = TimeDistributed(Dense(128, activation='relu'))(emb3)
    # merge inputs
    merged = concatenate([fe3, emb4])
    # language model (decoder)
    lm2 = LSTM(500)(merged)
    lm3 = Dense(500, activation='relu')(lm2)
    outputs = Dense(vocab_size, activation='softmax')(lm3)
    # tie it together [image, seq] [word]
    model = Model(inputs=[inputs1, inputs2], outputs=outputs)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
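All of the define_model variants in this collection are trained the same way; a hedged sketch with placeholder array names:

# X1: (N, 7, 7, 512)   pre-extracted CNN feature maps
# X2: (N, max_length)  integer-encoded, zero-padded input word sequences
# y:  (N, vocab_size)  one-hot next-word targets
model = define_model(tokenizer, vocab_size, max_length)
# model.fit([X1, X2], y, epochs=10, batch_size=64)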
Example 17
def define_model(vocab_size, max_length):
    """
    The model to generate SMILES strings

    """
    # feature extractor (encoder)
    # features from VGG16 model will be of shape (7,7,512)
    inputs1 = Input(shape=(7, 7, 512))

    # using activations from intermediate layer will have shape ((14,14,512))
    # inputs1 = Input(shape=(14,14,512))

    # ultimately bring photo features down to (max_length, 64) so they can be merged at "merged"
    # GlobalMaxPooling2D outputs a 2D tensor of shape (batch_size, channels), so (None, 512)
    fe1 = GlobalMaxPooling2D()(inputs1)
    # Dense will output (None, 64)
    fe2 = Dense(64, activation='relu')(fe1)
    # RepeatVector will output (max_length, 64)
    fe3 = RepeatVector(max_length)(fe2)
    # SMILES embedding branch outputs (max_length, 128)
    inputs2 = Input(shape=(max_length, ))
    emb2 = Embedding(vocab_size, 50, mask_zero=True)(inputs2)
    emb3 = LSTM(256, return_sequences=True)(emb2)
    emb4 = TimeDistributed(Dense(128, activation='relu'))(emb3)
    # merge the (max_length, 64) fe3 and (max_length, 128) emb4 branches
    merged = concatenate([fe3, emb4])
    # language model
    lm2 = LSTM(500)(merged)
    lm3 = Dense(500, activation='relu')(lm2)
    outputs = Dense(vocab_size, activation='softmax')(lm3)

    model = Model(inputs=[inputs1, inputs2], outputs=outputs)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    print(model.summary())

    return model
Example 18
def base_network(input_size, descriptor_size, trainable=False):
    '''
    Define the base network model
    :param input_size: shape of input images
    :param descriptor_size: embedding size used to encode input images
    :param trainable: whether the ResNet50 base layers are trainable
    :return: network model
    '''
    base_model = ResNet50(input_shape=input_size,
                          weights='imagenet',
                          include_top=False)
    # base_model.trainable = trainable

    x = GlobalMaxPooling2D(name='global_max_1')(
        base_model.get_layer('activation_49').output)
    # x = GlobalMaxPooling2D(name='global_max_1')(base_model.get_layer('block4_pool').output)
    x = Dense(descriptor_size * 4,
              kernel_regularizer=l2(1e-3),
              activation='relu',
              kernel_initializer='he_uniform',
              name='dense_descriptor_1')(x)
    x = Dense(descriptor_size * 2,
              kernel_regularizer=l2(1e-3),
              activation='relu',
              kernel_initializer='he_uniform',
              name='dense_descriptor_2')(x)
    descriptor = Dense(descriptor_size,
                       kernel_regularizer=l2(1e-3),
                       kernel_initializer='he_uniform',
                       name='dense_descriptor_3')(x)
    norm_descriptor = Lambda(lambda x: K.l2_normalize(x, axis=-1))(descriptor)
    network = Model(inputs=[base_model.input], outputs=[norm_descriptor])
    # return norm_descriptor
    network.summary()

    for layer in base_model.layers:
        layer.trainable = trainable

    return network
Example 19
def channel_attention(input_feature, ratio=8):

    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    channel = input_feature._keras_shape[channel_axis]

    shared_layer_one = Dense(channel // ratio,
                             activation='relu',
                             kernel_initializer='he_normal',
                             use_bias=True,
                             bias_initializer='zeros')
    shared_layer_two = Dense(channel,
                             kernel_initializer='he_normal',
                             use_bias=True,
                             bias_initializer='zeros')

    avg_pool = GlobalAveragePooling2D()(input_feature)
    avg_pool = Reshape((1, 1, channel))(avg_pool)
    assert avg_pool._keras_shape[1:] == (1, 1, channel)
    avg_pool = shared_layer_one(avg_pool)
    assert avg_pool._keras_shape[1:] == (1, 1, channel // ratio)
    avg_pool = shared_layer_two(avg_pool)
    assert avg_pool._keras_shape[1:] == (1, 1, channel)

    max_pool = GlobalMaxPooling2D()(input_feature)
    max_pool = Reshape((1, 1, channel))(max_pool)
    assert max_pool._keras_shape[1:] == (1, 1, channel)
    max_pool = shared_layer_one(max_pool)
    assert max_pool._keras_shape[1:] == (1, 1, channel // ratio)
    max_pool = shared_layer_two(max_pool)
    assert max_pool._keras_shape[1:] == (1, 1, channel)

    cbam_feature = Add()([avg_pool, max_pool])
    cbam_feature = Activation('sigmoid')(cbam_feature)

    if K.image_data_format() == "channels_first":
        cbam_feature = Permute((3, 1, 2))(cbam_feature)

    return multiply([input_feature, cbam_feature])
Example 20
def build_model(img_size=128):
    inputs = Input((img_size, img_size, 3))

    x = Conv2D(8, (3, 3), activation='relu', padding='same')(inputs)
    x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
    x = MaxPooling2D((2, 2))(x)

    x = Conv2D(16, (3, 3), activation='relu', padding='same')(x)
    x = Conv2D(16, (3, 3), activation='relu', padding='same')(x)
    x = MaxPooling2D((2, 2))(x)

    x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)
    x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)
    x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)
    x = MaxPooling2D((2, 2))(x)

    x = GlobalMaxPooling2D()(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.1)(x)
    outputs = Dense(2, activation='softmax')(x)

    model = Model(inputs=[inputs], outputs=[outputs])
    return model
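A minimal compile-and-train sketch for the two-way softmax classifier above, assuming one-hot labels:

model = build_model(img_size=128)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# X: (N, 128, 128, 3) images, y: (N, 2) one-hot labels -- placeholder names
# model.fit(X, y, validation_split=0.1, epochs=10)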
Example 21
def get_model(classes):
    model = Sequential()

    model.add(
        Convolution2D(64, (3, 3),
                      activation="relu",
                      input_shape=(None, None, 3)))
    model.add(Convolution2D(64, (3, 3), activation="relu", padding="same"))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Convolution2D(128, (3, 3), activation='relu', padding='same'))
    model.add(Convolution2D(128, (3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Convolution2D(256, (3, 3), activation='relu', padding='same'))
    model.add(Convolution2D(256, (3, 3), activation='relu', padding='same'))
    model.add(Convolution2D(256, (3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Convolution2D(512, (3, 3), activation='relu', padding='same'))
    model.add(Convolution2D(512, (3, 3), activation='relu', padding='same'))
    model.add(Convolution2D(512, (3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Convolution2D(512, (3, 3), activation='relu', padding='same'))
    model.add(Convolution2D(512, (3, 3), activation='relu', padding='same'))
    model.add(Convolution2D(512, (3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(GlobalMaxPooling2D())
    model.add(Dense(512, activation='relu'))
    model.add(Dense(512, activation='relu'))
    model.add(Dense(classes, activation='softmax'))

    #     model.summary()

    sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
Example 22
def define_model(vocab_size, max_length):
    # feature extractor (encoder)
    inputs1 = Input(shape=(5, 5, 2048))
    fe1 = GlobalMaxPooling2D()(inputs1)
    fe2 = Dense(128, activation='relu')(fe1)
    fe3 = RepeatVector(max_length)(fe2)
    # embedding
    inputs2 = Input(shape=(max_length, ))
    emb2 = Embedding(vocab_size, 50, mask_zero=True)(inputs2)
    emb3 = LSTM(512, return_sequences=True)(emb2)
    emb4 = TimeDistributed(Dense(128, activation='relu'))(emb3)
    # merge inputs
    merged = concatenate([fe3, emb4])
    # language model (decoder)
    lm2 = LSTM(512)(merged)
    lm3 = Dense(512, activation='relu')(lm2)
    outputs = Dense(vocab_size, activation='softmax')(lm3)
    # tie it together [image, seq] [word]
    model = Model(inputs=[inputs1, inputs2], outputs=outputs)

    # # Learning rate for the initial phase of training.
    # initial_learning_rate = 2.0
    # learning_rate_decay_factor = 0.5
    # num_epochs_per_decay = 8.0

    # # If not None, clip gradients to this value.
    # clip_gradients = 5.0

    # # Optimizer for training the model.
    # sgd = optimizers.SGD(lr=initial_learning_rate, clipvalue=clip_gradients)

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    # print(model.summary())
    # plot_model(model, show_shapes=True, to_file='plot.png')
    return model
Example 23
def __create_mobilenet(classes, img_input, include_top, alpha,
                       depth_multiplier, dropout, pooling):
    ''' Creates a MobileNet model with specified parameters
    Args:
        classes: Number of output classes
        img_input: Input tensor or layer
        include_top: Flag to include the last dense layer
        alpha: width multiplier of the MobileNet.
        depth_multiplier: depth multiplier for depthwise convolution
                          (also called the resolution multiplier)
        dropout: dropout rate
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model will be
                the 4D tensor output of the
                last convolutional layer.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will
                be applied.
    Returns: a Keras Model
    '''

    x = __conv_block(img_input, 32, alpha, strides=(2, 2))
    x = __depthwise_conv_block(x, 64, alpha, depth_multiplier, id=1)

    x = __depthwise_conv_block(x,
                               128,
                               alpha,
                               depth_multiplier,
                               strides=(2, 2),
                               id=2)
    x = __depthwise_conv_block(x, 128, alpha, depth_multiplier, id=3)

    x = __depthwise_conv_block(x,
                               256,
                               alpha,
                               depth_multiplier,
                               strides=(2, 2),
                               id=4)
    x = __depthwise_conv_block(x, 256, alpha, depth_multiplier, id=5)

    x = __depthwise_conv_block(x,
                               512,
                               alpha,
                               depth_multiplier,
                               strides=(2, 2),
                               id=6)
    x = __depthwise_conv_block(x, 512, alpha, depth_multiplier, id=7)
    x = __depthwise_conv_block(x, 512, alpha, depth_multiplier, id=8)
    x = __depthwise_conv_block(x, 512, alpha, depth_multiplier, id=9)
    x = __depthwise_conv_block(x, 512, alpha, depth_multiplier, id=10)
    x = __depthwise_conv_block(x, 512, alpha, depth_multiplier, id=11)

    x = __depthwise_conv_block(x,
                               1024,
                               alpha,
                               depth_multiplier,
                               strides=(2, 2),
                               id=12)
    x = __depthwise_conv_block(x, 1024, alpha, depth_multiplier, id=13)

    if include_top:
        if K.image_data_format() == 'channels_first':
            shape = (int(1024 * alpha), 1, 1)
        else:
            shape = (1, 1, int(1024 * alpha))

        x = GlobalAveragePooling2D()(x)
        x = Reshape(shape, name='reshape_1')(x)
        x = Dropout(dropout, name='dropout')(x)
        x = Convolution2D(classes, (1, 1), padding='same',
                          name='conv_preds')(x)
        x = Activation('softmax', name='act_softmax')(x)
        x = Reshape((classes, ), name='reshape_2')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)
    return x
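The builder returns a tensor rather than a Model; a sketch of wrapping it, assuming the __conv_block/__depthwise_conv_block helpers are defined in the same module (double-underscore names are only mangled inside class bodies):

img_input = Input(shape=(224, 224, 3))
x = __create_mobilenet(classes=1000, img_input=img_input, include_top=True,
                       alpha=1.0, depth_multiplier=1, dropout=1e-3, pooling=None)
model = Model(img_input, x, name='mobilenet')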
Example 24
class L2Model:
    ''' Class of forward model'''
    def __init__(self):

        self.network = VGG16(weights='imagenet')

    def fit(self, (generator_training, n_train), (generator_val, n_val)):
        '''Trains the model for a fixed number of epochs and iterations.
           # Arguments
                X_train: input data, as a Numpy array or list of Numpy arrays
                    (if the model has multiple inputs).
                Y_train : labels, as a Numpy array.
                batch_size: integer. Number of samples per gradient update.
                learning_rate: float, learning rate
                nb_epoch: integer, the number of epochs to train the model.
                validation_split: float (0. < x < 1).
                    Fraction of the data to use as held-out validation data.
                validation_data: tuple (x_val, y_val) or tuple
                    (x_val, y_val, val_sample_weights) to be used as held-out
                    validation data. Will override validation_split.
                it: integer, number of iterations of the algorithm

                

            # Returns
                A `History` object. Its `History.history` attribute is
                a record of training loss values and metrics values
                at successive epochs, as well as validation loss values
                and validation metrics values (if applicable).
            '''

        if pool is None:
            for pop in range(nbPop):
                self.network.pop()

            if dropoutConf == -1:
                self.network.layers[-2].rate = 0.0
            elif dropoutConf == 1:
                self.network.add(Dropout(0.5))
            elif dropoutConf == 2:
                self.network.layers[-2].rate = 0.0
                self.network.add(Dropout(0.5))

        else:
            for pop in range(4):
                self.network.pop()
            if pool == "max":
                self.network.add(GlobalMaxPooling2D())
            elif pool == "avg":
                self.network.add(GlobalAveragePooling2D())
            else:
                print "ERROR: pooling not valide"
                exit(-1)

        if BNBA:
            self.network.layers[-1].activation = Activation('linear')
            self.network.add(BatchNormalization())
            self.network.add(Activation('relu'))
        elif BN:

            self.network.add(BatchNormalization())
        self.network.add(Dense(LOW_DIM, activation='linear', trainable=True))

        self.network.summary()

        # train only some layers
        for layer in self.network.layers[:layer_nb]:
            layer.trainable = False
        for layer in self.network.layers[layer_nb:]:
            layer.trainable = True
        self.network.layers[-1].trainable = True

        # compile the model

        self.network.compile(optimizer=optim, loss='mse', metrics=['mae'])

        self.network.summary()
        csv_logger = CSVLogger(ROOTPATH + "VGG16_" + PB_FLAG + "_" + idOar +
                               '_training.log')

        checkpointer = ModelCheckpoint(filepath=ROOTPATH + "VGG16_" + PB_FLAG +
                                       "_" + idOar + "_weights.hdf5",
                                       monitor='val_loss',
                                       verbose=1,
                                       save_weights_only=True,
                                       save_best_only=True,
                                       mode='min')

        early_stopping = EarlyStopping(monitor='val_loss', patience=PATIENCE)

        class CheckNan(Callback):
            def on_batch_end(self, batch, logs={}):
                if math.isnan(logs.get('loss')):
                    print "\nReach a NAN\n"
                    sys.exit()

        # train the model on the new data for a few epochs
        if epochLength < 0:
            spe = n_train
        else:
            spe = epochLength

        self.network.fit_generator(
            generator_training,
            samples_per_epoch=spe,
            nb_epoch=NB_EPOCH * int(n_train / (1.0 * spe)),
            verbose=1,
            callbacks=[checkpointer, csv_logger, early_stopping,
                       CheckNan()],
            validation_data=generator_val,
            nb_val_samples=n_val)

        self.network.load_weights(ROOTPATH + "VGG16_" + PB_FLAG + "_" + idOar +
                                  "_weights.hdf5")
Example 25
    def _GlobalMax_heads(self, outputs):
        # transpose to [batch_size, dim, max_seq_len, num_heads] so pooling
        # reduces over dim and max_seq_len, leaving (batch_size, num_heads)
        outputs = tf.transpose(outputs, [0, 3, 2, 1])
        outputs = GlobalMaxPooling2D()(outputs)
        return outputs
Example 26
def VGG16(include_top=True,
          weights='imagenet',
          input_tensor=None,
          input_shape=None,
          pooling='Max',
          classes=1000):

    if weights not in {'imagenet', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `imagenet` '
                         '(pre-training on ImageNet).')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError('If using `weights` as imagenet with `include_top`'
                         ' as true, `classes` should be 1000')
    # Determine proper input shape
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=224,
                                      min_size=48,
                                      data_format=K.image_data_format(),
                                      include_top=include_top)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    # Block 1
    x = Conv2D(64, (3, 3),
               activation='relu',
               padding='same',
               name='block1_conv1')(img_input)
    x = Conv2D(64, (3, 3),
               activation='relu',
               padding='same',
               name='block1_conv2')(x)
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)

    # Block 2
    x = Conv2D(128, (3, 3),
               activation='relu',
               padding='same',
               name='block2_conv1')(x)
    x = Conv2D(128, (3, 3),
               activation='relu',
               padding='same',
               name='block2_conv2')(x)
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)

    # Block 3
    x = Conv2D(256, (3, 3),
               activation='relu',
               padding='same',
               name='block3_conv1')(x)
    x = Conv2D(256, (3, 3),
               activation='relu',
               padding='same',
               name='block3_conv2')(x)
    x = Conv2D(256, (3, 3),
               activation='relu',
               padding='same',
               name='block3_conv3')(x)
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)

    # Block 4
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block4_conv1')(x)
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block4_conv2')(x)
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block4_conv3')(x)
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)

    # Block 5
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block5_conv1')(x)
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block5_conv2')(x)
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block5_conv3')(x)
    x = AveragePooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)  # named so the weight-conversion branch below can find it

    if include_top:
        # Classification block
        x = Flatten(name='flatten')(x)
        x = Dense(4096, activation='relu', name='fc1')(x)
        x = Dense(4096, activation='relu', name='fc2')(x)
        x = Dense(classes, activation='softmax', name='predictions')(x)

    else:
        if pooling == 'Average':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'Max':
            x = GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Model(inputs, x, name='vgg16')

    # load weights
    if weights == 'imagenet':
        if include_top:
            weights_path = get_file(
                'vgg16_weights_tf_dim_ordering_tf_kernels.h5',
                WEIGHTS_PATH,
                cache_subdir='models')
        else:
            weights_path = get_file(
                'vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',
                WEIGHTS_PATH_NO_TOP,
                cache_subdir='models')
        model.load_weights(weights_path)
        if K.backend() == 'theano':
            layer_utils.convert_all_kernels_in_model(model)

        if K.image_data_format() == 'channels_first':
            if include_top:
                maxpool = model.get_layer(name='block5_pool')
                shape = maxpool.output_shape[1:]
                dense = model.get_layer(name='fc1')
                layer_utils.convert_dense_weights_data_format(
                    dense, shape, 'channels_first')

            if K.backend() == 'tensorflow':
                warnings.warn('You are using the TensorFlow backend, yet you '
                              'are using the Theano '
                              'image data format convention '
                              '(`image_data_format="channels_first"`). '
                              'For best performance, set '
                              '`image_data_format="channels_last"` in '
                              'your Keras config '
                              'at ~/.keras/keras.json.')
    return model
Example 27
def resnet50(include_top=True,
             weights='imagenet',
             input_tensor=None,
             input_shape=None,
             pooling=None,
             classes=1000):

    RESNET50_WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels.h5'  #'https://github.com/rcmalli/keras-vggface/releases/download/v2.0/rcmalli_vggface_tf_resnet50.h5'
    RESNET50_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'  #'https://github.com/rcmalli/keras-vggface/releases/download/v2.0/rcmalli_vggface_tf_notop_resnet50.h5'

    input_shape = _obtain_input_shape(input_shape,
                                      default_size=224,
                                      min_size=32,
                                      data_format=K.image_data_format(),
                                      require_flatten=include_top,
                                      weights=weights)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    if K.image_data_format() == "channels_last":
        bn_axis = 3
    else:
        bn_axis = 1

    x = Conv2D(64, (7, 7),
               use_bias=False,
               strides=(2, 2),
               padding='same',
               name='conv1/7x7_s2')(img_input)
    x = BatchNormalization(axis=bn_axis, name='conv1/7x7_s2/bn')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = resnet_conv_block(x,
                          3, [64, 64, 256],
                          stage=2,
                          block=1,
                          strides=(1, 1))
    x = resnet_identity_block(x, 3, [64, 64, 256], stage=2, block=2)
    x = resnet_identity_block(x, 3, [64, 64, 256], stage=2, block=3)

    x = resnet_conv_block(x, 3, [128, 128, 512], stage=3, block=1)
    x = resnet_identity_block(x, 3, [128, 128, 512], stage=3, block=2)
    x = resnet_identity_block(x, 3, [128, 128, 512], stage=3, block=3)
    x = resnet_identity_block(x, 3, [128, 128, 512], stage=3, block=4)

    x = resnet_conv_block(x, 3, [256, 256, 1024], stage=4, block=1)
    x = resnet_identity_block(x, 3, [256, 256, 1024], stage=4, block=2)
    x = resnet_identity_block(x, 3, [256, 256, 1024], stage=4, block=3)
    x = resnet_identity_block(x, 3, [256, 256, 1024], stage=4, block=4)
    x = resnet_identity_block(x, 3, [256, 256, 1024], stage=4, block=5)
    x = resnet_identity_block(x, 3, [256, 256, 1024], stage=4, block=6)

    x = resnet_conv_block(x, 3, [512, 512, 2048], stage=5, block=1)
    x = resnet_identity_block(x, 3, [512, 512, 2048], stage=5, block=2)
    x = resnet_identity_block(x, 3, [512, 512, 2048], stage=5, block=3)

    x = AveragePooling2D((7, 7), name='avg_pool')(x)

    if include_top:
        x = Flatten()(x)
        x = Dense(classes, activation='softmax', name='classifier')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)

    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input

    model = Model(inputs, x, name='vggface_resnet50')

    model.summary()
    '''
    if weights == 'imagenet':
        if include_top:
            weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels.h5',
                                    RESNET50_WEIGHTS_PATH,
                                    cache_subdir='./models')
        else:
            weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
                                     RESNET50_WEIGHTS_PATH_NO_TOP,
                                    cache_dir="./models")
        
        model.load_weights(weights_path)

        if K.backend() == "theano":
            layer_utils.convert_all_kernels_in_model(model)
            if include_top:
                maxpool = model.get_layer(name='avg_pool')
                shape = maxpool.output_shape[1:]
                dense = model.get_layer(name='classifier')
                layer_utils.convert_dense_weights_data_format(dense, shape, 'channels_first')
            
        if K.image_data_format() == "channels_first" and K.backend() == 'tensorflow':
            warnings.warn('You are using the TensorFlow backend, yet you '
                          'are using the Theano '
                          'image data format convention '
                          '(`image_data_format="channels_first"`). '
                          'For best performance, set '
                          '`image_data_format="channels_last"` in '
                          'your Keras config '
                          'at ~/.keras/keras.json.')
    elif weights is not None:
        model.load_weights(weights)
    '''

    return model
Example 28
def InceptionV4(include_top=True,
                weights='imagenet',
                classes=1001,
                pooling='avg',
                input_shape=(299, 299, 3),
                dropout_keep_prob=0.2):
    '''

    Args:
        classes: number of classes
        dropout_keep_prob: float, the fraction to keep before final layer.
        weights: 'imagenet' or None
        include_top: whether to include the top layers
        input_shape: input shape
        pooling: type of pooling in the end (if no top layers is selected)

    Returns:
        logits: the logits outputs of the model.
    '''

    # Input Shape is 299 x 299 x 3 (tf) or 3 x 299 x 299 (th)
    if K.image_data_format() == 'channels_first':
        inputs = Input(tuple(reversed(input_shape)))
    else:
        inputs = Input(input_shape)

    # Make inception base
    x = inception_v4_base(inputs)

    # Final pooling and prediction
    if include_top:
        # 1 x 1 x 1536
        x = AveragePooling2D((8, 8), padding='valid')(x)
        x = Dropout(dropout_keep_prob)(x)
        x = Flatten()(x)
        # 1536
        x = Dense(units=classes, activation='softmax')(x)

    model = Model(inputs, x, name='inception_v4')

    # load weights
    if weights == 'imagenet':
        if K.image_data_format() == 'channels_first':
            if K.backend() == 'tensorflow':
                warnings.warn('You are using the TensorFlow backend, yet you '
                              'are using the Theano '
                              'image data format convention '
                              '(`image_data_format="channels_first"`). '
                              'For best performance, set '
                              '`image_data_format="channels_last"` in '
                              'your Keras config '
                              'at ~/.keras/keras.json.')
        if include_top:
            weights_path = get_file(
                'inception-v4_weights_tf_dim_ordering_tf_kernels.h5',
                WEIGHTS_PATH,
                cache_subdir='models',
                md5_hash='9fe79d77f793fe874470d84ca6ba4a3b')
            model.load_weights(weights_path)
        else:
            weights_path = get_file(
                'inception-v4_weights_tf_dim_ordering_tf_kernels_notop.h5',
                WEIGHTS_PATH_NO_TOP,
                cache_subdir='models',
                md5_hash='9296b46b5971573064d12e4669110969')
            model.load_weights(weights_path)

    # apply the requested global pooling whenever the top layers are excluded,
    # regardless of whether pretrained weights were loaded
    if not include_top:
        if pooling == 'max':
            pool = GlobalMaxPooling2D()(model.output)
        else:
            pool = GlobalAveragePooling2D()(model.output)
        model = Model(model.input, pool)

    return model
Example 29
def __create_res_next_imagenet(nb_classes, img_input, include_top, depth, cardinality=32, width=4,
                               weight_decay=5e-4, pooling=None):
    ''' Creates a ResNeXt model with specified parameters
    Args:
        nb_classes: Number of output classes
        img_input: Input tensor or layer
        include_top: Flag to include the last dense layer
        depth: Depth of the network. Either an integer or a list of integers.
        cardinality: cardinality of the grouped convolutions.
               Increasing cardinality improves classification accuracy.
        width: Width of the network.
        weight_decay: weight_decay (l2 norm)
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model will be
                the 4D tensor output of the
                last convolutional layer.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will
                be applied.
    Returns: a Keras Model
    '''

    if type(depth) is list or type(depth) is tuple:
        # If a list is provided, defer to user how many blocks are present
        N = list(depth)
    else:
        # Otherwise, default to 3 blocks each of default number of group convolution blocks
        N = [(depth - 2) // 9 for _ in range(3)]

    filters = cardinality * width
    filters_list = []

    for i in range(len(N)):
        filters_list.append(filters)
        filters *= 2  # double the size of the filters

    x = __initial_conv_block_inception(img_input, weight_decay)

    # block 1 (no pooling)
    for i in range(N[0]):
        x = __bottleneck_block(x, filters_list[0], cardinality, strides=1, weight_decay=weight_decay)

    N = N[1:]  # remove the first block from block definition list
    filters_list = filters_list[1:]  # remove the first filter from the filter list

    # block 2 to N
    for block_idx, n_i in enumerate(N):
        for i in range(n_i):
            if i == 0:
                x = __bottleneck_block(x, filters_list[block_idx], cardinality, strides=2,
                                       weight_decay=weight_decay)
            else:
                x = __bottleneck_block(x, filters_list[block_idx], cardinality, strides=1,
                                       weight_decay=weight_decay)

    if include_top:
        x = GlobalAveragePooling2D()(x)
        x = Dense(nb_classes, use_bias=False, kernel_regularizer=l2(weight_decay),
                  kernel_initializer='he_normal', activation='softmax')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)

    return x
Example 30
def main(unused_argv):

    sess = tf.Session()
    K.set_session(sess)

    model = Sequential()
    # Keras layers can be called on TensorFlow tensors:
    model.add(
        Conv2D(16,
               kernel_size=(3, 3),
               activation='relu',
               input_shape=(img_size, img_size, 3)))
    #model.add(Conv2D(16, kernel_size = (3,3), activation = 'relu',))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    #model.add(Conv2D(32, kernel_size = (3,3), activation = 'relu'))
    model.add(Conv2D(32, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    #model.add(Conv2D(64, kernel_size = (3, 3), activation = 'relu'))
    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    #model.add(Conv2D(128, kernel_size = (3, 3), activation = 'relu'))
    model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
    #print(model.layers[-1].output)
    model.add(GlobalMaxPooling2D())
    model.add(Dropout(.4))
    model.add(
        Dense(128, activation='relu',
              kernel_regularizer=regularizers.l2(0.01)))
    model.add(Dropout(.4))
    model.add(
        Dense(64, activation='relu', kernel_regularizer=regularizers.l2(0.01)))
    model.add(Dropout(.4))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])

    train_datagen = ImageDataGenerator(rescale=1. / 255,
                                       shear_range=0.2,
                                       zoom_range=0.2,
                                       horizontal_flip=True)
    # this is the augmentation configuration we will use for testing:
    # only rescaling
    test_datagen = ImageDataGenerator(rescale=1. / 255)
    #print(input_fn())
    # this is a generator that will read pictures found in
    # subfolders of 'data/train', and indefinitely generate
    # batches of augmented image data
    train_generator = train_datagen.flow_from_directory(
        'C:\\Users\\User\\Documents\\FirstRoundTraining',  # target directory
        target_size=(img_size, img_size),  # all images resized to img_size x img_size
        batch_size=batch_size,
        class_mode='binary'
    )  # binary labels to match the binary_crossentropy loss
    validation_generator = test_datagen.flow_from_directory(
        'C:\\Users\\User\\Documents\\SecondRoundTraining',
        target_size=(img_size, img_size),
        batch_size=batch_size,
        class_mode='binary')
    val = model.fit_generator(
        train_generator,
        steps_per_epoch=NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN // batch_size,
        epochs=epochs,
        validation_data=validation_generator,
        validation_steps=NUM_EXAMPLES_PER_EPOCH_FOR_EVAL // batch_size,
        callbacks=[
            TensorBoard(log_dir='SnekChecker' + model_num),
            ModelCheckpoint(model_num + 'try.hdf5',
                            save_best_only=True,
                            mode='min')
        ])
    predict_datagen = ImageDataGenerator(rescale=1. / 255)
    predict_generator = predict_datagen.flow_from_directory(
        'C:\\Users\\User\\Documents\\CheckSneks',  # target directory
        target_size=(img_size, img_size),  # resized to img_size x img_size
        batch_size=batch_size,
        shuffle=False,
        class_mode='binary')

    val = model.predict_generator(predict_generator,
                                  steps=num_pics // batch_size + 1)
    files = sorted(os.listdir(directory + "\\Snakes"))
    val = zip(files, val[:num_pics])
    with open(model_num + "_predictions.txt", 'w+') as f:
        [f.write(str(x) + ':' + str(y) + '\n') for x, y in val]