from tensorflow.keras import layers
from tensorflow.keras import Input, Model

lstm = layers.LSTM(32)

left_input = Input(shape=(None, 128))
left_output = lstm(left_input)

right_input = Input(shape=(None, 128))
right_output = lstm(right_input)

merged = layers.concatenate([left_output, right_output], axis=-1)
predictions = layers.Dense(1, activation='sigmoid')(merged)

model = Model([left_input, right_input], predictions)

model.summary()

# model.fit([left_data, right_data], targets)
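
# A minimal smoke test of the shared-LSTM model above, assuming random NumPy
# arrays stand in for real paired sequences (num_pairs and timesteps are
# illustrative values):
import numpy as np

num_pairs, timesteps = 100, 20
left_data = np.random.random((num_pairs, timesteps, 128))
right_data = np.random.random((num_pairs, timesteps, 128))
targets = np.random.randint(0, 2, size=(num_pairs, 1))

model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
model.fit([left_data, right_data], targets, epochs=1, batch_size=32)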

# First Residual Block Group of 64 filters
for _ in range(3):
    x = bottleneck_block(64, x)

# Double the number of filters and halve each spatial dimension (strides=(2, 2), a 75% reduction in feature-map area) to fit the next Residual Group
x = conv_block(128, x)

# Second Residual Block Group of 128 filters
for _ in range(3):
    x = bottleneck_block(128, x)

# Double the number of filters and halve each spatial dimension (strides=(2, 2), a 75% reduction in feature-map area) to fit the next Residual Group
x = conv_block(256, x)

# Third Residual Block Group of 256 filters
for _ in range(22):
    x = bottleneck_block(256, x)

# Double the number of filters and halve each spatial dimension (strides=(2, 2), a 75% reduction in feature-map area) to fit the next Residual Group
x = conv_block(512, x)

# Fourth Residual Block Group of 512 filters
for _ in range(2):
    x = bottleneck_block(512, x)

# Now Pool at the end of all the convolutional residual blocks
x = layers.GlobalAveragePooling2D()(x)

# Final Dense Outputting Layer for 1000 outputs
outputs = layers.Dense(1000, activation='softmax')(x)

model = Model(inputs, outputs)
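
# The ResNet snippet above assumes two helper functions that are not shown.
# A minimal sketch of what they might look like (standard bottleneck residual
# design; the exact originals may differ):
def conv_block(n_filters, x):
    # Projection block: doubles the filters and downsamples with strides=(2, 2).
    shortcut = layers.Conv2D(4 * n_filters, (1, 1), strides=(2, 2))(x)
    x = layers.Conv2D(n_filters, (1, 1), strides=(2, 2), activation='relu')(x)
    x = layers.Conv2D(n_filters, (3, 3), padding='same', activation='relu')(x)
    x = layers.Conv2D(4 * n_filters, (1, 1))(x)
    return layers.ReLU()(layers.add([x, shortcut]))

def bottleneck_block(n_filters, x):
    # Identity block: 1x1 -> 3x3 -> 1x1 convolutions with a residual shortcut.
    shortcut = x
    x = layers.Conv2D(n_filters, (1, 1), activation='relu')(x)
    x = layers.Conv2D(n_filters, (3, 3), padding='same', activation='relu')(x)
    x = layers.Conv2D(4 * n_filters, (1, 1))(x)
    return layers.ReLU()(layers.add([x, shortcut]))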
Example #3
 def _update_models(self):
     item_function = self.layers[1](self.layers[0](self.inputs[1]))
     self.item_model = Model(self.inputs[1], item_function)
     print(self.gru.weights)
Example #4
x = inputs

# loop over the number of filters
for f in filters:
    # apply a CONV => RELU => BN operation
    x = Conv2D(f, (3, 3), strides=2, padding="same")(x)
    x = LeakyReLU(alpha=0.2)(x)
    x = BatchNormalization(axis=chanDim)(x)

# flatten the network and then construct our latent vector
volumeSize = K.int_shape(x)
x = Flatten()(x)
latent = Dense(latentDim)(x)

# build the encoder model
encoder = Model(inputs, latent, name="encoder")

# start building the decoder model which will accept the
# output of the encoder as its inputs
latentInputs = Input(shape=(latentDim, ))
x = Dense(np.prod(volumeSize[1:]))(latentInputs)
x = Reshape((volumeSize[1], volumeSize[2], volumeSize[3]))(x)

# loop over our number of filters again, but this time in
# reverse order
for f in filters[::-1]:
    # apply a CONV_TRANSPOSE => RELU => BN operation
    x = Conv2DTranspose(f, (3, 3), strides=2, padding="same")(x)
    x = LeakyReLU(alpha=0.2)(x)
    x = BatchNormalization(axis=chanDim)(x)
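
# The excerpt stops before the decoder's output layer. A minimal completion,
# assuming `depth` is the number of channels in the input images (the
# original may differ):
x = Conv2DTranspose(depth, (3, 3), padding="same")(x)
outputs = Activation("sigmoid", name="decoded")(x)
decoder = Model(latentInputs, outputs, name="decoder")

# The autoencoder is the composition of the two halves:
autoencoder = Model(inputs, decoder(encoder(inputs)), name="autoencoder")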
Example #5
def build_stage2_generator():
    """
    Create Stage-II generator containing the CA Augmentation Network,
    the image encoder and the generator network
    """

    # 1. CA Augmentation Network
    input_layer = Input(shape=(1024, ))
    input_lr_images = Input(shape=(64, 64, 3))

    ca = Dense(256)(input_layer)
    mean_logsigma = LeakyReLU(alpha=0.2)(ca)
    c = Lambda(generate_c)(mean_logsigma)

    # 2. Image Encoder
    x = ZeroPadding2D(padding=(1, 1))(input_lr_images)
    x = Conv2D(128, kernel_size=(3, 3), strides=1, use_bias=False)(x)
    x = ReLU()(x)

    x = ZeroPadding2D(padding=(1, 1))(x)
    x = Conv2D(256, kernel_size=(4, 4), strides=2, use_bias=False)(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)

    x = ZeroPadding2D(padding=(1, 1))(x)
    x = Conv2D(512, kernel_size=(4, 4), strides=2, use_bias=False)(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)

    # 3. Joint
    c_code = Lambda(joint_block)([c, x])

    x = ZeroPadding2D(padding=(1, 1))(c_code)
    x = Conv2D(512, kernel_size=(3, 3), strides=1, use_bias=False)(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)

    # 4. Residual blocks
    x = residual_block(x)
    x = residual_block(x)
    x = residual_block(x)
    x = residual_block(x)

    # 5. Upsampling blocks
    x = UpSampling2D(size=(2, 2))(x)
    x = Conv2D(512, kernel_size=3, padding="same", strides=1,
               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)

    x = UpSampling2D(size=(2, 2))(x)
    x = Conv2D(256, kernel_size=3, padding="same", strides=1,
               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)

    x = UpSampling2D(size=(2, 2))(x)
    x = Conv2D(128, kernel_size=3, padding="same", strides=1,
               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)

    x = UpSampling2D(size=(2, 2))(x)
    x = Conv2D(64, kernel_size=3, padding="same", strides=1, use_bias=False)(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)

    x = Conv2D(3, kernel_size=3, padding="same", strides=1, use_bias=False)(x)
    x = Activation('tanh')(x)

    model = Model(inputs=[input_layer, input_lr_images],
                  outputs=[x, mean_logsigma])
    return model
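
# `generate_c` and `joint_block` are assumed helpers not shown in this excerpt.
# A plausible sketch based on the StackGAN paper: `generate_c` applies the
# reparameterization trick to the (mean, log sigma) vector, and `joint_block`
# tiles the conditioning code spatially and concatenates it with the image
# features. The originals may differ in dimensions:
import tensorflow as tf

def generate_c(mean_logsigma):
    mean, log_sigma = mean_logsigma[:, :128], mean_logsigma[:, 128:]
    epsilon = tf.random.normal(tf.shape(mean))
    return mean + tf.exp(log_sigma) * epsilon

def joint_block(args):
    c, x = args
    # Replicate c over the spatial dimensions of x, then concatenate.
    c = tf.reshape(c, (-1, 1, 1, c.shape[-1]))
    c = tf.tile(c, (1, x.shape[1], x.shape[2], 1))
    return tf.concat([c, x], axis=-1)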
Example #6
    def train_hinsage(self, S, node_identifiers, label, batch_size, epochs):
        """
        
        This function trains a HinSAGE model, implemented in Tensorflow.
        It returns the trained HinSAGE model and a pandas DataFrame containing the embeddings generated for the training nodes.
        
        Parameters
        ----------
        S : StellarGraph Object
            The graph on which HinSAGE trains its aggregator functions.
        node_identifiers : list
            Defines the nodes that HinSAGE uses to train its aggregation functions.
        label: Pandas dataframe
            Defines the label of the nodes used for training, with the index representing the nodes.
        batch_size: int
            batch size to train the neural network in which HinSAGE is implemented.
        epochs: int
            Number of epochs for the neural network.
        
        """
        from stellargraph.layer import MeanHinAggregator, MaxHinAggregator, MaxPoolingHinAggregator

        # The mapper feeds data from sampled subgraph to GraphSAGE model
        train_node_identifiers = node_identifiers[:round(0.8 *
                                                         len(node_identifiers)
                                                         )]
        train_labels = label.loc[train_node_identifiers]
        validation_node_identifiers = node_identifiers[
            round(0.8 * len(node_identifiers)):]
        validation_labels = label.loc[validation_node_identifiers]
        generator = HinSAGENodeGenerator(
            S,
            batch_size,
            self.num_samples,
            head_node_type=self.embedding_for_node_type)
        train_gen = generator.flow(train_node_identifiers,
                                   train_labels,
                                   shuffle=True)
        test_gen = generator.flow(validation_node_identifiers,
                                  validation_labels)

        # HinSAGE model
        model = HinSAGE(layer_sizes=[self.embedding_size] *
                        len(self.num_samples),
                        generator=generator,
                        aggregator=MeanHinAggregator,
                        dropout=0)
        x_inp, x_out = model.build()

        # Final estimator layer
        prediction = layers.Dense(units=1,
                                  activation="sigmoid",
                                  dtype='float32')(x_out)

        # Create Keras model for training
        model = Model(inputs=x_inp, outputs=prediction)
        model.compile(
            optimizer=optimizers.Adam(learning_rate=1e-3),
            loss=binary_crossentropy,
        )

        # Train Model
        model.fit(train_gen,
                  epochs=epochs,
                  verbose=1,
                  validation_data=test_gen,
                  shuffle=False)

        trained_model = Model(inputs=x_inp, outputs=x_out)
        train_gen_not_shuffled = generator.flow(node_identifiers,
                                                label,
                                                shuffle=False)
        embeddings_train = trained_model.predict(train_gen_not_shuffled)

        train_emb = pd.DataFrame(embeddings_train, index=node_identifiers)

        return trained_model, train_emb
def build_generator(options, name="Generator"):

    initializer = tf.random_normal_initializer(0.0, 0.02)

    inputs = Input(shape=(options.time_step, options.pitch_range, options.output_nc))

    x = inputs
    # (batch * 64 * 84 * 1)

    x = layers.Lambda(padding, name="PADDING_1")(x)
    # (batch * 70 * 90 * 1)

    x = layers.Conv2D(
        filters=options.gf_dim,
        kernel_size=7,
        strides=1,
        padding="valid",
        kernel_initializer=initializer,
        use_bias=False,
        name="CONV2D_1",
    )(x)
    x = InstanceNorm(x.shape[-1:])(x)
    x = layers.ReLU()(x)
    # (batch * 64 * 84 * 64)

    x = layers.Conv2D(
        filters=options.gf_dim * 2,
        kernel_size=3,
        strides=2,
        padding="same",
        kernel_initializer=initializer,
        use_bias=False,
        name="CONV2D_2",
    )(x)
    x = InstanceNorm(x.shape[-1:])(x)
    x = layers.ReLU()(x)
    # (batch * 32 * 42 * 128)

    x = layers.Conv2D(
        filters=options.gf_dim * 4,
        kernel_size=3,
        strides=2,
        padding="same",
        kernel_initializer=initializer,
        use_bias=False,
        name="CONV2D_3",
    )(x)
    x = InstanceNorm(x.shape[-1:])(x)
    x = layers.ReLU()(x)
    # (batch * 16 * 21 * 256)

    for i in range(10):
        # x = resnet_block(x, options.gf_dim * 4)
        x = ResNetBlock()(x, options.gf_dim * 4, initializer)
    # (batch * 16 * 21 * 256)

    x = layers.Conv2DTranspose(
        filters=options.gf_dim * 2,
        kernel_size=3,
        strides=2,
        padding="same",
        kernel_initializer=initializer,
        use_bias=False,
        name="DECONV2D_1",
    )(x)
    x = InstanceNorm(x.shape[-1:])(x)
    x = layers.ReLU()(x)
    # (batch * 32 * 42 * 128)

    x = layers.Conv2DTranspose(
        filters=options.gf_dim,
        kernel_size=3,
        strides=2,
        padding="same",
        kernel_initializer=initializer,
        use_bias=False,
        name="DECONV2D_2",
    )(x)
    x = InstanceNorm(x.shape[-1:])(x)
    x = layers.ReLU()(x)
    # (batch * 64 * 84 * 64)

    x = layers.Lambda(padding, name="PADDING_2")(x)
    # After padding, (batch * 70 * 90 * 64)

    x = layers.Conv2D(
        filters=options.output_nc,
        kernel_size=7,
        strides=1,
        padding="valid",
        kernel_initializer=initializer,
        activation="sigmoid",
        use_bias=False,
        name="CONV2D_4",
    )(x)
    # (batch * 64 * 84 * 1)

    outputs = x

    return Model(inputs=inputs, outputs=outputs, name=name)
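
# `padding` and `InstanceNorm` are assumed helpers that are not shown in this
# excerpt. Judging by the shape comments (64 x 84 -> 70 x 90), `padding` adds
# 3 rows/columns on each side so the 7x7 'valid' convolutions preserve the
# input size. A minimal sketch (the original may use a different padding mode):
def padding(x):
    return tf.pad(x, [[0, 0], [3, 3], [3, 3], [0, 0]], mode="REFLECT")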
Example #8
# On top of it we stick two fully-connected layers. Because we are facing a
# two-class classification problem, i.e. a binary classification problem, we
# will end our network with a sigmoid activation, so that the output of our
# network will be a single scalar between 0 and 1, encoding the probability
# that the current image is class 1 (as opposed to class 0).
# Flatten feature map to a 1-dim tensor so we can add fully connected layers
x = layers.Flatten()(x)

# Create a fully connected layer with ReLU activation and 512 hidden units
x = layers.Dense(512, activation='relu')(x)

# Create output layer with a single node and sigmoid activation
output = layers.Dense(1, activation='sigmoid')(x)

# Create model:
# input = input feature map
# output = input feature map + stacked convolution/maxpooling layers + fully
# connected layer + sigmoid output layer
model = Model(img_input, output)

model.summary()

from tensorflow.keras.optimizers import RMSprop

model.compile(loss='binary_crossentropy',
              optimizer=RMSprop(learning_rate=0.001),
              metrics=['acc'])


# data generation
# Let's set up data generators that will read pictures in our source folders,
# convert them to float32 tensors, and feed them (with their labels) to our
# network. We'll have one generator for the training images and one for the
# validation images. Our generators will yield batches of 20 images of size
# 150x150 and their labels (binary).
#
# As you may already know, data that goes into neural networks should usually
# be normalized in some way to make it more amenable to processing by the
# network. (It is uncommon to feed raw pixels into a convnet.) In our case, we
# will preprocess our images by normalizing the pixel values to be in the
# [0, 1] range (originally all values are in the [0, 255] range).
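
# A sketch of the generators described above, assuming `train_dir` and
# `validation_dir` point at the training and validation image folders:
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# All images will be rescaled by 1./255
train_datagen = ImageDataGenerator(rescale=1./255)
val_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(150, 150),
    batch_size=20,
    class_mode='binary')

validation_generator = val_datagen.flow_from_directory(
    validation_dir,
    target_size=(150, 150),
    batch_size=20,
    class_mode='binary')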
Example #9
# load weights into the agent
agent.load_model(file_path='models/{:s}/'.format(version), iteration=iteration)
'''
# make some moves
for i in range(3):
    env.print_game()
    action = agent.move(s)
    next_s, _, _, _, _ = env.step(action)
    s = next_s.copy()
env.print_game()
'''

# define temporary model to get intermediate outputs
layer_num = 1
model_temp = Model(inputs=agent._model.input,
                   outputs=agent._model.layers[layer_num].output)
output_temp = model_temp.predict(s.reshape(1, board_size, board_size,
                                           frames))[0, :, :, :]
print('selected layer shape : ', output_temp.shape)

# save layer weights
plt.clf()
w = agent._model.layers[layer_num].weights[0].numpy()
nrows, ncols = (w.shape[2] * w.shape[3]) // 8, 8
fig, axs = plt.subplots(nrows, ncols, figsize=(17, 17))
for i in range(nrows):
    for j in range(ncols):
        axs[i][j].imshow(w[:, :, j % 2, i * (ncols // 2) + (j // 2)],
                         cmap='gray')
fig.savefig('images/weight_visual_{:s}_{:04d}_conv{:d}.png'
            .format(version, iteration, layer_num))
Example #10
def CNN2(images, y, params=None):
    print(params)
    x_train, x_test, y_train, y_test = train_test_split(images,
                                                        y,
                                                        test_size=0.2,
                                                        stratify=y,
                                                        random_state=100
                                                        )
    x_train = np.array(x_train)
    x_test = np.array(x_test)

    image_size = x_train.shape[1]
    image_size2 = x_train.shape[2]

    x_train = np.reshape(x_train, [-1, image_size, image_size2, 1])
    x_test = np.reshape(x_test, [-1, image_size, image_size2, 1])


    kernel = params["kernel"]
    kernel2 = int(kernel / 2)
    inputs = Input(shape=(image_size, image_size2, 1))

    X = Conv2D(32, (kernel, kernel), activation='relu', name='conv0')(inputs)
    X = Dropout(rate=params['dropout1'])(X)
    X = Conv2D(64, (kernel, kernel), activation='relu', name='conv1')(X)
    X = Dropout(rate=params['dropout2'])(X)
    X = Conv2D(128, (kernel, kernel), activation='relu', name='conv2')(X)
    X = Flatten()(X)
    X = Dense(256, activation='relu', kernel_initializer='glorot_uniform')(X)
    X = Dense(1024, activation='relu', kernel_initializer='glorot_uniform')(X)
    X = Dense(2, activation='softmax', kernel_initializer='glorot_uniform')(X)

    model = Model(inputs, X)
    adam = Adam(params["learning_rate"])

    model.compile(loss='categorical_crossentropy',  # matches the 2-way softmax with one-hot labels
                  optimizer=adam,
                  metrics=['acc'])

    # Train the model.
    hist = model.fit(
        x_train,
        y_train,
        epochs=params["epoch"],
        verbose=2,
        validation_data=(x_test, y_test),
        batch_size=params["batch"],
        callbacks=[EarlyStopping(monitor='val_loss', mode='min', patience=10),
                   ModelCheckpoint(filepath='best_model.h5', monitor='val_loss', save_best_only=True)]
    )
    model.load_weights('best_model.h5')

    y_test = np.argmax(y_test, axis=1)

    Y_predicted = model.predict(x_test, verbose=0, use_multiprocessing=True, workers=12)

    Y_predicted = np.argmax(Y_predicted, axis=1)

    cf = confusion_matrix(y_test, Y_predicted)

    return model, {"balanced_accuracy_val": balanced_accuracy_score(y_test, Y_predicted) * 100,
                   "TP_val": cf[0][0],
                   "FN_val": cf[0][1], "FP_val": cf[1][0], "TN_val": cf[1][1]
                   }
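
# A usage sketch for CNN2, showing the hyperparameter keys the function reads
# (the values here are illustrative, and `images`/`y` are the caller's data):
params = {"kernel": 3, "dropout1": 0.25, "dropout2": 0.25,
          "learning_rate": 1e-4, "epoch": 50, "batch": 32}
model, scores = CNN2(images, y, params=params)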
def nnUNet_2D(image_shape, feature_maps=32, max_fa=480, num_pool=8, 
              k_init='he_normal', optimizer="sgd", lr=0.002, n_classes=1):
    """Create nnU-Net 2D. This implementations tries to be a Keras version of the
       original nnU-Net 2D presented in `nnU-Net Github <https://github.com/MIC-DKFZ/nnUNet>`_.
                                                                                
       Parameters
       ----------
       image_shape : 2D tuple
           Dimensions of the input image.              
                                                                                
       feature_map : ints, optional
           Feature maps to start with in the first level of the U-Net (will be 
           duplicated on each level). 

       max_fa : int, optional
           Number of maximum feature maps allowed to used in conv layers.
        
       num_pool : int, optional
           number of pooling (downsampling) operations to do.

       k_init : string, optional
           Kernel initialization for convolutional layers.                                                         
                                                                           
       optimizer : str, optional
           Optimizer used to minimize the loss function. Posible options: 
           ``sgd`` or ``adam``.                 
                                                                           
       lr : float, optional
           Learning rate value.                          
        
       n_classes: int, optional
           Number of classes.
                                                                           
       Returns
       -------                                                                 
       model : Keras model
           Model containing the U-Net.              
    """

    dynamic_dim = (None,)*(len(image_shape)-1) + (image_shape[-1],)
    x = Input(dynamic_dim)
    #x = Input(image_shape)
    inputs = x

    l = []
    seg_outputs = []
    fa_save = []
    fa = feature_maps

    # ENCODER
    x = StackedConvLayers(x, fa, k_init, first_conv_stride=1)
    fa_save.append(fa)
    fa = fa*2 if fa*2 < max_fa else max_fa
    l.append(x)

    # conv_blocks_context
    for i in range(num_pool-1):
        x = StackedConvLayers(x, fa, k_init)
        fa_save.append(fa)
        fa = fa*2 if fa*2 < max_fa else max_fa
        l.append(x)

    # BOTTLENECK
    x = StackedConvLayers(x, fa, k_init, first_conv_stride=(1,2))

    # DECODER
    for i in range(len(fa_save)):
        if i == 0:
            x = Conv2DTranspose(fa_save[-(i+1)], (1, 2), use_bias=False,
                                strides=(1, 2), padding='valid') (x)
        else:
            x = Conv2DTranspose(fa_save[-(i+1)], (2, 2), use_bias=False,
                                strides=(2, 2), padding='valid') (x)
        x = concatenate([x, l[-(i+1)]])

        # conv_blocks_localization
        x = StackedConvLayers(x, fa_save[-(i+1)], k_init, first_conv_stride=1)
        seg_outputs.append(Conv2D(n_classes, (1, 1), use_bias=False, activation="softmax") (x))   

    model = Model(inputs=inputs, outputs=seg_outputs)

    # Select the optimizer
    if optimizer == "sgd":
        opt = tf.keras.optimizers.SGD(
            learning_rate=lr, momentum=0.99, decay=0.0, nesterov=False)
    elif optimizer == "adam":
        opt = tf.keras.optimizers.Adam(
            learning_rate=lr, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0,
            amsgrad=False)
    else:
        raise ValueError("Error: optimizer value must be 'sgd' or 'adam'")


    # Calculate the weights as nnU-Net does
    ################# Here we wrap the loss for deep supervision ############
    # we need to know the number of outputs of the network
    net_numpool = num_pool

    # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases
    # this gives higher resolution outputs more weight in the loss
    weights = np.array([1 / (2 ** i) for i in range(net_numpool)])

    # we don't use the lowest-resolution output. Normalize weights so that they sum to 1
    mask = np.array([True] + [True if i < net_numpool - 1 else False for i in range(1, net_numpool)])
    weights[~mask] = 0
    weights = weights / weights.sum()
    weights = weights[::-1] 
    ################# END ###################
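    # Worked example, assuming num_pool = 5:
    #   raw weights:            [1, 0.5, 0.25, 0.125, 0.0625]
    #   after masking:          [1, 0.5, 0.25, 0.125, 0]
    #   normalized:             [0.533, 0.267, 0.133, 0.067, 0]
    #   reversed (low -> high): [0, 0.067, 0.133, 0.267, 0.533]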

    # Compile the model
    model.compile(optimizer=opt, loss='binary_crossentropy',
                  metrics=[jaccard_index_softmax], loss_weights=list(weights))

    return model
Example #12
def CNN_Nature(images, y, param=None):
    print(param)
    x_train, x_test, y_train, y_test = train_test_split(images,
                                                        y,
                                                        test_size=0.2,
                                                        stratify=y,
                                                        random_state=100)
    x_train = np.array(x_train)
    x_test = np.array(x_test)

    image_size = x_train.shape[1]
    image_size2 = x_train.shape[2]

    x_train = np.reshape(x_train, [-1, image_size, image_size2, 1])
    x_test = np.reshape(x_test, [-1, image_size, image_size2, 1])

    num_filters = param["filter"]
    num_filters2 = param["filter2"]

    kernel = param["kernel"]

    inputs = Input(shape=(image_size, image_size2, 1))
    print(x_train.shape)
    out = Conv2D(filters=num_filters,
                 kernel_size=(kernel, kernel),
                 padding="same")(inputs)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = MaxPooling2D(strides=2, pool_size=2)(out)

    out = Conv2D(filters=2 * num_filters,
                 kernel_size=(kernel, kernel),
                 padding="same")(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = MaxPooling2D(strides=2, pool_size=2)(out)

    out = Conv2D(filters=4 * num_filters,
                 kernel_size=(kernel, kernel),
                 padding="same")(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)

    # layer 2
    out2 = Conv2D(filters=num_filters2,
                  kernel_size=(kernel, kernel),
                  padding="same")(inputs)
    out2 = BatchNormalization()(out2)
    out2 = Activation('relu')(out2)
    out2 = MaxPooling2D(strides=2, pool_size=2)(out2)

    out2 = Conv2D(filters=2 * num_filters2,
                  kernel_size=(kernel, kernel),
                  padding="same")(out2)
    out2 = BatchNormalization()(out2)
    out2 = Activation('relu')(out2)
    out2 = MaxPooling2D(strides=2, pool_size=2)(out2)

    out2 = Conv2D(filters=4 * num_filters2,
                  kernel_size=(kernel, kernel),
                  padding="same")(out2)
    out2 = BatchNormalization()(out2)
    out2 = Activation('relu')(out2)

    # final layer
    outf = Concatenate()([out, out2])
    out_f = AveragePooling2D(strides=2, pool_size=2)(outf)
    out_f = Flatten()(out_f)
    predictions = Dense(2, activation='softmax')(out_f)

    # This creates a model that includes
    # the Input layer and three Dense layers
    model = Model(inputs=inputs, outputs=predictions)

    adam = Adam(learning_rate=param["learning_rate"])

    # Compile the model.
    model.compile(
        optimizer=adam,
        loss='categorical_crossentropy',
        metrics=['accuracy'],
    )

    # Train the model.
    hist = model.fit(
        x_train,
        y_train,
        epochs=param["epoch"],
        verbose=2,
        validation_data=(x_test, y_test),
        batch_size=param["batch"],
        callbacks=[EarlyStopping(monitor='val_loss', mode='min', patience=10),
                   ModelCheckpoint(filepath='best_model.h5', monitor='val_loss', save_best_only=True)]
    )
    model.load_weights('best_model.h5')

    y_test = np.argmax(y_test, axis=1)

    Y_predicted = model.predict(x_test, verbose=0, use_multiprocessing=True, workers=12)

    Y_predicted = np.argmax(Y_predicted, axis=1)

    cf = confusion_matrix(y_test, Y_predicted)

    return model, {"balanced_accuracy_val": balanced_accuracy_score(y_test, Y_predicted) * 100, "TP_val": cf[0][0],
                   "FN_val": cf[0][1], "FP_val": cf[1][0], "TN_val": cf[1][1]
                   }
Example #13
strategy = tf.distribute.MirroredStrategy()
print('Number of devices: {}'.format(strategy.num_replicas_in_sync))

with strategy.scope():
    base_model = VGG16(weights='imagenet',
                       include_top=False,
                       input_shape=(32, 32, 3))
    # Extract the last layer from third block of vgg16 model
    last = base_model.get_layer('block3_pool').output
    # Add classification layers on top of it
    x = Flatten()(last)
    x = Dense(256, activation='relu')(x)
    x = Dropout(0.5)(x)
    pred = Dense(10, activation='sigmoid')(x)

    model = Model(base_model.input, pred)

    # set the base model's layers to non-trainable
    # uncomment next two lines if you don't want to
    # train the base model
    # for layer in base_model.layers:
    #     layer.trainable = False

    # compile the model with a SGD/momentum optimizer
    # and a very slow learning rate.
    model.compile(loss='binary_crossentropy',
                  optimizer=tf.optimizers.SGD(learning_rate=args.learning_rate,
                                              momentum=0.9),
                  metrics=['accuracy'])

# prepare data augmentation configuration
 def findModel(self):
     #max val accuracy, epoch at max val_acc, model type of max val_acc
     mx = 0
     epochAtMx = 0
     modelType = ""
     #Set initial hyperparameters for training runs
     batch_size=16
     n_epochs=10
     lr = 0.001
     models = ['vgg16','mobilenet','inception','inception_resnet','xception']
     #create directory for saved model files with highest val_accuracy
     save_dir = os.path.join(os.getcwd(), 'saved_models')
     if not os.path.isdir(save_dir):
         os.makedirs(save_dir)
     #model params to set for each training run
     preprocessingFunc = None
     model = None
     removeLayer = 0
     freezeLayers = 0
     #batches to set for each training run
     train_batches = None
     valid_batches = None
     test_batches = None
     
     #Perform training for each model type, saving the best models and validation accuracies.
     #Record best overall val_accuracy,epoch,model
     for i in range(len(models)):
         model_name = models[i]
         #callback for saving checkpoint at best validation accuracy
         model_filename = '%s_model.{epoch:03d}.h5' % model_name
         filepath = os.path.join(save_dir, model_filename)
         # prepare callback for model saving
         checkpoint = ModelCheckpoint(filepath=filepath,
                                    monitor='val_accuracy',
                                    verbose=1,
                                    save_best_only=True)
           
         if model_name == 'vgg16':
             preprocessingFunc = tf.keras.applications.vgg16.preprocess_input
             model = tf.keras.applications.vgg16.VGG16()
             removeLay = -2
             freezeLay = -3
         elif model_name == 'mobilenet':
             preprocessingFunc = tf.keras.applications.mobilenet.preprocess_input
             model = tf.keras.applications.mobilenet.MobileNet()
             removeLay = -6
             freezeLay = -5
         elif model_name == 'inception':
             preprocessingFunc = tf.keras.applications.inception_v3.preprocess_input
             model = tf.keras.applications.inception_v3.InceptionV3()
             removeLay = -1
             freezeLay = -5
         elif model_name == 'inception_resnet':
             preprocessingFunc = tf.keras.applications.inception_resnet_v2.preprocess_input
             model = tf.keras.applications.inception_resnet_v2.InceptionResNetV2()
             removeLay = -1
             freezeLay = -5
         elif model_name == 'xception':
             preprocessingFunc = tf.keras.applications.xception.preprocess_input
             model = tf.keras.applications.xception.Xception()
             removeLay = -1
             freezeLay = -5
         
         train_batches = ImageDataGenerator(preprocessing_function=preprocessingFunc).flow_from_directory(directory="training/", target_size=(224,224), classes=self.labels, batch_size=batch_size)
         valid_batches = ImageDataGenerator(preprocessing_function=preprocessingFunc).flow_from_directory(directory="validation/", target_size=(224,224), classes=self.labels, batch_size=batch_size)
         test_batches = ImageDataGenerator(preprocessing_function=preprocessingFunc).flow_from_directory(directory="testing", target_size=(224,224), classes=self.labels, batch_size=batch_size, shuffle=False)
         
         x = model.layers[removeLay].output  # take this layer's output; the removed output layer is replaced by the Dense layer below
         output = Dense(units=len(self.labels), activation='softmax')(x)
         model_new = Model(inputs=model.input, outputs=output)
         for layer in model_new.layers[:freezeLay]:  # retrain fc1 , fc2 and prediction layer
             layer.trainable = False
         model_new.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
         #model_new.summary()
         history_model = model_new.fit(x=train_batches,
                     validation_data=valid_batches,
                     validation_steps=len(valid_batches),
                     epochs=n_epochs,
                     verbose=1,callbacks=[checkpoint]
         )
         #print the max validation accuracy and the epoch at which it occurred
         for k in range(len(history_model.history['val_accuracy'])):
             if history_model.history['val_accuracy'][k] > mx:
                 mx = history_model.history['val_accuracy'][k]
                 epochAtMx = k + 1
                 modelType = model_name
         print('max val acc: {}'.format(mx))
         print('at epoch: {}'.format(epochAtMx))
         print('best model: {}'.format(modelType))
         
     #Select and load the model with highest recorded val_accuracy
     bestModelFile = modelType + ('_model.%03d.h5' % epochAtMx)
     bestModel = keras.models.load_model('saved_models/'+bestModelFile)
     print('best model file: {}'.format(bestModelFile))
     
     #Set model attributes according to best model type
     if modelType == 'vgg16':
         preprocessingFunc = tf.keras.applications.vgg16.preprocess_input
         removeLay = -2
         freezeLay = -3
     elif modelType == 'mobilenet':
         preprocessingFunc = tf.keras.applications.mobilenet.preprocess_input
         removeLay = -6
         freezeLay = -5
     elif modelType == 'inception':
         preprocessingFunc = tf.keras.applications.inception_v3.preprocess_input
         removeLay = -1
         freezeLay = -5
     elif modelType == 'inception_resnet':
         preprocessingFunc = tf.keras.applications.inception_resnet_v2.preprocess_input
         removeLay = -1
         freezeLay = -5
     elif modelType == 'xception':
         preprocessingFunc = tf.keras.applications.xception.preprocess_input
         removeLay = -1
         freezeLay = -5
     
     #Initialize new callback for best model
     #callback for saving checkpoint at best validation accuracy
     model_name = 'bestModel'
     model_filename = '%s_model.{epoch:03d}.h5' % model_name
     filepath = os.path.join(save_dir, model_filename)
     # prepare callback for model saving
     checkpoint = ModelCheckpoint(filepath=filepath,
                               monitor='val_accuracy',
                               verbose=1,
                               save_best_only=True)
     
     #Update hyperparameters for tuning the best model
     #Create new batches for best model.
     #Then start training.
      #set new number of epochs, batch size, learning rate for best model tuning
     n_epochs=15
     batch_size = 32
     lr = 0.00001
     #create batches for new training round with updated batch size
      train_batches = ImageDataGenerator(rotation_range=5, horizontal_flip=True, preprocessing_function=preprocessingFunc).flow_from_directory(directory="training/", target_size=(224,224), classes=self.labels, batch_size=batch_size)
      valid_batches = ImageDataGenerator(rotation_range=5, horizontal_flip=True, preprocessing_function=preprocessingFunc).flow_from_directory(directory="validation/", target_size=(224,224), classes=self.labels, batch_size=batch_size)
      test_batches = ImageDataGenerator(preprocessing_function=preprocessingFunc).flow_from_directory(directory="testing", target_size=(224,224), classes=self.labels, batch_size=batch_size, shuffle=False)
     opt = keras.optimizers.Adam(learning_rate=lr)
     bestModel.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
     history_bestModel = bestModel.fit(x=train_batches,
                 steps_per_epoch=len(train_batches),
                 validation_data=valid_batches,
                 validation_steps=len(valid_batches),
                 epochs=n_epochs,
                 verbose=1,callbacks=[checkpoint]
     )
     
     #Record best model's top validation accuracy and epoch.
     #Model h5 file saved by checkpoint callback.
      #print the max validation accuracy and the epoch at which it occurred
     for k in range(len(history_bestModel.history['val_accuracy'])):
         if history_bestModel.history['val_accuracy'][k] > mx:
             mx = history_bestModel.history['val_accuracy'][k]
             epochAtMx = k + 1
             modelType = model_name
     print('max val acc: {}'.format(mx))
     print('at epoch: {}'.format(epochAtMx))
     print('best model: {}'.format(modelType))
     
     #save model summary
     summary_file = 'bestModel_summary.txt'
     with open(summary_file, 'w') as f:
         with redirect_stdout(f):
             bestModel.summary()
     
     #Show confusion matrix and training history of best model training run with tuned hyperparameters.
     predictions_bestModel = bestModel.predict(x=test_batches, steps=len(test_batches), verbose=0) 
     cm = confusion_matrix(y_true=test_batches.classes, y_pred=predictions_bestModel.argmax(axis=1))
     self.plot_confusion_matrix(cm=cm, classes=list(test_batches.class_indices.keys()),name="confusion_matrix_bestModel.png")
     plt.show()
     self.plot_history(history_bestModel,name="history_bestModel")
     plt.show()
     return modelType, epochAtMx
Example #15
for layer in pre_trained_model.layers:
    layer.trainable = False

# pre_trained_model.summary()

last_layer = pre_trained_model.get_layer('mixed7')
print('last layer output shape: ', last_layer.output_shape)
last_output = last_layer.output

x = layers.Flatten()(last_output)
x = layers.Dense(1024, activation='relu')(x)
x = layers.Dropout(0.2)(x)

x = layers.Dense(classes, activation='softmax')(x)

model = Model(pre_trained_model.input, x)

model.compile(optimizer=tf.keras.optimizers.RMSprop(learning_rate=0.0001),
              loss='categorical_crossentropy',
              metrics=['accuracy', 'mae'])

history = model.fit(
    train_generator,
    steps_per_epoch=total_count // batch_size,
    epochs=100,
)

plots = ['mae']
for plot in plots:
    metric = history.history[plot]
    # val_metric = history.history[f"val_{plot}"]
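    # The excerpt ends here; a minimal completion of the plotting loop,
    # assuming matplotlib.pyplot has been imported as plt:
    plt.plot(metric, label=plot)
    plt.xlabel('epoch')
    plt.legend()
    plt.show()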
Example #16
base_model = InceptionResNetV2(
    weights='imagenet',
    include_top=False,
    input_shape=(image_dim, image_dim, 3)
)
add_model = Sequential([
    Conv2D(256, kernel_size=(3, 3), padding='same'),
    MaxPooling2D(pool_size=(2, 2), padding='same'),
    Flatten(),
    Dense(256),
    Activation(activation='relu'),
    Dropout(0.3),
    Dense(train_data.num_classes, activation='softmax')
])
model = Model(inputs=base_model.input, outputs=add_model(base_model.output))
model.summary()
#--------------------------------------------------------------------------------------------------------------------------------
# Optimizers
#--------------------------------------------------------------------------------------------------------------------------------
sgd = SGD(
    learning_rate=1e-4,
    momentum=0.9,
)
#--------------------------------------------------------------------------------------------------------------------------------
# Compile Model
#--------------------------------------------------------------------------------------------------------------------------------
model.compile(
    optimizer=sgd,  # Test adam and nadam; best result: rmsprop
    loss=categorical_crossentropy,
    metrics=['accuracy'],
)
        # We create and train our DeepGraphInfomax model. Note that the loss
        # used here must always be tf.nn.sigmoid_cross_entropy_with_logits.

        fullbatch_generator = FullBatchNodeGenerator(G, sparse=False)
        gcn_model = GCN(layer_sizes=[2],
                        activations=["relu"],
                        generator=fullbatch_generator)

        corrupted_generator = CorruptedGenerator(fullbatch_generator)
        gen = corrupted_generator.flow(G.nodes())

        infomax = DeepGraphInfomax(gcn_model, corrupted_generator)
        x_in, x_out = infomax.in_out_tensors()

        model = Model(inputs=x_in, outputs=x_out)
        model.compile(loss=tf.nn.sigmoid_cross_entropy_with_logits,
                      optimizer=Adam(learning_rate=1e-3))

        epochs = 100

        es = EarlyStopping(monitor="loss", min_delta=0, patience=20)
        history = model.fit(gen, epochs=epochs, verbose=0, callbacks=[es])
        # plot_history(history)

        x_emb_in, x_emb_out = gcn_model.in_out_tensors()

        # for full batch models, squeeze out the batch dim (which is 1)
        x_out = tf.squeeze(x_emb_out, axis=0)
        emb_model = Model(inputs=x_emb_in, outputs=x_out)
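
        # Usage sketch: the embedding model can now compute an embedding for
        # every node in the graph.
        node_embeddings = emb_model.predict(fullbatch_generator.flow(G.nodes()))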
Example #18
# model = load_model('./checkpoints/weights-20200328-faces.hdf5')

# Define a new model
input_shape = Input((FRAME_COUNT, FRAME_WIDTH, FRAME_HEIGHT, FRAME_CHANNELS))

LSTM = ConvLSTM2D(3, (1, 1), strides=(1, 1), padding='same', activation='relu',
                  recurrent_activation='hard_sigmoid')(input_shape)
LSTM = MaxPooling2D((2, 2), strides=(1, 1), padding='same')(LSTM)

XCEPT = Xception(include_top=False, weights='imagenet', pooling='avg')(LSTM)
XCEPT = Flatten()(XCEPT)

OUT = Dropout(0.80)(XCEPT)
OUT = Dense(1, activation='sigmoid')(OUT)

model = Model(input_shape, OUT)


# Define a custom loss that clips the predicted probabilities
# away from 0 and 1 before computing binary cross-entropy
def clip_loss(y_true, y_pred):
    clip_loss = binary_crossentropy(y_true, K.clip(y_pred, 0.01, 0.99))
    return clip_loss
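
# To actually use it, pass the function as the loss when compiling, e.g.:
#   model.compile(optimizer, loss=clip_loss, metrics=['accuracy'])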


# Compile and view model summary
optimizer = Adam(learning_rate=1e-5)
model.compile(optimizer, loss='binary_crossentropy', metrics=['accuracy'])
model.summary()

# Model checkpoint
Example #19
def train_model(Gnx, train_data, test_data, all_features):
    output_results = {}
    from collections import Counter
    #TODO: save size of dataset, train_data, and test data
    #save the count of each subject in the blocks
    print(len(train_data), len(test_data))
    subject_groups_train = Counter(train_data['subject'])
    subject_groups_test = Counter(test_data['subject'])
    output_results['train_size'] = len(train_data)
    output_results['test_size'] = len(test_data)
    output_results['subject_groups_train'] = subject_groups_train
    output_results['subject_groups_test'] = subject_groups_test

    #node_features = train_data[feature_names]
    #print (feature_names)
    G = sg.StellarGraph(Gnx, node_features=all_features)
    #TODO: save graph info
    print(G.info())
    print("writing graph.dot")
    #write_dot(Gnx,"graph.dot")
    output_results['graph_info'] = G.info()
    print("building the graph generator...")

    batch_size = 50
    num_samples = [10, 5]
    generator = GraphSAGENodeGenerator(G, batch_size, num_samples)
    #generator = HinSAGENodeGenerator(G, batch_size, num_samples)

    target_encoding = feature_extraction.DictVectorizer(sparse=False)
    train_targets = target_encoding.fit_transform(
        train_data[["subject"]].to_dict('records'))
    print(np.unique(train_data["subject"].to_list()))
    class_weights = class_weight.compute_class_weight(
        class_weight='balanced',
        classes=np.unique(train_data["subject"].to_list()),
        y=train_data["subject"].to_list())
    print('class_weights', class_weights)
    test_targets = target_encoding.transform(
        test_data[["subject"]].to_dict('records'))
    train_gen = generator.flow(train_data.index, train_targets, shuffle=True)
    graphsage_model = GraphSAGE(
        #graphsage_model = HinSAGE(
        #layer_sizes=[32, 32],
        layer_sizes=[80, 80],
        generator=generator,  #train_gen,
        bias=True,
        dropout=0.5,
    )
    print("building model...")
    #x_inp, x_out = graphsage_model.build(flatten_output=True)
    x_inp, x_out = graphsage_model.build()
    prediction = layers.Dense(units=train_targets.shape[1],
                              activation="softmax")(x_out)

    model = Model(inputs=x_inp, outputs=prediction)
    print("compiling model...")
    model.compile(
        optimizer=optimizers.Adam(lr=0.005),
        loss=losses.categorical_crossentropy,
        metrics=["acc", metrics.categorical_accuracy],
    )
    print("testing the model...")
    test_gen = generator.flow(test_data.index, test_targets)
    history = model.fit(
        train_gen,
        epochs=EPOCH,
        validation_data=test_gen,
        verbose=2,
        shuffle=True,
        class_weight=dict(enumerate(class_weights)),  # Keras expects a dict mapping class index to weight
    )
    # save test metrics
    test_metrics = model.evaluate(test_gen)
    print("\nTest Set Metrics:")
    output_results['test_metrics'] = []
    for name, val in zip(model.metrics_names, test_metrics):
        output_results['test_metrics'].append({'name': name, 'val': val})
        print("\t{}: {:0.4f}".format(name, val))

    test_nodes = test_data.index
    test_mapper = generator.flow(test_nodes)
    test_predictions = model.predict(test_mapper)
    node_predictions = target_encoding.inverse_transform(test_predictions)
    results = pd.DataFrame(node_predictions, index=test_nodes).idxmax(axis=1)
    df = pd.DataFrame({
        "Predicted": results,
        "True": test_data['subject']
    })  #, "program":test_data['program']})
    clean_result_labels = df["Predicted"].map(
        lambda x: x.replace('subject=', ''))
    # save predicted labels
    pred_labels = np.unique(clean_result_labels.values)
    #pred_program = np.unique(df['program'].values)
    # save predictions per label
    precision, recall, f1, _ = skmetrics.precision_recall_fscore_support(
        df['True'].values,
        clean_result_labels.values,
        average=None,
        labels=pred_labels)
    output_results['classifier'] = []
    for lbl, prec, rec, fm in zip(pred_labels, precision, recall, f1):
        output_results['classifier'].append({
            'label': lbl,
            'precision': prec,
            'recall': rec,
            'fscore': fm
        })
    print(output_results['classifier'])
    print(pred_labels)
    print('precision: {}'.format(precision))
    print('recall: {}'.format(recall))
    print('fscore: {}'.format(f1))

    return generator, model, x_inp, x_out, history, target_encoding, output_results
Example #20
    def get_nmf_model(self):

        num_factors = self.num_factors
        num_layers = self.num_layers
        layer1_dim = self.num_factors * (2**(num_layers - 1))

        num_users = self.num_users
        num_items = self.num_items
        num_genres = self.num_genres

        # input layer
        user_input_layer = layers.Input(shape=(1, ),
                                        dtype='int32',
                                        name='user_input')
        item_input_layer = layers.Input(shape=(1, ),
                                        dtype='int32',
                                        name='item_input')
        genre_input_layer = layers.Input(shape=(None, ),
                                         dtype='int32',
                                         name='genre_input')

        # GMF embedding layer
        GMF_user_embedding = layers.Embedding(
            input_dim=num_users,
            output_dim=num_factors,
            embeddings_regularizer=regularizers.l2(0.),
            name='GMF_user_embedding')(user_input_layer)
        GMF_item_embedding = layers.Embedding(
            input_dim=num_items,
            output_dim=self.num_movie_factors,
            embeddings_regularizer=regularizers.l2(0.),
            name='GMF_item_embedding')(item_input_layer)

        GMF_genre_embedding = layers.Embedding(
            input_dim=num_genres,
            output_dim=self.num_movie_factors,
            mask_zero=True,
            embeddings_regularizer=regularizers.l2(0.),
            name='GMF_genre_embedding')(genre_input_layer)

        GMF_genre_emb_mean = tf.reduce_mean(GMF_genre_embedding, 1)

        # MLP embedding layer
        MLP_user_embedding = layers.Embedding(
            input_dim=num_users,
            output_dim=num_factors,
            embeddings_regularizer=regularizers.l2(0.),
            name='MLP_user_embedding')(user_input_layer)
        MLP_item_embedding = layers.Embedding(
            input_dim=num_items,
            output_dim=self.num_movie_factors,
            embeddings_regularizer=regularizers.l2(0.),
            name='MLP_item_embedding')(item_input_layer)

        MLP_genre_embedding = layers.Embedding(
            input_dim=num_genres,
            output_dim=self.num_movie_factors,
            mask_zero=True,
            embeddings_regularizer=regularizers.l2(0.),
            name='MLP_genre_embedding')(genre_input_layer)
        MLP_genre_emb_mean = tf.reduce_mean(MLP_genre_embedding, 1)

        # GMF
        GMF_user_latent = layers.Flatten()(GMF_user_embedding)
        GMF_item_latent = layers.Flatten()(GMF_item_embedding)
        GMF_genre_latent = layers.Flatten()(GMF_genre_emb_mean)

        GMF_movie_latent = layers.concatenate(
            [GMF_item_latent, GMF_genre_latent])

        # MLP
        MLP_user_latent = layers.Flatten()(MLP_user_embedding)
        MLP_item_latent = layers.Flatten()(MLP_item_embedding)
        MLP_genre_latent = layers.Flatten()(MLP_genre_emb_mean)

        MLP_movie_latent = layers.concatenate(
            [MLP_item_latent, MLP_genre_latent])

        # gmf - element wise product
        GMF_vector = layers.multiply([GMF_user_latent, GMF_movie_latent])
        GMF_vector = layers.BatchNormalization()(GMF_vector)

        # mlp
        MLP_vector = layers.concatenate([MLP_user_latent, MLP_movie_latent])
        MLP_vector = layers.BatchNormalization()(MLP_vector)

        for i in range(num_layers - 1):
            MLP_vector = layers.Dense(int(layer1_dim / (2**i)),
                                      activation='tanh',
                                      name='layer%s' % str(i + 1))(MLP_vector)
            MLP_vector = layers.BatchNormalization()(MLP_vector)

        #NeuMF layer
        NeuMF_vector = layers.concatenate([GMF_vector, MLP_vector])
        prediction = layers.Dense(1, activation='tanh',
                                  name='prediction')(NeuMF_vector)

        model = Model([user_input_layer, item_input_layer, genre_input_layer],
                      prediction)

        self.nmf_model = model
        self.nmf_model = self.set_nmf_weight()

        return self.nmf_model
def build_discriminator_classifier(options, name="Discriminator_Classifier"):

    initializer = tf.random_normal_initializer(0.0, 0.02)

    inputs = Input(shape=(options.time_step, options.pitch_range, options.output_nc))

    x = inputs
    # (batch * 64, 84, 1)

    x = layers.Conv2D(
        filters=options.df_dim,
        kernel_size=[1, 12],
        strides=[1, 12],
        padding="same",
        kernel_initializer=initializer,
        use_bias=False,
        name="CONV2D_1",
    )(x)
    x = layers.LeakyReLU(alpha=0.2)(x)
    # (batch * 64 * 7 * 64)

    x = layers.Conv2D(
        filters=options.df_dim * 2,
        kernel_size=[4, 1],
        strides=[4, 1],
        padding="same",
        kernel_initializer=initializer,
        use_bias=False,
        name="CONV2D_2",
    )(x)
    x = InstanceNorm(x.shape[-1:])(x)
    x = layers.LeakyReLU(alpha=0.2)(x)
    # (batch * 16 * 7 * 128)

    x = layers.Conv2D(
        filters=options.df_dim * 4,
        kernel_size=[2, 1],
        strides=[2, 1],
        padding="same",
        kernel_initializer=initializer,
        use_bias=False,
        name="CONV2D_3",
    )(x)
    x = InstanceNorm(x.shape[-1:])(x)
    x = layers.LeakyReLU(alpha=0.2)(x)
    # (batch * 8 * 7 * 256)

    x = layers.Conv2D(
        filters=options.df_dim * 8,
        kernel_size=[8, 1],
        strides=[8, 1],
        padding="same",
        kernel_initializer=initializer,
        use_bias=False,
        name="CONV2D_4",
    )(x)
    x = InstanceNorm(x.shape[-1:])(x)
    x = layers.LeakyReLU(alpha=0.2)(x)
    # (batch * 1 * 7 * 512)

    x = layers.Conv2D(
        filters=2,
        kernel_size=[1, 7],
        strides=[1, 7],
        padding="same",
        kernel_initializer=initializer,
        use_bias=False,
        name="CONV2D_5",
    )(x)
    # (batch * 1 * 1 * 2)

    x = tf.reshape(x, [-1, 2])
    # (batch * 2)

    outputs = x

    return Model(inputs=inputs, outputs=outputs, name=name)
Example #22
    def __init__(self,
                 num_workers: int = 1,
                 model: keras.Model = None,
                 optimizer: Union[keras.optimizers.Optimizer, str] = None,
                 loss: Union[keras.losses.Loss, str] = None,
                 metrics: Union[List[keras.metrics.Metric], List[str]] = None,
                 feature_columns: Union[str, List[str]] = None,
                 feature_types: Optional[Union[DType, List[DType]]] = None,
                 feature_shapes: Optional[Union[TensorShape,
                                                List[TensorShape]]] = None,
                 label_column: str = None,
                 label_type: Optional[tf.DType] = None,
                 label_shape: Optional[tf.TensorShape] = None,
                 batch_size: int = None,
                 num_epochs: int = None,
                 shuffle: bool = True,
                 config: Dict = None):
        """
        A scikit-learn-like API for distributed training of a TensorFlow Keras model.
        In the backend it leverages the ray SGD TFTrainer.
        :param num_workers: the number of workers for distributed model training
        :param model: the model, it should be instance of tensorflow.keras.Model. We do not support
                      multiple output models.
        :param optimizer: the optimizer, it should be keras.optimizers.Optimizer instance or str.
                          We do not support multiple optimizers currently.
        :param loss: the loss, it should be keras.losses.Loss instance or str. We do not support
                     multiple losses.
        :param metrics: the metrics list. It could be None, a list of keras.metrics.Metric instance
                        or a list of str.
        :param feature_columns: the feature columns name.
        :param feature_types: the type for each feature input. It must match the length of the
                              feature_columns if provided. It will be tf.float32 by default.
        :param feature_shapes: the shape for each feature input. It must match the length of the
                               feature_columns
        :param label_column: the label column name.
        :param label_type: the label type, it will be tf.float32 by default.
        :param label_shape: the label shape.
        :param batch_size: the batch size
        :param num_epochs: the number of epochs
        :param shuffle: whether the input dataset should be shuffled, True by default.
        :param config: extra config that will be passed to the TFTrainer.
        """
        self._num_workers: int = num_workers

        # model
        assert model is not None, "model must not be None"
        if isinstance(model, keras.Model):
            self._serialized_model = model.to_json()
        else:
            raise Exception(
                "Unsupported parameter, we only support tensorflow.keras.Model"
            )

        # optimizer
        # TODO: we should support multiple optimizers for multiple outputs model
        assert optimizer is not None, "optimizer must not be None"
        if isinstance(optimizer, str):
            # it is a str represents the optimizer
            _optimizer = optimizer
        elif isinstance(optimizer, keras.optimizers.Optimizer):
            _optimizer = keras.optimizers.serialize(optimizer)
        else:
            raise Exception(
                "Unsupported parameter, we only support keras.optimizers.Optimizer subclass "
                "instance or a str to represent the optimizer")
        self._serialized_optimizer = _optimizer

        # loss
        # TODO: we should support multiple losses for multiple outputs model
        assert loss is not None, "loss must not be None"
        if isinstance(loss, str):
            _loss = loss
        elif isinstance(loss, keras.losses.Loss):
            _loss = keras.losses.serialize(loss)
        else:
            raise Exception(
                "Unsupported parameter, we only support keras.losses.Loss subclass "
                "instance or a str that represents the loss")
        self._serialized_loss = _loss

        # metrics
        if metrics is None:
            _metrics = None
        else:
            assert isinstance(metrics, list), "metrics must be a list"
            if isinstance(metrics[0], str):
                _metrics = metrics
            elif isinstance(metrics[0], keras.metrics.Metric):
                _metrics = [keras.metrics.serialize(m) for m in metrics]
            else:
                raise Exception(
                    "Unsupported parameter, we only support a list of "
                    "keras.metrics.Metric instances or a list of str")
        self._serialized_metrics = _metrics

        self._feature_columns = feature_columns
        self._feature_types = feature_types
        self._feature_shapes = feature_shapes
        self._label_column = label_column
        self._label_type = label_type
        self._label_shape = label_shape

        _config = {"batch_size": batch_size}
        _config.update(config)
        self._config = _config
        self._num_epochs: int = num_epochs
        self._shuffle: bool = shuffle

        self._trainer: TFTrainer = None
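
    # A usage sketch, assuming the enclosing class is named `TFEstimator`
    # (the class name is not shown in this excerpt; values are illustrative):
    #
    #   estimator = TFEstimator(num_workers=2,
    #                           model=my_keras_model,
    #                           optimizer="adam",
    #                           loss="mse",
    #                           feature_columns=["x1", "x2"],
    #                           label_column="y",
    #                           batch_size=128,
    #                           num_epochs=10)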
class BaseModel:
    def __init__(self, train_generator=None, subpixel=False, **kwargs):

        self.train_generator = train_generator
        self.subpixel = subpixel
        if "skip_init" not in kwargs:
            config = self.train_generator.get_config()
            if self.train_model is NotImplemented:
                self.__init_input__(config["image_shape"])
                self.__init_model__()
                self.__init_train_model__()
            if self.train_generator is not None:
                if self.subpixel:
                    output_sigma = config["output_sigma"]
                else:
                    output_sigma = None
                self.__init_predict_model__(
                    config["output_shape"],
                    config["keypoints_shape"],
                    config["downsample_factor"],
                    output_sigma,
                )

    train_model = NotImplemented

    def __init_input__(self, image_shape):
        self.input_shape = image_shape
        self.inputs = Input(self.input_shape)

    def __init_train_model__(self):
        if isinstance(self.train_model, Model):
            self.compile = self.train_model.compile
            self.n_outputs = len(self.train_model.outputs)
        else:
            raise TypeError("self.train_model must be a keras.Model instance")

    def __init_model__(self):
        raise NotImplementedError(
            "__init_model__ method must be "
            "implemented to define `self.train_model`"
        )

    def __init_predict_model__(
        self,
        output_shape,
        keypoints_shape,
        downsample_factor,
        output_sigma=None,
        **kwargs
    ):

        outputs = self.train_model(self.inputs)
        if isinstance(outputs, list):
            outputs = outputs[-1]
        if self.subpixel:
            kernel_size = np.min(output_shape)
            kernel_size = (kernel_size // largest_factor(kernel_size)) + 1
            sigma = output_sigma
            keypoints = SubpixelMaxima2D(
                kernel_size,
                sigma,
                upsample_factor=100,
                index=keypoints_shape[0],
                coordinate_scale=2 ** downsample_factor,
                confidence_scale=255.0,
            )(outputs)
        else:
            keypoints = Maxima2D(
                index=keypoints_shape[0],
                coordinate_scale=2 ** downsample_factor,
                confidence_scale=255.0,
            )(outputs)
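        # coordinate_scale maps maxima found on the downsampled output maps
        # back to full-resolution image coordinates; confidence_scale rescales
        # the 8-bit confidence values.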
        self.predict_model = Model(self.inputs, keypoints, name=self.train_model.name)
        self.predict = self.predict_model.predict
        self.predict_generator = self.predict_model.predict_generator
        self.predict_on_batch = self.predict_model.predict_on_batch

        # Fix for passing model to callbacks.ModelCheckpoint
        if hasattr(self.train_model, "_in_multi_worker_mode"):
            self._in_multi_worker_mode = self.train_model._in_multi_worker_mode

    def fit(
        self,
        batch_size,
        validation_batch_size=1,
        callbacks=None,
        epochs=1,
        use_multiprocessing=False,
        n_workers=1,
        steps_per_epoch=None,
        **kwargs
    ):
        """
        Trains the model for a given number of epochs (iterations on a dataset).

        Parameters
        ----------
        batch_size : int
            Number of samples per training update.
        validation_batch_size : int
            Number of samples per validation batch used when evaluating the model.
        callbacks : list or None
            List of keras.callbacks.Callback instances or deepposekit callbacks
            to apply during training and validation.
        epochs : int
            Number of epochs to train the model. An epoch is one iteration
            over the entire dataset, or over `steps_per_epoch` batches.
        use_multiprocessing : bool, default=False
            Whether to use the multiprocessing module when generating batches of data.
        n_workers : int
            Number of processes to run for generating batches of data.
        steps_per_epoch : int or None
            Number of batches per epoch. If `None`, this is determined
            automatically from the size of the dataset.
        """

        if not self.train_model._is_compiled:
            warnings.warn(
                """\nAutomatically compiling with default settings: model.compile('adam', 'mse')\n"""
                "Call model.compile() manually to use non-default settings.\n"
            )
            self.train_model.compile("adam", "mse")

        train_generator = self.train_generator(
            self.n_outputs, batch_size, validation=False, confidence=True
        )
        validation_generator = self.train_generator(
            self.n_outputs, validation_batch_size, validation=True, confidence=True
        )
        validation_generator = (
            None if len(validation_generator) == 0 else validation_generator
        )
        if validation_generator is None:
            warnings.warn(
                "No validation set detected, so validation step will not be run and `val_loss` will not be available."
            )

        activated_callbacks = self.activate_callbacks(callbacks)

        self.train_model.fit_generator(
            generator=train_generator,
            steps_per_epoch=steps_per_epoch,
            epochs=epochs,
            use_multiprocessing=use_multiprocessing,
            workers=n_workers,
            callbacks=activated_callbacks,
            validation_data=validation_generator,
            **kwargs
        )

    def activate_callbacks(self, callbacks):
        activated_callbacks = []
        if callbacks is not None:
            if len(callbacks) > 0:
                for callback in callbacks:
                    if hasattr(callback, "pass_model"):
                        callback.pass_model(self)
                    activated_callbacks.append(callback)
        return activated_callbacks

    def evaluate(self, batch_size):

        if self.train_generator.n_validation > 0:
            keypoint_generator = self.train_generator(
                n_outputs=1, batch_size=batch_size, validation=True, confidence=False
            )
        else:
            warnings.warn(
                "`n_validation` is 0, so the training set will be used for model evaluation"
            )
            keypoint_generator = self.train_generator(
                n_outputs=1, batch_size=batch_size, validation=False, confidence=False
            )
        y_pred_list = []
        confidence_list = []
        y_error_list = []
        euclidean_list = []
        for idx in range(len(keypoint_generator)):
            X, y_true = keypoint_generator[idx]

            y_pred = self.predict_model.predict_on_batch(X)
            confidence_list.append(y_pred[..., -1])
            y_pred_coords = y_pred[..., :2]
            y_pred_list.append(y_pred_coords)

            errors = keypoint_errors(y_true, y_pred_coords)
            y_error, euclidean, mae, mse, rmse = errors
            y_error_list.append(y_error)
            euclidean_list.append(euclidean)

        y_pred = np.concatenate(y_pred_list)
        confidence = np.concatenate(confidence_list)
        y_error = np.concatenate(y_error_list)
        euclidean = np.concatenate(euclidean_list)

        evaluation_dict = {
            "y_pred": y_pred,
            "y_error": y_error,
            "euclidean": euclidean,
            "confidence": confidence,
        }

        return evaluation_dict

    def save(self, path, overwrite=True):
        save_model(self, path)

    def get_config(self):
        config = {}
        if self.train_generator:
            base_config = self.train_generator.get_config()
            return dict(list(base_config.items()) + list(config.items()))
        else:
            return config
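
# Hedged usage sketch for BaseModel subclasses; the subclass name and
# hyperparameters below are assumptions, not from the source.
# model = SomePoseModel(train_generator, subpixel=True)
# model.compile("adam", "mse")
# model.fit(batch_size=16, validation_batch_size=10, epochs=50)
# evaluation = model.evaluate(batch_size=16)
# print(evaluation["euclidean"].mean())

# A minimal sketch (an assumption) of a callback compatible with the
# pass_model hook used by activate_callbacks above.
import tensorflow as tf

class ModelAwareCallback(tf.keras.callbacks.Callback):
    def pass_model(self, model_wrapper):
        # receive the wrapper so the callback can reach predict_model
        self.model_wrapper = model_wrapper
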
    def _build_encoder(self):
        encoder_input = self._add_encoder_input()
        conv_layers = self._add_conv_layers(encoder_input)
        bottleneck = self._add_bottleneck(conv_layers)
        self.encoder = Model(encoder_input, bottleneck, name="encoder")
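    # A possible sketch (an assumption, not from the original source) of the
    # _add_bottleneck helper referenced above: flatten the conv output and
    # project it to the latent dimension; latent_space_dim is an assumed
    # attribute name.
    def _add_bottleneck(self, x):
        self._shape_before_bottleneck = K.int_shape(x)[1:]  # saved for the decoder
        x = Flatten()(x)
        x = Dense(self.latent_space_dim, name="encoder_output")(x)
        return x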
Example #25
def build_stage2_discriminator():
    """
    Create Stage-II discriminator network
    """
    input_layer = Input(shape=(256, 256, 3))

    x = Conv2D(64, (4, 4),
               padding='same',
               strides=2,
               use_bias=False)(input_layer)
    x = LeakyReLU(alpha=0.2)(x)

    x = Conv2D(128, (4, 4), padding='same', strides=2, use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.2)(x)

    x = Conv2D(256, (4, 4), padding='same', strides=2, use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.2)(x)

    x = Conv2D(512, (4, 4), padding='same', strides=2, use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.2)(x)

    x = Conv2D(1024, (4, 4), padding='same', strides=2, use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.2)(x)

    x = Conv2D(2048, (4, 4), padding='same', strides=2, use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.2)(x)

    x = Conv2D(1024, (1, 1), padding='same', strides=1, use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.2)(x)

    x = Conv2D(512, (1, 1), padding='same', strides=1, use_bias=False)(x)
    x = BatchNormalization()(x)

    x2 = Conv2D(128, (1, 1), padding='same', strides=1, use_bias=False)(x)
    x2 = BatchNormalization()(x2)
    x2 = LeakyReLU(alpha=0.2)(x2)

    x2 = Conv2D(128, (3, 3), padding='same', strides=1, use_bias=False)(x2)
    x2 = BatchNormalization()(x2)
    x2 = LeakyReLU(alpha=0.2)(x2)

    x2 = Conv2D(512, (3, 3), padding='same', strides=1, use_bias=False)(x2)
    x2 = BatchNormalization()(x2)

    added_x = add([x, x2])
    added_x = LeakyReLU(alpha=0.2)(added_x)

    input_layer2 = Input(shape=(4, 4, 128))

    merged_input = concatenate([added_x, input_layer2])

    x3 = Conv2D(64 * 8, kernel_size=1, padding="same", strides=1)(merged_input)
    x3 = BatchNormalization()(x3)
    x3 = LeakyReLU(alpha=0.2)(x3)
    x3 = Flatten()(x3)
    x3 = Dense(1)(x3)
    x3 = Activation('sigmoid')(x3)

    stage2_dis = Model(inputs=[input_layer, input_layer2], outputs=[x3])
    return stage2_dis
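
# Hedged usage sketch: the discriminator scores a 256x256 image together
# with a 4x4x128 spatially replicated conditioning tensor; variable names
# here are assumptions.
# stage2_dis = build_stage2_discriminator()
# stage2_dis.compile(optimizer='adam', loss='binary_crossentropy')
# scores = stage2_dis.predict([real_images, compressed_embeddings])
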
def dropoutResUNet(
        num_channel_input=1,
        num_channel_output=1,
        img_rows=128,
        img_cols=128,
        y=np.array([-1, 1]),  # change to output_range in the future
        output_range=None,
        lr_init=None,
        loss_function=mixedLoss(),
        metrics_monitor=[mean_absolute_error, mean_squared_error],
        num_poolings=4,
        num_conv_per_pooling=3,
        num_channel_first=32,
        with_bn=True,  # don't use for F16 now
        with_baseline_concat=True,
        with_baseline_addition=-1,  # -1 means no
        activation_conv='relu',  # options: 'elu', 'selu'
        activation_output=None,  # options: 'tanh', 'sigmoid', 'linear', 'softplus'
        kernel_initializer='zeros',  # options: 'he_normal'
        verbose=1):
    # BatchNorm
    if with_bn:

        def lambda_bn(x):
            x = BatchNormalization()(x)
            return Activation(activation_conv)(x)
    else:

        def lambda_bn(x):
            return x

    # Layers: for 2D data (e.g. images), "tf" ordering assumes (rows, cols, channels) while "th" assumes (channels, rows, cols).
    inputs = Input((img_rows, img_cols, num_channel_input))
    if verbose:
        print('inputs:', inputs)
    '''
    Modification description (Charles 11/16/18)

    Added residual blocks to the encoding and decoding sides of the U-Net
        (see the if statements within the for loops).
    Added dropout via SpatialDropout2D
        (see the dropout layers at the ends of the for loops).
    '''
    '''
    Below was modified by Charles
    '''
    # step1
    conv1 = inputs
    conv_identity = []
    for i in range(num_conv_per_pooling):
        if i % 2 == 0 and i != 0:
            conv_identity.append(conv1)
        conv1 = lambda_bn(conv1)
        conv1 = Conv2D(num_channel_first, (3, 3),
                       padding="same",
                       activation=activation_conv,
                       kernel_initializer=kernel_initializer)(conv1)

        if (i + 1) % 2 == 0 and i != 1:
            conv1 = keras_add([conv_identity[-1], conv1])
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    pool1 = SpatialDropout2D(0.5)(pool1)
    if verbose:
        print('conv1:', conv1, pool1)

    # encoder layers with pooling
    conv_encoders = [inputs, conv1]
    pool_encoders = [inputs, pool1]
    conv_identity = []
    list_num_features = [num_channel_input, num_channel_first]
    for i in range(1, num_poolings):
        # step2
        conv_encoder = pool_encoders[-1]
        num_channel = num_channel_first * (2**(i))
        for j in range(num_conv_per_pooling):
            if j % 2 == 0 and j != 0:
                conv_identity.append(conv_encoder)
            conv_encoder = lambda_bn(conv_encoder)
            conv_encoder = Conv2D(
                num_channel, (3, 3),
                padding="same",
                activation=activation_conv,
                kernel_initializer=kernel_initializer)(conv_encoder)

            if (j + 1) % 2 == 0 and j != 1:
                conv_encoder = keras_add([conv_identity[-1], conv_encoder])
        pool_encoder = MaxPooling2D(pool_size=(2, 2))(conv_encoder)
        pool_encoder = SpatialDropout2D(0.5)(pool_encoder)
        if verbose:
            print('encoding#{0}'.format(i), conv_encoder, pool_encoder)
        pool_encoders.append(pool_encoder)
        conv_encoders.append(conv_encoder)
        list_num_features.append(num_channel)

    conv_center = Conv2D(list_num_features[-1] * 2, (3, 3),
                         padding="same",
                         activation="relu",
                         kernel_initializer=kernel_initializer,
                         bias_initializer='zeros')(pool_encoders[-1])
    conv_center = Conv2D(list_num_features[-1] * 2, (3, 3),
                         padding="same",
                         activation="relu",
                         kernel_initializer=kernel_initializer,
                         bias_initializer='zeros')(conv_center)

    conv_decoders = [conv_center]
    if verbose:
        print('centers', conv_center)

    # decoder steps
    deconv_identity = []
    for i in range(1, num_poolings + 1):

        attention_gated = create_attention_block_2D(
            Conv2DTranspose(list_num_features[-i], (2, 2),
                            strides=(2, 2),
                            padding="same",
                            activation=activation_conv,
                            kernel_initializer=kernel_initializer)(
                                conv_decoders[-1]), conv_encoders[-i],
            list_num_features[-i])
        upsample_decoder = concatenate([
            Conv2DTranspose(list_num_features[-i], (2, 2),
                            strides=(2, 2),
                            padding="same",
                            activation=activation_conv,
                            kernel_initializer=kernel_initializer)(
                                conv_decoders[-1]), attention_gated
        ])

        conv_decoder = upsample_decoder
        for j in range(num_conv_per_pooling):
            if j % 2 == 0 and j != 0:
                deconv_identity.append(conv_decoder)
            conv_decoder = lambda_bn(conv_decoder)
            conv_decoder = Conv2D(
                list_num_features[-i], (3, 3),
                padding="same",
                activation=activation_conv,
                kernel_initializer=kernel_initializer)(conv_decoder)

            if (j + 1) % 2 == 0 and j != 1:
                conv_decoder = keras_add([deconv_identity[-1], conv_decoder])
        conv_decoder = SpatialDropout2D(0.5)(conv_decoder)
        conv_decoders.append(conv_decoder)
        if verbose:
            print('decoding#{0}'.format(i), conv_decoder, upsample_decoder)
    '''
    Above was modified by Charles
    '''
    # concatenate with baseline
    if with_baseline_concat:
        conv_decoder = conv_decoders[-1]
        conv_decoder = concatenate([conv_decoder, inputs])
        if verbose:
            print('residual concatenate:', conv_decoder)
    # output layer activation
    if output_range is None:
        output_range = np.array(y).flatten()
    if activation_output is None:
        if max(output_range) <= 1 and min(output_range) >= 0:
            activation_output = 'sigmoid'
        elif max(output_range) <= 1 and min(output_range) >= -1:
            activation_output = 'tanh'
        else:
            activation_output = 'linear'
    conv_output = Conv2D(num_channel_output, (1, 1),
                         padding="same",
                         activation=activation_output)(conv_decoder)
    if verbose:
        print('output:', conv_output)

    # add baseline channel
    if with_baseline_addition > 0:
        print('add residual channel {0}#{1}'.format(with_baseline_addition,
                                                    num_channel_input // 2))
        # keep the channel axis when slicing so both tensors stay 4D
        baseline_channel = inputs[:, :, :,
                                  num_channel_input // 2:num_channel_input // 2 + 1]
        conv_output = keras_add([conv_output, baseline_channel])

    # construct model
    model = Model(outputs=conv_output, inputs=inputs)
    if verbose:
        print('model:', model)

    # optimizer and loss
    if lr_init is not None:
        optimizer = Adam(lr=lr_init)
    else:
        optimizer = Adam()
    model.compile(loss=loss_function,
                  optimizer=optimizer,
                  metrics=metrics_monitor)

    return model
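
# Hedged usage sketch for the function above; shapes, data names, and
# hyperparameters are illustrative assumptions.
# model = dropoutResUNet(num_channel_input=1, num_channel_output=1,
#                        img_rows=128, img_cols=128, lr_init=1e-3)
# model.fit(x_train, y_train, batch_size=8, epochs=10)
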
Example #27
class ZeroShot(CFs):
    def __init__(self, size1=512, size2=128, gru_length=20):
        self.backup_path = f"./training/zeroshot__{size1}__{size2}/mdl.ckpt"
        self.cp_callback = ModelCheckpoint(
            filepath=self.backup_path, save_weights_only=True, verbose=0
        )
        user_input = Input(shape=(gru_length, 768))
        item_input = Input(shape=(768,))
        self.inputs = [user_input, item_input]
        layer1 = Dense(size1, activation="relu")
        layer2 = Dense(size2, activation="relu")
        self.layers = [layer1, layer2]
        self.gru = GRU(size2)
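        # Shared dense towers embed users and items into the same space;
        # the GRU summarizes the sequence of user-history embeddings into
        # a single vector for the dot-product score below.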
        user_present = self.gru(layer2(layer1(user_input)))
        item_present = layer2(layer1(item_input))
        output = Activation(activation="sigmoid")(
            Dot(axes=1)([user_present, item_present])
        )
        self.model = Model(self.inputs, output, name="ZeroShot")
        self.model.compile(
            optimizer="adam",
            loss=BinaryCrossentropy(),
            metrics=[RootMeanSquaredError()],
        )
        self._update_models()
        self._gen_score_layer(size2)

    def load(self):
        super().load()
        self._update_models()

    def fit(self, inputs, label, epochs=10, verbose=1):
        super().fit(inputs, label, epochs, verbose)
        self._update_models()

    def _update_models(self):
        item_function = self.layers[1](self.layers[0](self.inputs[1]))
        self.item_model = Model(self.inputs[1], item_function)
        user_function = self.gru(self.layers[1](self.layers[0](self.inputs[0])))
        self.user_model = Model(self.inputs[0], user_function)

    def _gen_score_layer(self, size):
        score_inputs = [Input(shape=(size,)), Input(shape=(size,))]
        output = Activation(activation="sigmoid")(Dot(axes=1)(score_inputs))
        self.score_layer = Model(score_inputs, output)

    def predict(self, user_data, item_data):
        user_vec = self._embed_user(user_data.reshape(1, 20, 768))
        item_vec = self._embed_item(item_data)
        user_vec = np.repeat(user_vec, item_vec.shape[0], axis=0)
        return self.score_layer.predict([user_vec, item_vec])

    def _embed_item(self, item):
        return self.item_model.predict(item)

    def _embed_user(self, items):
        return self.user_model.predict(items)
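
# Hedged usage sketch; the 20x768 user history and 768-dim item vectors
# follow the constructor defaults above, and the data names are assumptions.
# zs = ZeroShot()
# zs.fit([user_histories, item_vectors], labels, epochs=10)
# scores = zs.predict(one_user_history, candidate_item_vectors)
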
for layer in pre_trained_model.layers:
    layer.trainable = False

# pre_trained_model.summary()

last_layer = pre_trained_model.get_layer('mixed7')
print('last layer output shape: ', last_layer.output_shape)
last_output = last_layer.output

# Flatten the output layer to 1 dimension
x = layers.Flatten()(last_output)
# Add a fully connected layer with 1,024 hidden units and ReLU activation
x = layers.Dense(1024, activation='relu')(x)
# Add a dropout layer with rate 0.3
x = layers.Dropout(0.3)(x)
# Add a final sigmoid layer for classification
x = layers.Dense(1, activation='sigmoid')(x)

model = Model(pre_trained_model.input, x)

model.compile(optimizer=RMSprop(lr=0.0001),
              loss='binary_crossentropy',
              metrics=['acc'])

history = model.fit_generator(train_generator,
                              validation_data=validation_generator,
                              epochs=20,
                              verbose=1)

model.save("catsdogs.h5")
Example #29
train_gen = generator.flow(train_subjects.index, train_targets,
                           shuffle=True)  # train_subjects.index selects the training nodes
test_gen = generator.flow(test_subjects.index, test_targets)

# aggregatortype = MaxPoolingAggregator()
graphsage_model = GraphSAGE(layer_sizes=[32, 16, 8, 8],
                            generator=generator,
                            activations=["relu", "relu", "relu", "linear"],
                            bias=True,
                            dropout=0.0)

x_inp, x_out = graphsage_model.in_out_tensors()
prediction = layers.Dense(units=train_targets.shape[1],
                          activation="linear")(x_out)

model = Model(inputs=x_inp, outputs=prediction)

#%% ##################################### Model training #######################################################

indices = bf.expandy(batch_size, 1)


def noderankloss(index):
    def loss(y_true, y_pred):
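        # Descriptive note: for each stored index pair (i, j), compare the
        # sigmoid of the score difference under y_true with the same quantity
        # under y_pred, so the loss penalizes disagreements in ranking order
        # rather than in absolute values.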
        # tf.print(tf.gather(y_true, tf.constant(index[:, 0])))

        yt = tf.math.sigmoid(
            tf.gather(y_true, tf.constant(index[:, 0])) -
            tf.gather(y_true, tf.constant(index[:, 1])))
        yp = tf.math.sigmoid(