def build_generator(x, nb_rows, nb_cols):
    """ Generator sub-component for the CaloGAN

    Args:
    -----
        x: a tensorflow.keras Input with shape (None, latent_dim)
        nb_rows: int, number of desired output rows
        nb_cols: int, number of desired output cols

    Returns:
    --------
        a tensorflow.keras tensor with the transformation applied
    """

    x = Dense((nb_rows + 2) * (nb_cols + 2) * 36)(x)
    x = Reshape((nb_rows + 2, nb_cols + 2, 36))(x)

    x = Conv2D(16, (2, 2), padding='same', kernel_initializer='he_uniform')(x)
    x = LeakyReLU()(x)
    x = BatchNormalization()(x)

    x = LocallyConnected2D(6, (2, 2), kernel_initializer='he_uniform')(x)
    x = LeakyReLU()(x)

    x = LocallyConnected2D(1, (2, 2),
                           use_bias=False,
                           kernel_initializer='glorot_normal')(x)

    return x
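
# Usage sketch (not from the source): wire build_generator into a Model.
# The latent size and the 3 x 96 grid are illustrative assumptions; the
# output shape works out to (None, nb_rows, nb_cols, 1).
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

z = Input(shape=(1024,))
generator = Model(z, build_generator(z, nb_rows=3, nb_cols=96))
generator.summary()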
Example #2
 def use_deepface(self):
     """
     Define DeepFace model.
     ----------
     - Returns
     
     face: Functional
         DeepFace model structure.
     """
     Input_1 = Input(shape=(152, 152, 3), name='face_Input')
     Conv_1 = Conv2D(32, (11, 11), activation='relu',
                     name='face_Conv_1')(Input_1)
     MaxPool_1 = MaxPooling2D(pool_size=(3, 3),
                              strides=(2, 2),
                              padding='same',
                              name='face_MaxPool_1')(Conv_1)
     Conv_2 = Conv2D(16, (9, 9), activation='relu',
                     name='face_Conv_2')(MaxPool_1)
     LC_1 = LocallyConnected2D(16, (9, 9),
                               activation='relu',
                               name='face_LC_1')(Conv_2)
     LC_2 = LocallyConnected2D(16, (7, 7),
                               strides=(2, 2),
                               activation='relu',
                               name='face_LC_2')(LC_1)
     LC_3 = LocallyConnected2D(16, (5, 5),
                               activation='relu',
                               name='face_LC_3')(LC_2)  # output 21, 21, 16
     Flatten_1 = Flatten(name='face_Flatten')(LC_3)  # output 7056
     FC_1 = Dense(4096, activation='relu')(Flatten_1)
     Drop_1 = Dropout(0.5)(FC_1)
     FC_2 = Dense(8631, activation='softmax')(Drop_1)
     face = Model(inputs=Input_1, outputs=FC_2)
     return face
Example #3
def make_lcn():
    in2 = Input(shape=(36, 19, 30), name="in2", dtype="float32")
    F = LocallyConnected2D(name="layerF",
                           filters=30,
                           kernel_size=(3, 2),
                           padding='valid',
                           input_shape=(36, 19, 30),
                           data_format='channels_last',
                           activation='relu',
                           use_bias=True)(in2)
    G = LocallyConnected2D(name="layerG",
                           filters=25,
                           kernel_size=(3, 2),
                           padding='valid',
                           input_shape=(34, 18, 30),
                           data_format='channels_last',
                           activation='relu',
                           use_bias=True)(F)
    H = LocallyConnected2D(name="layerH",
                           filters=20,
                           kernel_size=(3, 2),
                           padding='valid',
                           input_shape=(32, 17, 25),
                           data_format='channels_last',
                           activation='relu',
                           use_bias=True)(G)
    I = LocallyConnected2D(name="layerI",
                           filters=15,
                           kernel_size=(3, 2),
                           padding='valid',
                           input_shape=(30, 16, 20),
                           data_format='channels_last',
                           activation='relu',
                           use_bias=True)(H)
    J = LocallyConnected2D(name="layerJ",
                           filters=10,
                           kernel_size=(3, 2),
                           padding='valid',
                           input_shape=(28, 15, 15),
                           data_format='channels_last',
                           activation='relu',
                           use_bias=True)(I)
    model = Model(in2, outputs=J)
    metrics_to_output = ['accuracy']
    model.compile(loss='mean_squared_error',
                  optimizer='adam',
                  metrics=metrics_to_output)
    return model
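
# Usage sketch: each LocallyConnected2D with a (3, 2) valid kernel trims two
# rows and one column, so a (36, 19, 30) input comes out as (26, 14, 10).
import numpy as np

lcn = make_lcn()
dummy = np.random.rand(1, 36, 19, 30).astype("float32")
print(lcn.predict(dummy).shape)   # (1, 26, 14, 10)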
Example #4
def loadModel(
    url='https://github.com/swghosh/DeepFace/releases/download/weights-vggface2-2d-aligned/VGGFace2_DeepFace_weights_val-0.9034.h5.zip'
):
    base_model = Sequential()
    base_model.add(
        Convolution2D(32, (11, 11),
                      activation='relu',
                      name='C1',
                      input_shape=(152, 152, 3)))
    base_model.add(
        MaxPooling2D(pool_size=3, strides=2, padding='same', name='M2'))
    base_model.add(Convolution2D(16, (9, 9), activation='relu', name='C3'))
    base_model.add(LocallyConnected2D(16, (9, 9), activation='relu',
                                      name='L4'))
    base_model.add(
        LocallyConnected2D(16, (7, 7), strides=2, activation='relu',
                           name='L5'))
    base_model.add(LocallyConnected2D(16, (5, 5), activation='relu',
                                      name='L6'))
    base_model.add(Flatten(name='F0'))
    base_model.add(Dense(4096, activation='relu', name='F7'))
    base_model.add(Dropout(rate=0.5, name='D0'))
    base_model.add(Dense(8631, activation='softmax', name='F8'))

    #---------------------------------

    home = str(Path.home())

    if not os.path.isfile(
            home + '/.deepface/weights/VGGFace2_DeepFace_weights_val-0.9034.h5'):
        print("VGGFace2_DeepFace_weights_val-0.9034.h5 will be downloaded...")

        output = home + '/.deepface/weights/VGGFace2_DeepFace_weights_val-0.9034.h5.zip'

        gdown.download(url, output, quiet=False)

        #unzip VGGFace2_DeepFace_weights_val-0.9034.h5.zip
        with zipfile.ZipFile(output, 'r') as zip_ref:
            zip_ref.extractall(home + '/.deepface/weights/')

    base_model.load_weights(
        home + '/.deepface/weights/VGGFace2_DeepFace_weights_val-0.9034.h5')

    #drop F8 and D0. F7 is the representation layer.
    deepface_model = Model(inputs=base_model.layers[0].input,
                           outputs=base_model.layers[-3].output)

    return deepface_model
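
# Usage sketch (assumption): the first call downloads and unzips the weights,
# assuming the ~/.deepface/weights/ directory already exists (the surrounding
# package normally creates it).  The returned model maps an aligned
# 152 x 152 RGB face crop to a 4096-d representation (layer F7).
import numpy as np

deepface_model = loadModel()
face = np.random.rand(1, 152, 152, 3)          # stand-in for a preprocessed face
embedding = deepface_model.predict(face)       # shape (1, 4096)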
def build_decoder(size,
                  encoded_shape,
                  in_channels=1,
                  latent_dim=8,
                  dump=False):
    """Create decoder from latent space back to grid."""
    latent_inputs = Input(shape=(latent_dim, ), name='z_sampling')
    x = Dense(encoded_shape[1] * encoded_shape[2] * encoded_shape[3],
              activation=relu)(latent_inputs)
    x = Reshape((encoded_shape[1], encoded_shape[2], encoded_shape[3]))(x)

    x = LocallyConnected2D(32, (3, 3))(x)
    x = ZeroPadding2D(padding=(1, 1))(x)

    x = UpSampling2D((2, 2))(x)
    x = Conv2D(32, (3, 3), activation=relu, padding='same')(x)

    x = UpSampling2D((2, 2))(x)
    x = Conv2D(16, (3, 3), activation=relu, padding='same')(x)

    x = UpSampling2D((2, 2))(x)
    x = Conv2D(16, (3, 3), activation=relu, padding='same')(x)

    x = UpSampling2D((2, 2))(x)
    x = Conv2D(8, (3, 3), activation=relu, padding='same')(x)

    x = UpSampling2D((2, 2))(x)
    # can't use Activation(sigmoid)(x) here because a conv layer is needed to reduce the multi-channel feature map to a single channel
    decoded_layer = Conv2D(1, (3, 3), activation=relu, padding='same')(x)
    decoder = Model(latent_inputs, decoded_layer, name='vae_decoder')
    if dump:
        decoder.summary()
        plot_model(decoder, to_file='dicom_decoder.png', show_shapes=True)

    return decoder
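
# Usage sketch (assumption): encoded_shape must match the encoder output,
# e.g. (None, 8, 8, 32) for 256 x 256 inputs; the five 2x upsamplings then
# give a (256, 256, 1) reconstruction from an 8-d latent sample.
import numpy as np

decoder = build_decoder(size=256, encoded_shape=(None, 8, 8, 32), latent_dim=8)
z = np.random.normal(size=(1, 8))
print(decoder.predict(z).shape)   # (1, 256, 256, 1)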
Example #6
def deepface(input_shape=None):
    input_data = Input(shape=input_shape)

    conv1 = Conv2D(32, (11, 11), activation='relu', name='C1')(input_data)
    maxpool1 = MaxPooling2D(pool_size=3, strides=2, padding='same',
                            name='M2')(conv1)
    conv2 = Conv2D(16, (9, 9), activation='relu', name='C3')(maxpool1)
    lconv1 = LocallyConnected2D(16, (9, 9), activation='relu',
                                name='L4')(conv2)
    lconv2 = LocallyConnected2D(16, (7, 7),
                                strides=2,
                                activation='relu',
                                name='L5')(lconv1)
    lconv3 = LocallyConnected2D(16, (5, 5), activation='relu',
                                name='L6')(lconv2)
    flat = Flatten(name='F0')(lconv3)
    fc1 = Dense(4096, activation='relu', name='F7')(flat)
    drop = Dropout(rate=0.5, name='D0')(fc1)
    fc2 = Dense(8631, activation='softmax', name='F8')(drop)

    model = Model(inputs=input_data, outputs=fc2, name="DeepFace")

    return model
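
# Usage sketch: same topology as loadModel above but without pretrained
# weights; cutting the graph at F7 gives the 4096-d descriptor.
from tensorflow.keras.models import Model

full = deepface(input_shape=(152, 152, 3))
descriptor = Model(inputs=full.input, outputs=full.get_layer('F7').output)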
Example #7
def attention_map_model(backbone):
    """ 
    Add attention map branch to CNN
    use DenseNet121, MobileNet as backbone
    
    Example:
    backbone = DenseNet121(weights='imagenet', include_top=False)
    backbone = MobileNet(weights='imagenet', include_top=False)
    """

    in_layer = Input((80, 80, 2))  # ???? dimensions question ????
    reshape_layer = Conv2D(3, (3, 3), activation='relu', padding='same', input_shape=(80, 80, 2))
    for layer in backbone.layers:
        layer.trainable = True
    
    reshape_ = reshape_layer(in_layer)
    pt_depth = backbone.get_output_shape_at(0)[-1]
    pt_features = backbone(reshape_)
    bn_features = BatchNormalization()(pt_features)
    
    # attention mechanism to turn pixels in the GAP on/off
    attn_layer = Conv2D(64, (1,1), padding='same', activation='relu')(bn_features)
    attn_layer = Conv2D(64, (1,1), padding='same', activation='relu')(attn_layer)
    attn_layer = LocallyConnected2D(1, (1,1), padding='valid', activation='sigmoid')(attn_layer)
    
    # insert it into the backbone branch
    # initialize weights to ones (fixed, non-trainable)
    up_c2_w = np.ones((1, 1, 1, pt_depth))
    up_c2 = Conv2D(pt_depth, (1,1), padding='same', activation='linear', use_bias=False, weights=[up_c2_w])
    up_c2.trainable = False
    attn_layer = up_c2(attn_layer)
    
    # get together attn_layer and bn_features branches
    mask_features = multiply([attn_layer, bn_features])
    gap_features = GlobalAveragePooling2D()(mask_features)
    gap_mask = GlobalAveragePooling2D()(attn_layer)
    
    # account for missing values from attention model
    gap = Lambda(lambda x: x[0]/x[1], name='RescaleGAP')([gap_features, gap_mask])
    gap_dr = Dropout(0.5)(gap)
    dr_steps = Dropout(0.25)(Dense(1024, activation='elu')(gap_dr))
    # linear 16 bit
    out_layer = Dense(1, activation='sigmoid')(dr_steps)
    
    attn_model = models.Model(inputs=[in_layer], outputs=[out_layer])
    
    return attn_model
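
# Usage sketch (assumption): any fully convolutional ImageNet backbone with
# include_top=False should fit, as the docstring suggests; weights='imagenet'
# triggers a download on first use.
from tensorflow.keras.applications import MobileNet

backbone = MobileNet(weights='imagenet', include_top=False)
attn_model = attention_map_model(backbone)
attn_model.compile(optimizer='adam', loss='binary_crossentropy',
                   metrics=['accuracy'])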
Example #8
def inpainting_attention(primary, carryover, constant=-10):
    def _initialize_bias(const=-5):
        def _(shape, dtype=None):
            assert len(shape) == 3, 'must be a 3D shape'
            x = np.zeros(shape)
            x[:, :, -1] = const
            return x

        return _

    x = concatenate([primary, carryover], axis=-1)
    h = ZeroPadding2D((1, 1))(x)
    lcn = LocallyConnected2D(filters=2,
                             kernel_size=(3, 3),
                             bias_initializer=_initialize_bias(constant))

    h = lcn(h)
    weights = Lambda(channel_softmax)(h)

    channel_sum = Lambda(K.sum, arguments={'axis': -1, 'keepdims': True})

    return channel_sum(multiply([x, weights]))
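
# Usage sketch (assumption): blend a generated calorimeter layer with the
# carried-over energy from the previous layer; channel_softmax and K come
# from the surrounding module.  The 12 x 6 grid is illustrative.
from tensorflow.keras.layers import Input

primary = Input(shape=(12, 6, 1))
carryover = Input(shape=(12, 6, 1))
blended = inpainting_attention(primary, carryover)   # shape (None, 12, 6, 1)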
def build_encoded_layer(input_img, l1_l2=(0.0e-4, 0.0e-4), use_dropout=True):
    """Create encoded layer, prior to projection to latent space."""
    x = Conv2D(8, (3, 3), activation=relu, padding='same')(input_img)
    x = MaxPooling2D((2, 2), padding='same')(x)

    if use_dropout:
        x = SpatialDropout2D(0.1)(x)
    x = Conv2D(16, (3, 3), activation=relu, padding='same')(x)
    x = MaxPooling2D((2, 2), padding='same')(x)

    x = Conv2D(16, (3, 3), activation=relu, padding='same')(x)
    x = MaxPooling2D((2, 2), padding='same')(x)

    x = Conv2D(32, (3, 3), activation=relu, padding='same')(x)
    x = MaxPooling2D((2, 2), padding='same')(x)

    x = LocallyConnected2D(32, (3, 3))(x)
    x = ZeroPadding2D(padding=(1, 1))(x)
    x = MaxPooling2D((2, 2), padding='same')(x)
    l1, l2 = l1_l2
    encoded_layer = ActivityRegularization(l1=l1, l2=l2)(x)
    return encoded_layer
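
# Usage sketch: the natural counterpart to build_decoder above; a 256 x 256
# single-channel image is reduced to an (8, 8, 32) encoded map.
from tensorflow.keras.layers import Input

input_img = Input(shape=(256, 256, 1))
encoded = build_encoded_layer(input_img)   # shape (None, 8, 8, 32)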
# Phase 1: in2 -> ABCDE

pre = Reshape(target_shape=(36, 19, 27))(input2)  # ALWAYS DO WIDTH, HEIGHT, CHANNELS
A = Conv2D(name="layerA", filters=30, kernel_size=(1, 1), padding='valid', input_shape=(36, 19, 27), data_format='channels_last', activation='relu', use_bias=True)(pre)
B = Conv2D(name="layerB", filters=30, kernel_size=(1, 1), padding='valid', input_shape=A.shape, data_format='channels_last', activation='relu', use_bias=True)(A)
C = Conv2D(name="layerC", filters=30, kernel_size=(1, 1), padding='valid', input_shape=B.shape, data_format='channels_last', activation='relu', use_bias=True)(B)
D = Conv2D(name="layerD", filters=30, kernel_size=(1, 1), padding='valid', input_shape=C.shape, data_format='channels_last', activation='relu', use_bias=True)(C)
E = Conv2D(name="layerE", filters=30, kernel_size=(1, 1), padding='valid', input_shape=D.shape, data_format='channels_last', activation='relu', use_bias=True)(D)


# Phase 2: FGHIJ
E2 = tensorflow.keras.layers.concatenate([E, E], name="merge_E_E", axis=1)
#print( E2.shape )
#exit( 0 )

F = LocallyConnected2D(name="layerF", filters=20, kernel_size=(4, 2), strides=(2, 1), padding='valid', input_shape=E2.shape, data_format='channels_last', activation='relu', use_bias=True)(E2)

G = LocallyConnected2D(name="layerG", filters=14, kernel_size=(3, 2), strides=(1, 1), padding='valid', input_shape=F.shape, data_format='channels_last', activation='relu', use_bias=True)(F)

H = LocallyConnected2D(name="layerH", filters=13, kernel_size=(3, 2), strides=(1, 1), padding='valid', input_shape=G.shape, data_format='channels_last', activation='relu', use_bias=True)(G)

I = LocallyConnected2D(name="layerI", filters=12, kernel_size=(3, 2), strides=(1, 1), padding='valid', input_shape=H.shape, data_format='channels_last', activation='relu', use_bias=True)(H)

J = LocallyConnected2D(name="layerJ", filters=10, kernel_size=(3, 2), strides=(1, 1), padding='valid', input_shape=I.shape, data_format='channels_last', activation='relu', use_bias=True)(I)

#print( Ia.shape )
#exit( 0 )

# Phase 3: flatJ, merge, KLMN, output
flatJ = Flatten(name="flatJ", data_format='channels_last')(J)
print(flatJ.shape)
Example #11
           kernel_size=(1, 1),
           padding='valid',
           data_format='channels_last',
           activation='relu',
           use_bias=True)(pre)
B1 = Conv2D(name="layerB1",
            filters=10,
            kernel_size=(1, 1),
            padding='valid',
            data_format='channels_last',
            activation='relu',
            use_bias=True)(A)
B2 = LocallyConnected2D(name="layerB2",
                        filters=5,
                        kernel_size=(1, 1),
                        padding='valid',
                        data_format='channels_last',
                        activation='relu',
                        use_bias=True)(B1)
B3 = LocallyConnected2D(name="layerB3",
                        filters=5,
                        kernel_size=(1, 1),
                        padding='valid',
                        data_format='channels_last',
                        activation='relu',
                        use_bias=True)(B2)
merge = tensorflow.keras.layers.concatenate([B3, in1up], name="merge", axis=-1)

# Phase 2: FGHIJ
C = LocallyConnected2D(name="layerC",
                       filters=3,
def buildNet(inputShape, numUniqueClasses):
    """
    :param inputShape: The shape of the network input. The first element is
        always set to None to allow for arbitrary batch sizes.
    :param numUniqueClasses: Number of different classes.
    """

    model_input = Input(shape=inputShape, name='model_input')

    layers = ResidualLayer(model_input,
                           name='res1',
                           num_filters=8,
                           filter_size=(3, 1))

    # First spatial reduction
    layers = ResidualLayer(layers,
                           name='res2',
                           num_filters=8,
                           filter_size=(3, 1),
                           stride=(5, 1))

    layers = ResidualLayer(layers,
                           name='res3',
                           num_filters=8,
                           filter_size=(3, 1))

    # Second spatial reduction
    layers = ResidualLayer(layers,
                           name='res4',
                           num_filters=1,
                           filter_size=(3, 1),
                           stride=(3, 1))

    layers = Activation(activation='relu', name='act1')(layers)

    layers = Dropout(rate=0.3, name='dropout1')(layers)

    layers = ZeroPadding2D((2, 0))(layers)

    # Note: deleted the bias and moved the order of layer-BN-NL to match
    # the original lasagne version, which was done with the batch_norm method.
    layers = LocallyConnected2D(filters=1,
                                kernel_size=(5, 1),
                                kernel_initializer='he_uniform',
                                activation='linear',
                                use_bias=False,
                                name='loc_con1')(layers)

    layers = BatchNormalization(name='bn1')(layers)

    layers = Activation(activation='relu', name='act_after_loc')(layers)

    layers = Flatten(name='flatten1')(layers)

    layers = Dense(units=numUniqueClasses,
                   activation='linear',
                   name='model_logits')(layers)

    model_output = Activation(activation='softmax',
                              name='model_output')(layers)

    model = Model(inputs=model_input, outputs=model_output)

    return model
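
# Usage sketch (assumption): ResidualLayer is defined elsewhere in the source;
# the (300, 1, 1) input shape and 10 classes are illustrative (shape excludes
# the batch dimension).
model = buildNet(inputShape=(300, 1, 1), numUniqueClasses=10)
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])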
           input_shape=(36, 19, 30),
           data_format='channels_last',
           activation='relu',
           use_bias=True)(D)

# Phase 2: FGHIJ
#Ea = UpSampling2D( name="layerEa", size=(2, 1), data_format='channels_last' )( E )
Ea = tensorflow.keras.layers.concatenate([E, E], name="merge_E_E", axis=1)
#print( Ea.shape )
#exit( 0 )

F = LocallyConnected2D(name="layerF",
                       filters=30,
                       strides=(3, 1),
                       kernel_size=(4, 2),
                       padding='valid',
                       input_shape=(72, 19, 30),
                       data_format='channels_last',
                       activation='relu',
                       use_bias=True)(Ea)
Fa = tensorflow.keras.layers.concatenate([F, F], name="merge_F_F", axis=1)

G = LocallyConnected2D(name="layerG",
                       filters=25,
                       strides=(3, 1),
                       kernel_size=(4, 2),
                       padding='valid',
                       input_shape=(46, 18, 30),
                       data_format='channels_last',
                       activation='relu',
                       use_bias=True)(Fa)
           kernel_size=(1, 1),
           padding='valid',
           data_format='channels_last',
           use_bias=True)(pre)
A = LeakyReLU()(A)
B1 = Conv2D(name="layerB1",
            filters=8,
            kernel_size=(1, 1),
            padding='valid',
            data_format='channels_last',
            use_bias=True)(A)
B1 = LeakyReLU()(B1)
B4 = LocallyConnected2D(name="layerB4",
                        filters=4,
                        kernel_size=(2, 3),
                        strides=(2, 2),
                        padding='valid',
                        data_format='channels_last',
                        activation=None,
                        use_bias=True)(B1)

X = Conv2D(name="layerX",
           filters=8,
           kernel_size=(1, 1),
           padding='valid',
           data_format='channels_last',
           use_bias=True)(pre)
X = LeakyReLU()(X)
Y = LocallyConnected2D(name="layerY",
                       filters=6,
                       kernel_size=(1, 1),
                       padding='valid',
Example #15
def main():
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    ###################### LOADING DATA #############################
    print("\n--------------- MY DATA -----------------------\n")
    # Load training data into an array
    train_array = np.loadtxt("zip.train")
    # extracting the levels from the training array
    train_levels_data = train_array[0:7291, 0:1]
    # convert the class vector to binary class matrix
    # as many columns as there are classes
    train_levels = to_categorical(train_levels_data)
    # extracting the features from the training array
    train_features = train_array[0:7291, 1:257]
    # Get total number of training data
    total_train = len(train_features)

    # Load testing data into an array
    test_array = np.loadtxt("zip.test")
    # extract test levels from test_array, for a sample of data
    test_levels = test_array[0:2007, 0:1]
    # extract test features from test_array, for a sample of data
    test_features = test_array[0:2007, 1:257]

    # Get total number of test data
    total_test = len(test_features)
    # total size of all data
    total_val = total_test + total_train

    # View the data shape
    # print(test_features.shape)
    # Output is (2007, 257)

    # this size is not given in the book
    batch_size = 256
    # figure had 30 training epochs
    start_epoch = 0
    num_epochs = 30
    interval = 1
    # 16 x 16 greyscale images
    IMG_HEIGHT = 16
    IMG_WIDTH = 16

    # Net-2 Architecture (two dense layers)
    model_two = Sequential([
        Dense(12, input_dim=256, activation='sigmoid'),
        Dense(10, input_dim=256, activation='sigmoid')
    ])
    # Net-3 Architecture (two locally connected layers and one Dense)
    model_three = Sequential([
        LocallyConnected2D(64, (3, 3),
                           input_shape=(16, 16, 1),
                           activation='sigmoid'),
        LocallyConnected2D(16, (5, 5), activation='sigmoid'),
        Flatten(),
        Dense(10, activation='sigmoid')
    ])
    # Net-4 Architecture (Conv2D, locally connected, and a Dense last layer)
    model_four = Sequential([
        Conv2D(128, (3, 3), input_shape=(16, 16, 1), activation='sigmoid'),
        LocallyConnected2D(16, (5, 5), activation='sigmoid'),
        Flatten(),
        Dense(10, activation='sigmoid')
    ])
    # Net-5 Architecture (conv2d, dense layer)
    model_five = Sequential([
        Conv2D(128, (3, 3), input_shape=(16, 16, 1), activation='sigmoid'),
        LocallyConnected2D(16, (5, 5), activation='sigmoid'),
        Flatten(),
        Dense(10, activation='sigmoid')
    ])

    # currently only trying to get Net-1's percent correct on the test data
    # architecture seems right but isn't giving the values shown in the figure
    net1_correct = net_one(train_levels, train_features, test_levels,
                           test_features, num_epochs, batch_size)

    graph_properties = np.arange(start_epoch, num_epochs, interval)
    plt.figure(figsize=(8, 8))
    plt.subplot(2, 1, 1)
    plt.plot(graph_properties, net1_correct, label='Net-1')
    plt.xlabel("Training Epochs")
    plt.ylabel("% Correct on Test Data")
    plt.legend(loc='lower right')
    plt.show()
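
# Entry-point sketch: zip.train and zip.test (the USPS digit data) must be in
# the working directory, and net_one must be defined in the same module.
if __name__ == '__main__':
    main()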
           kernel_size=(1, 1),
           padding='valid',
           data_format='channels_last',
           use_bias=True)(pre)
A = LeakyReLU()(A)
B1 = Conv2D(name="layerB1",
            filters=10,
            kernel_size=(1, 1),
            padding='valid',
            data_format='channels_last',
            use_bias=True)(A)
B1 = LeakyReLU()(B1)
B4 = LocallyConnected2D(name="layerB4",
                        filters=5,
                        kernel_size=(4, 3),
                        strides=(4, 2),
                        padding='valid',
                        data_format='channels_last',
                        activation=None,
                        use_bias=True)(B1)

C = Add()([B4, in1merge])
C = LeakyReLU()(C)
E = LocallyConnected2D(name="layerE",
                       filters=4,
                       kernel_size=(3, 3),
                       strides=(3, 3),
                       padding='valid',
                       data_format='channels_last',
                       use_bias=True)(C)
E = LeakyReLU()(E)
'''
Example #17
batch_size_i_array = np.logspace(0, 4, 10)

make_x_particles = False
loops = 100
''' Create the network. '''
latent_size = 200

loc = Sequential([
    Dense(128 * 7 * 7, input_dim=latent_size),
    Reshape((7, 7, 128)),
    Conv2D(64, (5, 5), padding='same', kernel_initializer='he_uniform'),
    LeakyReLU(),
    BatchNormalization(),
    UpSampling2D(size=(2, 2), interpolation='bilinear'),
    ZeroPadding2D((2, 2)),
    LocallyConnected2D(6, (5, 5), kernel_initializer='he_uniform'),
    LeakyReLU(),
    BatchNormalization(),
    UpSampling2D(size=(2, 2), interpolation='bilinear'),
    LocallyConnected2D(6, (3, 3), kernel_initializer='he_uniform'),
    LeakyReLU(),
    LocallyConnected2D(1, (2, 2),
                       use_bias=False,
                       kernel_initializer='glorot_normal'),
    Activation('relu')
])

latent = Input(shape=(latent_size, ))

image_class = Input(shape=(1, ), dtype='int32')
emb = Flatten()(Embedding(2,
def build_discriminator(image, mbd=False, sparsity=False, sparsity_mbd=False):
    """ Generator sub-component for the CaloGAN

    Args:
    -----
        image: tensorflow.keras tensor of 4 dimensions (i.e. the output of one calo layer)
        mbd: bool, perform feature-level minibatch discrimination
        sparsity: bool, whether or not to calculate and include sparsity
        sparsity_mbd: bool, perform minibatch discrimination on the sparsity
            values in a batch

    Returns:
    --------
        a tensorflow.keras tensor of features

    """

    x = Conv2D(64, (2, 2), padding='same')(image)
    x = LeakyReLU()(x)

    x = ZeroPadding2D((1, 1))(x)
    x = LocallyConnected2D(16, (3, 3), padding='valid', strides=(1, 2))(x)
    x = LeakyReLU()(x)
    x = BatchNormalization()(x)

    x = ZeroPadding2D((1, 1))(x)
    x = LocallyConnected2D(8, (2, 2), padding='valid')(x)
    x = LeakyReLU()(x)
    x = BatchNormalization()(x)

    x = ZeroPadding2D((1, 1))(x)
    x = LocallyConnected2D(8, (2, 2), padding='valid', strides=(1, 2))(x)
    x = LeakyReLU()(x)
    x = BatchNormalization()(x)

    x = Flatten()(x)

    if mbd or sparsity or sparsity_mbd:
        minibatch_featurizer = Lambda(minibatch_discriminator,
                                      output_shape=minibatch_output_shape)

        features = [x]
        nb_features = 10
        vspace_dim = 10

        # creates the kernel space for the minibatch discrimination
        if mbd:
            K_x = Dense3D(nb_features, vspace_dim)(x)
            features.append(Activation('tanh')(minibatch_featurizer(K_x)))

        if sparsity or sparsity_mbd:
            sparsity_detector = Lambda(sparsity_level, sparsity_output_shape)
            empirical_sparsity = sparsity_detector(image)
            if sparsity:
                features.append(empirical_sparsity)
            if sparsity_mbd:
                K_sparsity = Dense3D(nb_features,
                                     vspace_dim)(empirical_sparsity)
                features.append(
                    Activation('tanh')(minibatch_featurizer(K_sparsity)))

        return concatenate(features)
    else:
        return x
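
# Usage sketch (assumption): a single 3 x 96 calorimeter layer with the
# minibatch-discrimination and sparsity options off, so Dense3D and the
# Lambda helpers defined elsewhere in the source are not needed.
from tensorflow.keras.layers import Input

calo_layer = Input(shape=(3, 96, 1))
features = build_discriminator(calo_layer)   # flattened feature tensor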
            activation='relu',
            use_bias=True)(B1)
B3 = Conv2D(name="layerB3",
            filters=15,
            kernel_size=(1, 1),
            padding='valid',
            data_format='channels_last',
            activation='relu',
            use_bias=True)(B2)
merge = tensorflow.keras.layers.concatenate([B3, in1up], name="merge", axis=-1)

# Phase 2: FGHIJ
C = LocallyConnected2D(name="layerC",
                       filters=12,
                       kernel_size=(1, 1),
                       strides=(1, 1),
                       padding='valid',
                       data_format='channels_last',
                       activation='relu',
                       use_bias=True)(merge)
D = LocallyConnected2D(name="layerD",
                       filters=10,
                       kernel_size=(1, 1),
                       strides=(1, 1),
                       padding='valid',
                       data_format='channels_last',
                       activation='relu',
                       use_bias=True)(C)
E = LocallyConnected2D(name="layerE",
                       filters=10,
                       kernel_size=(1, 1),
                       strides=(1, 1),
Example #20
           kernel_size=(1, 1),
           padding='valid',
           data_format='channels_last',
           use_bias=True)(pre)
A = LeakyReLU()(A)
B1 = Conv2D(name="layerB1",
            filters=8,
            kernel_size=(1, 1),
            padding='valid',
            data_format='channels_last',
            use_bias=True)(A)
B1 = LeakyReLU()(B1)
B4 = LocallyConnected2D(name="layerB4",
                        filters=merge_depth,
                        kernel_size=(2, 3),
                        strides=(2, 2),
                        padding='valid',
                        data_format='channels_last',
                        activation=None,
                        use_bias=True)(B1)

X1a = Conv2D(name="layerX1a",
             filters=4,
             kernel_size=(1, 1),
             padding='valid',
             data_format='channels_last',
             use_bias=True)(pre)
X1a = LeakyReLU()(X1a)
Y1a = LocallyConnected2D(name="layerY1a",
                         filters=4,
                         kernel_size=(1, 1),
                         padding='valid',
Example #21
num_input_dimensions2 = 18494 - num_input_dimensions1

input1 = Input(shape=(num_input_dimensions1, ), name="in1", dtype="float32")

#input2 = Input(shape=(36, 19, 27,), name="in2", dtype="float32" )
input2 = Input(shape=(18468, ), name="in2", dtype="float32")

pre = Reshape(target_shape=(36, 19, 27))(input2)  # ALWAYS DO WIDTH, HEIGHT, CHANNELS
l1 = LocallyConnected2D(name="layer1",
                        filters=30,
                        kernel_size=(3, 2),
                        padding='valid',
                        input_shape=(36, 19, 27),
                        data_format='channels_last',
                        activation='relu',
                        use_bias=True)(pre)
l2 = LocallyConnected2D(name="layer2",
                        filters=25,
                        kernel_size=(3, 2),
                        padding='valid',
                        input_shape=(34, 18, 30),
                        data_format='channels_last',
                        activation='relu',
                        use_bias=True)(l1)
l3 = LocallyConnected2D(name="layer3",
                        filters=20,
                        kernel_size=(3, 2),
                        padding='valid',