Пример #1
0
    def __init__(self, inshape, nb_unet_features=None, mean_cap=100, atlas_feats=1, src_feats=1, **kwargs):
        """Build a template-creation registration network.

        A trainable atlas (template) image is stored as layer weights, warped
        bidirectionally against each source image by an internal VxmDense
        model, and a capped running mean of the negative flow is exposed as an
        extra output (presumably a regularization target for training —
        confirm against the loss configuration).

        Parameters:
            inshape: Input shape. e.g. (192, 192, 192)
            nb_unet_features: Unet convolutional features. See VxmDense documentation for more information.
            mean_cap: Cap for mean stream. Default is 100.
            atlas_feats: Number of atlas/template features. Default is 1.
            src_feats: Number of source image features. Default is 1.
            kwargs: Forwarded to the internal VxmDense model.
        """

        # configure inputs
        atlas_input = tf.keras.Input(shape=[*inshape, atlas_feats], name='atlas_input')
        source_input = tf.keras.Input(shape=[*inshape, src_feats], name='source_input')

        # pre-warp (atlas) model: the atlas image lives in this layer's
        # weights; the tiny-stddev init keeps the initial template near zero.
        # NOTE(review): shape hard-codes 1 feature channel rather than
        # atlas_feats — confirm atlas_feats > 1 is actually supported.
        atlas_layer = ne.layers.LocalParamWithInput(name='atlas', shape=(*inshape, 1), mult=1.0, initializer=KI.RandomNormal(mean=0.0, stddev=1e-7))
        atlas_tensor = atlas_layer(atlas_input)
        warp_input_model = tf.keras.Model([atlas_input, source_input], outputs=[atlas_tensor, source_input])

        # warp model: bidirectional dense registration between atlas and source
        vxm_model = VxmDense(inshape, nb_unet_features=nb_unet_features, bidir=True, input_model=warp_input_model, **kwargs)

        # extract tensors from stacked model
        y_source = vxm_model.references.y_source
        y_target = vxm_model.references.y_target
        pos_flow = vxm_model.references.pos_flow
        neg_flow = vxm_model.references.neg_flow

        # get mean stream of negative flow (capped running mean)
        mean_stream = ne.layers.MeanStream(name='mean_stream', cap=mean_cap)(neg_flow)

        # initialize the keras model
        super().__init__(inputs=[atlas_input, source_input], outputs=[y_source, y_target, mean_stream, pos_flow])

        # cache pointers to important layers and tensors for future reference
        self.references = LoadableModel.ReferenceContainer()
        self.references.atlas_layer = atlas_layer
        self.references.atlas_tensor = atlas_tensor
Пример #2
0
from tensorflow.keras import initializers
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt

# Weight-initialization demo: sample values from two basic Keras
# initializers and visualize their distributions.

# normal (Gaussian) distribution
normal_init = initializers.RandomNormal()
normal_sample = normal_init(shape=[1000])
sample_mean = np.mean(normal_sample)
sample_std = np.std(normal_sample)
print(sample_mean, sample_std)
sns.displot(normal_sample)
plt.show()

# uniform distribution
uniform_init = initializers.RandomUniform()
uniform_sample = uniform_init(shape=[1000])
sns.displot(uniform_sample)
plt.show()
Пример #3
0
 def __init__(
     self,
     width,
     depth,
     num_classes=20,
     num_anchors=9,
     separable_conv=True,
     freeze_bn=False,
     **kwargs,
 ):
     """Classification head: `depth` conv layers followed by a class-prediction conv.

     Args:
         width: Number of filters in each intermediate conv layer.
         depth: Number of intermediate conv layers.
         num_classes: Number of object classes to predict. Default is 20.
         num_anchors: Number of anchors per spatial location. Default is 9.
         separable_conv: Use SeparableConv2D instead of Conv2D. Default is True.
         freeze_bn: Accepted for interface compatibility; not used by this
             implementation.
         kwargs: Forwarded to the keras base class.
     """
     super(ClassNet, self).__init__(**kwargs)
     self.width = width
     self.depth = depth
     self.num_classes = num_classes
     self.num_anchors = num_anchors
     self.separable_conv = separable_conv
     # conv options shared by every conv in this head
     options = {
         "kernel_size": 3,
         "strides": 1,
         "padding": "same",
     }
     if self.separable_conv:
         kernel_initializer = {
             "depthwise_initializer": initializers.VarianceScaling(),
             "pointwise_initializer": initializers.VarianceScaling(),
         }
         options.update(kernel_initializer)
         self.convs = [
             layers.SeparableConv2D(
                 filters=width,
                 bias_initializer="zeros",
                 name=f"{self.name}/class-{i}",
                 **options,
             ) for i in range(depth)
         ]
         # prior-probability bias init pushes initial class scores toward 0.01
         self.head = layers.SeparableConv2D(
             filters=num_classes * num_anchors,
             bias_initializer=PriorProbability(probability=0.01),
             name=f"{self.name}/class-predict",
             **options,
         )
     else:
         kernel_initializer = {
             "kernel_initializer":
             initializers.RandomNormal(mean=0.0, stddev=0.01, seed=None)
         }
         options.update(kernel_initializer)
         self.convs = [
             layers.Conv2D(
                 filters=width,
                 bias_initializer="zeros",
                 name=f"{self.name}/class-{i}",
                 **options,
             ) for i in range(depth)
         ]
         # Fixed: name the head consistently with the separable branch and
         # the other layers here (was the bare "class-predict").
         self.head = layers.Conv2D(
             filters=num_classes * num_anchors,
             bias_initializer=PriorProbability(probability=0.01),
             name=f"{self.name}/class-predict",
             **options,
         )
     # one BatchNorm per conv layer and per feature level (levels 3..7)
     self.bns = [[
         layers.BatchNormalization(momentum=MOMENTUM,
                                   epsilon=EPSILON,
                                   name=f"{self.name}/class-{i}-bn-{j}")
         for j in range(3, 8)
     ] for i in range(depth)]
     self.relu = layers.Lambda(lambda x: tf.nn.swish(x))
     self.reshape = layers.Reshape((-1, num_classes))
     self.activation = layers.Activation("sigmoid")
     self.level = 0
Пример #4
0
def main(config="../../config.yaml", namespace=""):
    """Build and run a hetero federated transfer learning (FTL) pipeline.

    Parameters:
        config: Path to a job-config YAML file, or an already-loaded config object.
        namespace: Suffix appended to data namespaces for experiment isolation.
    """
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]
    backend = config.backend
    work_mode = config.work_mode

    # NUS-WIDE is split feature-wise between the guest and host parties
    guest_train_data = {
        "name": "nus_wide_guest",
        "namespace": f"experiment{namespace}"
    }
    host_train_data = {
        "name": "nus_wide_host",
        "namespace": f"experiment{namespace}"
    }
    pipeline = PipeLine().set_initiator(role='guest',
                                        party_id=guest).set_roles(guest=guest,
                                                                  host=host)

    reader_0 = Reader(name="reader_0")
    reader_0.get_party_instance(
        role='guest', party_id=guest).component_param(table=guest_train_data)
    reader_0.get_party_instance(
        role='host', party_id=host).component_param(table=host_train_data)

    # only the guest side holds labels
    dataio_0 = DataIO(name="dataio_0")
    dataio_0.get_party_instance(role='guest', party_id=guest).component_param(
        with_label=True, output_format="dense")
    dataio_0.get_party_instance(
        role='host', party_id=host).component_param(with_label=False)

    hetero_ftl_0 = HeteroFTL(name='hetero_ftl_0',
                             epochs=10,
                             alpha=1,
                             batch_size=-1,  # -1: full-batch training
                             mode='plain',
                             communication_efficient=True,
                             local_round=5)

    hetero_ftl_0.add_nn_layer(
        Dense(units=32,
              activation='sigmoid',
              kernel_initializer=initializers.RandomNormal(stddev=1.0),
              bias_initializer=initializers.Zeros()))
    hetero_ftl_0.compile(optimizer=optimizers.Adam(lr=0.01))
    evaluation_0 = Evaluation(name='evaluation_0', eval_type="binary")

    # wire components: reader -> dataio -> FTL -> evaluation
    pipeline.add_component(reader_0)
    pipeline.add_component(dataio_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(hetero_ftl_0,
                           data=Data(train_data=dataio_0.output.data))
    pipeline.add_component(evaluation_0,
                           data=Data(data=hetero_ftl_0.output.data))

    pipeline.compile()

    job_parameters = JobParameters(backend=backend, work_mode=work_mode)
    pipeline.fit(job_parameters)
    # Fixed: removed a stray unterminated `"""` that followed this function
    # and made the module unparseable.
Пример #5
0
# training hyper-parameters
epochs = 5
batch_size = 10000
retrain_existing_model = True
k = 10  # number of nearest neighbors to consider per layer

# load data
X, Y = tools.data_loader.load_data()

# training

# Build a small fully-connected classifier.
model = Sequential()

# Hidden layer; weights drawn with std = 10 / sqrt(#pixels).
weight_init = initializers.RandomNormal(stddev=10.0 * settings.nrOfPixels ** (-.5))
model.add(Dense(units=2,
                kernel_initializer=weight_init,
                input_shape=(settings.nrOfPixels,),
                name='Dense1_2'))
model.add(Activation('relu', name='relu'))  # for testing: can be replaced by adding activation in previous Dense

# Output layer sized to the number of target classes.
model.add(Dense(units=Y[settings.kinds[0]].shape[-1],
                kernel_initializer=weight_init,
                name='Dense2_2'))
model.add(Activation('softmax', name='softmax'))

model.compile(loss='mean_squared_error',
              optimizer='adam',
              metrics=['accuracy'])
Пример #6
0
    def __init__(self,
                 inshape,
                 pheno_input_shape,
                 nb_unet_features=None,
                 src_feats=1,
                 conv_image_shape=None,
                 conv_size=3,
                 conv_nb_levels=0,
                 conv_nb_features=32,
                 extra_conv_layers=3,
                 use_mean_stream=True,
                 mean_cap=100,
                 templcondsi=False,
                 templcondsi_init=None,
                 **kwargs):
        """Conditional (phenotype-driven) template creation network.

        A phenotype vector is decoded into an additive atlas image; the
        resulting atlas is registered bidirectionally against each source
        image by an internal VxmDense model.

        Parameters:
            inshape: Input shape. e.g. (192, 192, 192)
            pheno_input_shape: Pheno data input shape. e.g. (2)
            nb_unet_features: Unet convolutional features. See VxmDense documentation for more information.
            src_feats: Number of source (atlas) features. Default is 1.
            conv_image_shape: Intermediate phenotype image shape. Default is inshape with conv_nb_features.
            conv_size: Atlas generator convolutional kernel size. Default is 3.
            conv_nb_levels: Number of levels in atlas generator unet. Default is 0.
            conv_nb_features: Number of features in atlas generator convolutions. Default is 32.
            extra_conv_layers: Number of extra convolutions after unet in atlas generator. Default is 3.
            use_mean_stream: Return mean stream layer for training. Default is True.
            mean_cap: Cap for mean stream. Default is 100.
            templcondsi: Derive the atlas' first channel from a softmax over
                the remaining channels via a 1x1 conv. Default is False.
            templcondsi_init: Optional initial kernel for that 1x1 conv.
                Default is None.
            kwargs: Forwarded to the internal VxmDense model.
        """

        if conv_image_shape is None:
            conv_image_shape = (*inshape, conv_nb_features)

        # build initial dense pheno to image shape model
        pheno_input = KL.Input(pheno_input_shape, name='pheno_input')
        pheno_dense = KL.Dense(np.prod(conv_image_shape),
                               activation='elu')(pheno_input)
        pheno_reshaped = KL.Reshape(conv_image_shape,
                                    name='pheno_reshape')(pheno_dense)
        pheno_init_model = tf.keras.models.Model(pheno_input, pheno_reshaped)

        # build model to decode reshaped pheno
        pheno_decoder_model = ne.models.conv_dec(
            conv_nb_features,
            conv_image_shape,
            conv_nb_levels,
            conv_size,
            nb_labels=conv_nb_features,
            final_pred_activation='linear',
            input_model=pheno_init_model,
            name='atlas_decoder')

        # add extra convolutions
        Conv = getattr(KL, 'Conv%dD' % len(inshape))
        last = pheno_decoder_model.output
        for n in range(extra_conv_layers):
            last = Conv(conv_nb_features,
                        kernel_size=conv_size,
                        padding='same',
                        name='atlas_extra_conv_%d' % n)(last)

        # final convolution to get atlas features; tiny-stddev init keeps the
        # initial atlas contribution near zero
        atlas_gen = Conv(src_feats,
                         kernel_size=3,
                         padding='same',
                         name='atlas_gen',
                         kernel_initializer=KI.RandomNormal(mean=0.0,
                                                            stddev=1e-7),
                         bias_initializer=KI.RandomNormal(mean=0.0,
                                                          stddev=1e-7))(last)

        # image input layers
        atlas_input = tf.keras.Input((*inshape, src_feats), name='atlas_input')
        source_input = tf.keras.Input((*inshape, src_feats),
                                      name='source_input')

        if templcondsi:
            # Fixed: the original referenced an undefined name `pout` here
            # (NameError when templcondsi=True); the decoded atlas tensor
            # is `atlas_gen`.
            atlas_tensor = KL.Add(name='atlas_tmp')([atlas_input, atlas_gen])
            # change first channel to be result from seg with another add layer
            tmp_layer = KL.Lambda(lambda x: K.softmax(x[..., 1:]))(
                atlas_tensor)
            # Fixed: renamed from 'atlas_gen', which duplicated the conv
            # above — keras requires unique layer names within a model.
            conv_layer = Conv(1,
                              kernel_size=1,
                              padding='same',
                              use_bias=False,
                              name='atlas_gen_condsi',
                              kernel_initializer=KI.RandomNormal(mean=0,
                                                                 stddev=1e-5))
            x_img = conv_layer(tmp_layer)
            if templcondsi_init is not None:
                # overwrite the kernel with the user-provided initialization
                weights = conv_layer.get_weights()
                weights[0] = templcondsi_init.reshape(weights[0].shape)
                conv_layer.set_weights(weights)
            atlas_tensor = KL.Lambda(
                lambda x: K.concatenate([x[0], x[1][..., 1:]]),
                name='atlas')([x_img, atlas_tensor])
        else:
            atlas_tensor = KL.Add(name='atlas')([atlas_input, atlas_gen])

        # (removed an unused intermediate `pheno_model` the original built
        # here and never referenced)

        inputs = [pheno_decoder_model.input, atlas_input, source_input]
        warp_input_model = tf.keras.Model(inputs=inputs,
                                          outputs=[atlas_tensor, source_input])

        # warp model
        vxm_model = VxmDense(inshape,
                             nb_unet_features=nb_unet_features,
                             bidir=True,
                             input_model=warp_input_model,
                             **kwargs)

        # extract tensors from stacked model
        y_source = vxm_model.references.y_source
        pos_flow = vxm_model.references.pos_flow
        neg_flow = vxm_model.references.neg_flow

        if use_mean_stream:
            # get mean stream from negative flow
            mean_stream = ne.layers.MeanStream(name='mean_stream',
                                               cap=mean_cap)(neg_flow)
            # pos_flow is emitted twice — presumably to pair with two losses;
            # confirm against the callers' loss list
            outputs = [y_source, mean_stream, pos_flow, pos_flow]
        else:
            outputs = [y_source, pos_flow, pos_flow]

        # initialize the keras model
        super().__init__(inputs=inputs, outputs=outputs)
Пример #7
0
    # NOTE(review): the `garmparams_sz` default below is evaluated once, at
    # class-definition time, against a module-level `config` (NOT the
    # `config` parameter) — confirm that global exists and stays consistent.
    def __init__(self,
                 config,
                 latent_code_garms_sz=1024,
                 garmparams_sz=config.PCA_,
                 name=None):
        """Build the pose/shape/offset prediction model.

        Parameters:
            config: Experiment configuration; this code reads PCA_,
                garmentKeys and NVERTS from it.
            latent_code_garms_sz: Size of the garment latent code. Default 1024.
            garmparams_sz: Size of the per-garment PCA parameter vector.
            name: Optional model name forwarded to the keras base class.
        """
        super(PoseShapeOffsetModel, self).__init__(name=name)

        self.config = config
        self.latent_code_garms_sz = latent_code_garms_sz
        self.garmparams_sz = garmparams_sz
        self.latent_code_betas_sz = 128

        ##ToDo: Minor: Remove hard coded colors. Should be same as rendered colors in input
        self.colormap = tf.cast([
            np.array([255, 255, 255]),
            np.array([65, 0, 65]),
            np.array([0, 65, 65]),
            np.array([145, 65, 0]),
            np.array([145, 0, 65]),
            np.array([0, 145, 65])
        ], tf.float32) / 255.
        # mesh faces: the pickle format differs between python 2 and 3
        with open('assets/hresMapping.pkl', 'rb') as f:
            # python2:
            if sys.version_info[0] == 2:
                _, self.faces = pkl.load(f)
            # python3:
            else:
                _, self.faces = pkl.load(f, encoding='latin1')
        self.faces = np.int32(self.faces)

        ## Define network layers
        self.top_ = SingleImageNet(self.latent_code_garms_sz,
                                   self.latent_code_betas_sz)

        # NOTE(review): self.garmentModels is never initialized in this
        # method — presumably a class attribute defined out of view;
        # otherwise this append raises AttributeError. The same applies to
        # self.vertSpread used at the bottom.
        for n in self.config.garmentKeys:
            gn = GarmentNet(self.config.PCA_, n, self.garmparams_sz)
            self.garmentModels.append(gn)
        self.smpl = SMPL('assets/neutral_smpl.pkl',
                         theta_in_rodrigues=False,
                         theta_is_perfect_rotmtx=False,
                         isHres=True,
                         scale=True)
        self.smpl_J = SmplBody25Layer(theta_in_rodrigues=False,
                                      theta_is_perfect_rotmtx=False,
                                      isHres=True)
        self.J_layers = [NameLayer('J_{}'.format(i)) for i in range(NUM)]

        # small-stddev inits keep the pose/shape branches near zero at start
        self.lat_betas = Dense(self.latent_code_betas_sz,
                               kernel_initializer=initializers.RandomNormal(
                                   0, 0.00005),
                               activation='relu')
        self.betas = Dense(10,
                           kernel_initializer=initializers.RandomNormal(
                               0, 0.000005),
                           name='betas')

        # mean A-pose (rotation matrices, flattened) concatenated with a
        # default translation; the first 3 pose entries (global rotation)
        # are zeroed out
        init_trans = np.array([0, 0.2, -2.])
        init_pose = np.load('assets/mean_a_pose.npy')
        init_pose[:3] = 0
        init_pose = tf.reshape(
            batch_rodrigues(init_pose.reshape(-1, 3).astype(np.float32)),
            (-1, ))
        self.pose_trans = tf.concat((init_pose, init_trans), axis=0)

        self.lat_pose = Dense(self.latent_code_betas_sz,
                              kernel_initializer=initializers.RandomNormal(
                                  0, 0.000005),
                              activation='relu')
        # predicts 24 3x3 rotation matrices plus a 3-vector translation
        self.lat_pose_layer = Dense(
            24 * 3 * 3 + 3,
            kernel_initializer=initializers.RandomNormal(0, 0.000005),
            name='pose_trans')
        # split the combined pose+trans vector back apart
        self.cut_trans = Lambda(lambda z: z[:, -3:])
        self.trans_layers = [
            NameLayer('trans_{}'.format(i)) for i in range(NUM)
        ]

        self.cut_poses = Lambda(lambda z: z[:, :-3])
        self.reshape_pose = Reshape((24, 3, 3))
        self.pose_layers = [NameLayer('pose_{}'.format(i)) for i in range(NUM)]

        ## Optional: Condition garment on betas, probably not
        self.latent_code_offset_ShapeMerged = Dense(self.latent_code_garms_sz +
                                                    self.latent_code_betas_sz,
                                                    activation='relu')
        self.latent_code_offset_ShapeMerged_2 = Dense(
            self.latent_code_garms_sz + self.latent_code_betas_sz,
            activation='relu',
            name='latent_code_offset_ShapeMerged')

        self.avg = Average()
        self.flatten = Flatten()
        self.concat = Concatenate()

        self.scatters = []
        for vs in self.vertSpread:
            self.scatters.append(Scatter_(vs, self.config.NVERTS))
Пример #8
0
def resnet50(num_classes,
             batch_size=None,
             use_l2_regularizer=True,
             rescale_inputs=False):
    """Instantiates the ResNet50 architecture.

  Args:
    num_classes: `int` number of classes for image classification.
    batch_size: Size of the batches for each step.
    use_l2_regularizer: whether to use L2 regularizer on Conv/Dense layer.
    rescale_inputs: whether to rescale inputs from 0 to 1.

  Returns:
      A Keras model instance.
  """
    input_shape = (224, 224, 3)
    img_input = layers.Input(shape=input_shape, batch_size=batch_size)
    if rescale_inputs:
        # Hub image modules expect inputs in the range [0, 1]. This rescales these
        # inputs to the range expected by the trained model.
        x = layers.Lambda(lambda x: x * 255.0 - backend.constant(
            imagenet_preprocessing_ineffecient_input_pipeline.CHANNEL_MEANS,
            shape=[1, 1, 3],
            dtype=x.dtype),
                          name='rescale')(img_input)
    else:
        x = img_input

    # channels_first (NCHW) layouts need an explicit transpose and a
    # different batch-norm axis
    if backend.image_data_format() == 'channels_first':
        x = layers.Lambda(
            lambda x: backend.permute_dimensions(x, (0, 3, 1, 2)),
            name='transpose')(x)
        bn_axis = 1
    else:  # channels_last
        bn_axis = 3

    # stem: 7x7/2 conv + BN + ReLU + 3x3/2 max-pool
    x = layers.ZeroPadding2D(padding=(3, 3), name='conv1_pad')(x)
    x = layers.Conv2D(
        64, (7, 7),
        strides=(2, 2),
        padding='valid',
        use_bias=False,
        kernel_initializer='he_normal',
        kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
        name='conv1')(x)
    x = layers.BatchNormalization(axis=bn_axis,
                                  momentum=BATCH_NORM_DECAY,
                                  epsilon=BATCH_NORM_EPSILON,
                                  name='bn_conv1')(x)
    x = layers.Activation('relu')(x)
    x = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

    # stage 2: one conv block (stride 1) + two identity blocks
    x = conv_block(x,
                   3, [64, 64, 256],
                   stage=2,
                   block='a',
                   strides=(1, 1),
                   use_l2_regularizer=use_l2_regularizer)
    x = identity_block(x,
                       3, [64, 64, 256],
                       stage=2,
                       block='b',
                       use_l2_regularizer=use_l2_regularizer)
    x = identity_block(x,
                       3, [64, 64, 256],
                       stage=2,
                       block='c',
                       use_l2_regularizer=use_l2_regularizer)

    # stage 3: conv block + three identity blocks
    x = conv_block(x,
                   3, [128, 128, 512],
                   stage=3,
                   block='a',
                   use_l2_regularizer=use_l2_regularizer)
    x = identity_block(x,
                       3, [128, 128, 512],
                       stage=3,
                       block='b',
                       use_l2_regularizer=use_l2_regularizer)
    x = identity_block(x,
                       3, [128, 128, 512],
                       stage=3,
                       block='c',
                       use_l2_regularizer=use_l2_regularizer)
    x = identity_block(x,
                       3, [128, 128, 512],
                       stage=3,
                       block='d',
                       use_l2_regularizer=use_l2_regularizer)

    # stage 4: conv block + five identity blocks
    x = conv_block(x,
                   3, [256, 256, 1024],
                   stage=4,
                   block='a',
                   use_l2_regularizer=use_l2_regularizer)
    x = identity_block(x,
                       3, [256, 256, 1024],
                       stage=4,
                       block='b',
                       use_l2_regularizer=use_l2_regularizer)
    x = identity_block(x,
                       3, [256, 256, 1024],
                       stage=4,
                       block='c',
                       use_l2_regularizer=use_l2_regularizer)
    x = identity_block(x,
                       3, [256, 256, 1024],
                       stage=4,
                       block='d',
                       use_l2_regularizer=use_l2_regularizer)
    x = identity_block(x,
                       3, [256, 256, 1024],
                       stage=4,
                       block='e',
                       use_l2_regularizer=use_l2_regularizer)
    x = identity_block(x,
                       3, [256, 256, 1024],
                       stage=4,
                       block='f',
                       use_l2_regularizer=use_l2_regularizer)

    # stage 5: conv block + two identity blocks
    x = conv_block(x,
                   3, [512, 512, 2048],
                   stage=5,
                   block='a',
                   use_l2_regularizer=use_l2_regularizer)
    x = identity_block(x,
                       3, [512, 512, 2048],
                       stage=5,
                       block='b',
                       use_l2_regularizer=use_l2_regularizer)
    x = identity_block(x,
                       3, [512, 512, 2048],
                       stage=5,
                       block='c',
                       use_l2_regularizer=use_l2_regularizer)

    # global average pooling over the spatial axes, then the classifier head
    rm_axes = [1, 2
               ] if backend.image_data_format() == 'channels_last' else [2, 3]
    x = layers.Lambda(lambda x: backend.mean(x, rm_axes),
                      name='reduce_mean')(x)
    x = layers.Dense(
        num_classes,
        kernel_initializer=initializers.RandomNormal(stddev=0.01),
        kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
        bias_regularizer=_gen_l2_regularizer(use_l2_regularizer),
        name='fc1000')(x)

    # The softmax that feeds the model loss cannot be computed in float16
    # due to numeric issues, so we force dtype=float32 here.
    x = layers.Activation('softmax', dtype='float32')(x)

    # Create model.
    return models.Model(img_input, x, name='resnet50')
Пример #9
0
    def __init__(self,
                 nb_filters=64,
                 kernel_size=3,
                 nb_stacks=1,
                 dilations=None,
                 padding='same',
                 use_skip_connections=False,
                 dropout_rate=0.0,
                 conv_regularization=0.08):
        """Creates a TCN layer.

        Input shape:
            A tensor of shape (batch size, time steps, features).

        Args:
            nb_filters: The number of filters to use in the convolutional layers.
            kernel_size: The size of the kernel to use in each convolutional layer.
            nb_stacks: The number of stacks of residual blocks to use.
            dilations: The list of the dilations, e.g. [1, 2, 4, 8, 16, 32, 64].
                Defaults to that schedule when None.
            padding: The padding to use in the convolutional layers, 'causal' or 'same'.
            use_skip_connections: Boolean. If we want to add skip connections from input to each residual block.
            dropout_rate: Float between 0 and 1. Fraction of the input units to drop.
            conv_regularization: Float, L2 regularization coefficient for conv layers.

        Returns:
            A TCN layer.
        """
        super(TCN, self).__init__()

        # Fixed: the default dilations=None previously crashed the residual
        # block loop below (TypeError: 'NoneType' object is not iterable);
        # fall back to the power-of-two schedule the docstring suggests.
        if dilations is None:
            dilations = [1, 2, 4, 8, 16, 32, 64]

        self.dropout_rate = dropout_rate
        self.use_skip_connections = use_skip_connections
        self.dilations = dilations
        self.nb_stacks = nb_stacks
        self.kernel_size = kernel_size
        self.nb_filters = nb_filters
        self.padding = padding
        self.conv_regularization = conv_regularization

        # one residual block per (stack, dilation) pair
        self.tcn_blocks = []
        for s in range(self.nb_stacks):
            for d in self.dilations:
                self.tcn_blocks.append(
                    ResidualBlock(
                        dilation_rate=d,
                        nb_filters=self.nb_filters,
                        kernel_size=self.kernel_size,
                        padding=self.padding,
                        dropout_rate=self.dropout_rate,
                        conv_regularization=self.conv_regularization))

        # dense head: two shrinking FC layers with dropout, then the output
        self.fc1 = layers.Dense(self.nb_filters // 2,
                                kernel_initializer=initializers.RandomNormal(
                                    0, 0.01))
        self.dropout1 = layers.Dropout(self.dropout_rate)
        self.fc2 = layers.Dense(
            self.nb_filters // 4,
            kernel_initializer=initializers.RandomNormal(0, 0.01),
            bias_initializer=initializers.RandomNormal(0, 0.000001))
        self.dropout2 = layers.Dropout(self.dropout_rate)
        self.reshape = layers.Reshape(
            [Constants._TCN_LENGTH * self.nb_filters // 4])
        self.fc = layers.Dense(
            Constants._NUM_TARGETS,
            kernel_initializer=initializers.RandomNormal(0, 0.01),
            bias_initializer=initializers.RandomNormal(0, 0.000001))
Пример #10
0
 def build(self, input_shape):
     """Create the per-channel noise-scale weight for this layer.

     `input_shape` is expected to be a pair; only its first entry (the
     shape of the main input) is used.
     """
     x_shape, _ = input_shape
     self.x_shape = x_shape
     # one learnable scale per channel, initialized near zero
     self.s = self.add_weight(
         name="noise_scale",
         shape=[1, 1, x_shape[-1]],
         initializer=initer.RandomNormal(0, 0.05),
     )
Пример #11
0
import numpy as np
import basic_nn
from basic_nn import fully_connected_nn, sigmoid_act, tanh_act, leaky_relu_act
from basic_model import basicModel
from costfcn import gmm_likelihood_simplex, entropy_discriminator_cost, gmm_nll_cost, gmm_mce_cost
from tensorflow_probability import distributions as tfd
from util import sample_gmm

tf.compat.v1.disable_eager_execution()

# Select an initializer implementation based on the TF major version.
# Fixed: the original compared version strings lexicographically
# (tf.__version__ < '2.0.0'), which misorders versions such as '10.0.0';
# compare the parsed major version instead.
if int(tf.__version__.split('.')[0]) < 2:
    import tflearn
    w_init = tflearn.initializations.normal(stddev=0.003, seed=42)
    w_init_dis = tflearn.initializations.normal(stddev=0.1, seed=42)
else:
    from tensorflow.keras import initializers
    w_init = initializers.RandomNormal(stddev=0.003, seed=42)
    w_init_dis = initializers.RandomNormal(stddev=0.1, seed=42)


class GMGAN:
    def __init__(self,
                 n_comps,
                 context_dim,
                 response_dim,
                 nn_structure,
                 batch_size=None,
                 using_batch_norm=False,
                 seed=42,
                 eps=1e-20,
                 gen_sup_lrate=0.001,
                 gen_adv_lrate=0.001,
Пример #12
0
    def __call__(self, training=True):
        """Assemble the DCGAN generator as a Sequential model and return it.

        Parameters:
            training: Unused here; kept for interface compatibility.
        """
        # Fixed: the original created an unused local `model = Sequential()`
        # and then mutated self.G, which is never created anywhere in view;
        # build self.G directly (mirrors Discriminator.__call__, which does
        # self.D = Sequential()).
        self.G = Sequential()

        # project the noise vector up, then reshape to a 1x1 feature map
        self.G.add(Dense(1024, input_shape=(self.noise_size,)))
        self.G.add(BatchNormalization(momentum=0.9))
        self.G.add(Activation('relu'))
        # Fixed: this line was missing its closing parenthesis (SyntaxError).
        self.G.add(Reshape((1, 1, 1024)))

        # three upsampling conv blocks: Conv -> BN -> ReLU -> UpSampling
        weight_init = RandomNormal(mean=0, stddev=0.02)
        for n_filters in (512, 256, 128):
            self.G.add(Conv2D(n_filters,
                              kernel_initializer=weight_init,
                              kernel_size=5,
                              strides=(2, 2)))
            self.G.add(BatchNormalization(momentum=0.9))
            self.G.add(Activation('relu'))
            self.G.add(UpSampling2D())

        # final single-channel image in [-1, 1]
        self.G.add(Conv2D(1,
                          kernel_initializer=weight_init,
                          kernel_size=5,
                          strides=(2, 2)))
        self.G.add(Activation('tanh'))
        self.G.add(UpSampling2D())

        return self.G
    

class Discriminator(tf.keras.Model):
    """DCGAN discriminator: stacked strided convs ending in a sigmoid score."""

    def __init__(self):
        super(Discriminator, self).__init__()
        # expected input resolution (single-channel 64x64 images)
        self.input_size = (64, 64, 1)

    def __call__(self):
        """Assemble the discriminator as a Sequential model and return it."""
        net = Sequential()

        # four strided conv blocks: Conv -> BN -> LeakyReLU
        for n_filters in (128, 256, 512, 1024):
            net.add(Conv2D(n_filters,
                           kernel_initializer=RandomNormal(mean=0, stddev=0.02),
                           kernel_size=5,
                           strides=(2, 2)))
            net.add(BatchNormalization(momentum=0.9))
            net.add(LeakyReLU(0.2))

        # flatten to a single sigmoid score
        net.add(Flatten())
        net.add(Dense(1))
        net.add(Activation('sigmoid'))

        self.D = net
        return self.D
        

def discriminator_loss(loss_object, real_output, fake_output):
    """Total discriminator loss: real scores against label 1, fakes against 0.

    Parameters:
        loss_object: A keras loss (e.g. BinaryCrossentropy) called as loss(y_true, y_pred).
        real_output: Discriminator scores for real images.
        fake_output: Discriminator scores for generated images.
    """
    # Fixed: labels must match the score tensors element-wise — the original
    # built them from the tuple (batch_size, 1) via tf.ones_like and used
    # ones for the fake branch as well (fakes must be labeled 0).
    real_loss = loss_object(tf.ones_like(real_output), real_output)
    fake_loss = loss_object(tf.zeros_like(fake_output), fake_output)
    total_loss = real_loss + fake_loss
    return total_loss


def generator_loss(loss_object, fake_output):
    """Generator loss: push the discriminator's fake scores toward label 1.

    Fixed: the label tensor must match fake_output element-wise; the
    original called tf.ones_like on the tuple (batch_size, 1).
    """
    return loss_object(tf.ones_like(fake_output), fake_output)

def normalize(x):
    """Map the 'image' field of a dataset record from [0, 255] to [-1, 1]."""
    pixels = tf.cast(x['image'], tf.float32)
    return pixels / 127.5 - 1


def save_imgs(epoch, generator, noise):
    """Render a 4x4 grid of generated digits and save it under images/.

    Parameters:
        epoch: Epoch index, used in the output filename.
        generator: Model mapping latent noise to images in [-1, 1].
        noise: Fixed latent batch (16 vectors for the 4x4 grid).
    """
    samples = generator(noise, training=False)

    fig = plt.figure(figsize=(4, 4))
    for idx in range(samples.shape[0]):
        plt.subplot(4, 4, idx + 1)
        # Undo the [-1, 1] normalisation before displaying as grayscale.
        plt.imshow(samples[idx, :, :, 0] * 127.5 + 127.5, cmap='gray')
        plt.axis('off')

    fig.savefig("images/mnist_%d.png" % epoch)

def train():
    """Train a DCGAN on MNIST and periodically save sample image grids."""
    data, info = tfds.load("mnist", with_info=True, data_dir='/data/tensorflow_datasets')
    train_data = data['train']

    if not os.path.exists('./images'):
        os.makedirs('./images')

    # setting hyperparameters
    latent_dim = 100
    epochs = 2
    batch_size = 10000
    buffer_size = 6000
    save_interval = 1

    # Generator/Discriminator build their Sequential networks in __call__ and
    # take no arguments there, so call the instances once to obtain the actual
    # keras models. The previous code kept the wrapper objects and later did
    # discriminator(images), which raises TypeError (__call__ takes no args).
    generator = Generator()()
    discriminator = Discriminator()()

    gen_optimizer = tf.keras.optimizers.Adam(learning_rate=0.0002, beta_1=0.5, beta_2=0.999)  # beta_2: default
    disc_optimizer = tf.keras.optimizers.Adam(learning_rate=0.0002, beta_1=0.5, beta_2=0.999)  # beta_2: default

    train_dataset = train_data.map(normalize).shuffle(buffer_size).batch(batch_size)

    cross_entropy = tf.keras.losses.BinaryCrossentropy()  # real -> 1, fake -> 0

    @tf.function
    def train_step(images):
        noise = tf.random.normal([batch_size, latent_dim])  # z

        # persistent=True because two gradients are taken from the same tape.
        with tf.GradientTape(persistent=True) as tape:
            generated_images = generator(noise)

            real_output = discriminator(images)
            generated_output = discriminator(generated_images)

            # The generator loss must be computed from the discriminator's
            # scores on the fakes, not from the raw generated images.
            gen_loss = generator_loss(cross_entropy, generated_output)
            disc_loss = discriminator_loss(cross_entropy, real_output, generated_output)

        grad_gen = tape.gradient(gen_loss, generator.trainable_variables)
        grad_disc = tape.gradient(disc_loss, discriminator.trainable_variables)
        del tape  # release resources held by the persistent tape

        gen_optimizer.apply_gradients(zip(grad_gen, generator.trainable_variables))
        disc_optimizer.apply_gradients(zip(grad_disc, discriminator.trainable_variables))

        return gen_loss, disc_loss

    # Fixed latent batch so saved sample grids are comparable across epochs.
    seed = tf.random.normal([16, latent_dim])

    for epoch in range(epochs):
        start = time.time()
        total_gen_loss = 0
        total_disc_loss = 0

        for images in train_dataset:
            gen_loss, disc_loss = train_step(images)

            total_gen_loss += gen_loss
            total_disc_loss += disc_loss

        # NOTE(review): losses are divided by batch_size, not by the number of
        # batches per epoch — confirm this is the intended reporting scale.
        print('Time for epoch {} is {} sec - gen_loss = {}, disc_loss = {}'.format(epoch + 1, time.time() - start, total_gen_loss / batch_size, total_disc_loss / batch_size))
        if epoch % save_interval == 0:
            save_imgs(epoch, generator, seed)


# Script entry point: run DCGAN training when executed directly.
if __name__ == "__main__":
    train()
Пример #13
0
    nn = model(x)
    a = (1 + 3 * x ** 2) / (1 + x + x ** 3)
    return x ** 3 + 2 * x + a * x ** 2 - (x + a) * nn


def custom_loss(x, y_pred):
    """Sum of squared ODE residuals, sum((dg/dx - f(x))^2), over the points x.

    y_pred is unused: the loss depends only on how well the trial solution's
    derivative (dgdx) matches the target function f on the collocation points.
    """
    residual = dgdx(x) - f(x)
    return tf.reduce_sum(tf.square(residual))


model = keras.Sequential(
    [
        layers.Dense(
            32,
            input_shape=[1],
            activation="sigmoid",
            kernel_initializer=initializers.RandomNormal(stddev=1.0),
            bias_initializer=initializers.RandomNormal(stddev=1.0),
        ),
        layers.Dense(
            32,
            activation="sigmoid",
            kernel_initializer=initializers.RandomNormal(stddev=1.0),
            bias_initializer=initializers.RandomNormal(stddev=1.0),
        ),
        layers.Dense(
            1,
            activation="sigmoid",
            kernel_initializer=initializers.RandomNormal(stddev=1.0),
            bias_initializer=initializers.RandomNormal(stddev=1.0),
        ),
    ]
generator.add(Dense(784, activation='tanh'))
#generator.compile(loss='binary_crossentropy', optimizer=adam)

# Generator:
# Total params: 1,463,312
# Trainable params: 1,463,312
# Non-trainable params: 0



# In[4]:


# GAN discriminator for 28x28 MNIST images flattened to 784 features.
# A RandomNormal kernel initializer (stddev=0.02) replaces the default
# glorot_uniform, matching common DCGAN initialization practice.
discriminator = Sequential()
discriminator.add(Dense(1024, input_dim=784, kernel_initializer=initializers.RandomNormal(stddev=0.02)))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Dropout(0.3))
# Two more shrinking fully-connected blocks: 512 -> 256 units.
for n_units in (512, 256):
    discriminator.add(Dense(n_units))
    discriminator.add(LeakyReLU(0.2))
    discriminator.add(Dropout(0.3))
# Single sigmoid unit: probability that the input image is real.
discriminator.add(Dense(1, activation='sigmoid'))
discriminator.compile(loss='binary_crossentropy', optimizer=adam)


# discriminator network (almost same number of parameter as generator)
# Total params: 1,460,225
# Trainable params: 0
Пример #15
0
    def __init__(self,
                 inshape,
                 nb_unet_features=None,
                 nb_unet_levels=None,
                 unet_feat_mult=1,
                 nb_unet_conv_per_level=1,
                 int_steps=7,
                 int_downsize=2,
                 bidir=False,
                 use_probs=False,
                 src_feats=1,
                 trg_feats=1,
                 unet_half_res=False,
                 input_model=None):
        """ 
        Builds the dense VoxelMorph registration network: a unet predicts a
        (stationary velocity) flow field, which is optionally integrated into
        a diffeomorphic warp and applied to the source image.

        Parameters:
            inshape: Input shape. e.g. (192, 192, 192)
            nb_unet_features: Unet convolutional features. Can be specified via a list of lists with
                the form [[encoder feats], [decoder feats]], or as a single integer. If None (default),
                the unet features are defined by the default config described in the unet class documentation.
            nb_unet_levels: Number of levels in unet. Only used when nb_unet_features is an integer. Default is None.
            unet_feat_mult: Per-level feature multiplier. Only used when nb_unet_features is an integer. Default is 1.
            nb_unet_conv_per_level: Number of convolutions per unet level. Default is 1.
            int_steps: Number of flow integration steps. The warp is non-diffeomorphic when this value is 0.
            int_downsize: Integer specifying the flow downsample factor for vector integration. The flow field
                is not downsampled when this value is 1.
            bidir: Enable bidirectional cost function. Default is False.
            use_probs: Use probabilities in flow field. Default is False.
            src_feats: Number of source image features. Default is 1.
            trg_feats: Number of target image features. Default is 1.
            unet_half_res: Skip the last unet decoder upsampling. Requires that int_downsize=2. Default is False.
            input_model: Model to replace default input layer before concatenation. Default is None.
        """

        # ensure correct dimensionality
        ndims = len(inshape)
        assert ndims in [
            1, 2, 3
        ], 'ndims should be one of 1, 2, or 3. found: %d' % ndims

        if input_model is None:
            # configure default input layers if an input model is not provided
            source = tf.keras.Input(shape=(*inshape, src_feats),
                                    name='source_input')
            target = tf.keras.Input(shape=(*inshape, trg_feats),
                                    name='target_input')
            input_model = tf.keras.Model(inputs=[source, target],
                                         outputs=[source, target])
        else:
            # an input_model may expose more than two outputs; only the first
            # two are treated as (source, target)
            source, target = input_model.outputs[:2]

        # build core unet model and grab inputs
        unet_model = Unet(input_model=input_model,
                          nb_features=nb_unet_features,
                          nb_levels=nb_unet_levels,
                          feat_mult=unet_feat_mult,
                          nb_conv_per_level=nb_unet_conv_per_level,
                          half_res=unet_half_res)

        # transform unet output into a flow field
        # tiny-stddev init keeps the initial flow near zero, i.e. the initial
        # warp starts close to the identity transform
        Conv = getattr(KL, 'Conv%dD' % ndims)
        flow_mean = Conv(ndims,
                         kernel_size=3,
                         padding='same',
                         kernel_initializer=KI.RandomNormal(mean=0.0,
                                                            stddev=1e-5),
                         name='flow')(unet_model.output)

        # optionally include probabilities
        if use_probs:
            # initialize the velocity variance very low, to start stable
            flow_logsigma = Conv(ndims,
                                 kernel_size=3,
                                 padding='same',
                                 kernel_initializer=KI.RandomNormal(
                                     mean=0.0, stddev=1e-10),
                                 bias_initializer=KI.Constant(value=-10),
                                 name='log_sigma')(unet_model.output)
            flow_params = KL.concatenate([flow_mean, flow_logsigma],
                                         name='prob_concat')
            flow = ne.layers.SampleNormalLogVar(name="z_sample")(
                [flow_mean, flow_logsigma])
        else:
            # NOTE(review): flow_params is assigned but not used or exposed in
            # this method — confirm whether it should go into self.references
            flow_params = flow_mean
            flow = flow_mean

        if not unet_half_res:
            # optionally resize for integration
            if int_steps > 0 and int_downsize > 1:
                flow = layers.RescaleTransform(1 / int_downsize,
                                               name='flow_resize')(flow)

        # flow before integration, kept as a model output (e.g. for losses)
        preint_flow = flow

        # optionally negate flow for bidirectional model
        pos_flow = flow
        if bidir:
            neg_flow = ne.layers.Negate(name='neg_flow')(flow)

        # integrate to produce diffeomorphic warp (i.e. treat flow as a stationary velocity field)
        if int_steps > 0:
            pos_flow = layers.VecInt(method='ss',
                                     name='flow_int',
                                     int_steps=int_steps)(pos_flow)
            if bidir:
                neg_flow = layers.VecInt(method='ss',
                                         name='neg_flow_int',
                                         int_steps=int_steps)(neg_flow)

            # resize to final resolution
            if int_downsize > 1:
                pos_flow = layers.RescaleTransform(int_downsize,
                                                   name='diffflow')(pos_flow)
                if bidir:
                    neg_flow = layers.RescaleTransform(
                        int_downsize, name='neg_diffflow')(neg_flow)

        # warp image with flow field
        y_source = layers.SpatialTransformer(interp_method='linear',
                                             indexing='ij',
                                             name='transformer')(
                                                 [source, pos_flow])
        if bidir:
            # bidirectional: also warp the target back toward the source
            y_target = layers.SpatialTransformer(interp_method='linear',
                                                 indexing='ij',
                                                 name='neg_transformer')(
                                                     [target, neg_flow])

        # initialize the keras model
        outputs = [y_source, y_target, preint_flow
                   ] if bidir else [y_source, preint_flow]
        super().__init__(name='vxm_dense',
                         inputs=input_model.inputs,
                         outputs=outputs)

        # cache pointers to layers and tensors for future reference
        self.references = LoadableModel.ReferenceContainer()
        self.references.unet_model = unet_model
        self.references.y_source = y_source
        self.references.y_target = y_target if bidir else None
        self.references.pos_flow = pos_flow
        self.references.neg_flow = neg_flow if bidir else None
Пример #16
0
                      activation="relu"))
    model.add(
        layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding="valid"))
    #model.add(Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), padding="same", activation = "relu"))
    #model.add(Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), padding="same", activation = "relu"))
    #model.add(Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), padding="same", activation = "relu"))
    #model.add(MaxPool2D(pool_size=(3,3), strides=(2,2), padding="valid"))

    # Passing it to a Fully Connected layer
    model.add(layers.Flatten())
    # FC Layers
    model.add(
        layers.Dense(
            units=10,
            activation="relu",
            kernel_initializer=initializers.RandomNormal(stddev=0.01)))
    #model.add(layers.Dense(units = 10, activation = "relu", kernel_initializer=initializers.RandomNormal(stddev=0.01)))
    #model.add(layers.Dense(10, activation = "relu", kernel_initializer=initializers.RandomNormal(stddev=0.01)))

    # Output Layer
    model.add(
        layers.Dense(
            num_outputs,
            activation="sigmoid",
            kernel_initializer=initializers.RandomNormal(stddev=0.01)))

    model.summary()

    print("Compiling model...")
    model.compile(loss="categorical_crossentropy",
                  optimizer="adam",
Пример #17
0
    def __init__(self,
                 inshape,
                 nb_labels,
                 nb_unet_features=None,
                 init_mu=None,
                 init_sigma=None,
                 warp_atlas=True,
                 stat_post_warp=True,
                 stat_nb_feats=16,
                 network_stat_weight=0.001,
                 **kwargs):
        """ 
        Parameters:
            inshape: Input shape. e.g. (192, 192, 192)
            nb_labels: Number of labels in probabilistic atlas.
            nb_unet_features: Unet convolutional features. See VxmDense documentation for more information.
            init_mu: Optional initialization for gaussian means. Default is None.
            init_sigma: Optional initialization for gaussian sigmas. Default is None.
            warp_atlas: Warp the atlas with the predicted flow before computing stats. Default is True.
            stat_post_warp: Computes gaussian stats using the warped atlas. Default is True.
            stat_nb_feats: Number of features in the stats convolutional layer. Default is 16.
            network_stat_weight: Relative weight of the stats learned by the network. Default is 0.001.
            kwargs: Forwarded to the internal VxmDense model.
        """

        # ensure correct dimensionality
        ndims = len(inshape)
        assert ndims in [
            1, 2, 3
        ], 'ndims should be one of 1, 2, or 3. found: %d' % ndims

        # build warp network
        vxm_model = VxmDense(inshape,
                             nb_unet_features=nb_unet_features,
                             src_feats=nb_labels,
                             **kwargs)

        # extract necessary layers from the network
        # important to note that we're warping the atlas to the image in this case and
        # we'll swap the input order later
        atlas, image = vxm_model.inputs
        warped_atlas = vxm_model.references.y_source if warp_atlas else atlas
        flow = vxm_model.references.pos_flow

        # compute stat using the warped atlas (or not)
        if stat_post_warp:
            assert warp_atlas, 'must enable warp_atlas if computing stat post warp'
            combined = KL.concatenate([warped_atlas, image],
                                      name='post_warp_concat')
        else:
            # use last convolution in the unet before the flow convolution
            combined = vxm_model.references.unet_model.layers[-2].output

        # convolve into nlabel-stat volume
        conv = _conv_block(combined, stat_nb_feats)
        conv = _conv_block(conv, nb_labels)

        Conv = getattr(KL, 'Conv%dD' % ndims)
        weaknorm = KI.RandomNormal(mean=0.0, stddev=1e-5)

        # convolve into mu and sigma volumes
        stat_mu_vol = Conv(nb_labels,
                           kernel_size=3,
                           name='mu_vol',
                           kernel_initializer=weaknorm,
                           bias_initializer=weaknorm)(conv)
        stat_logssq_vol = Conv(nb_labels,
                               kernel_size=3,
                               name='logsigmasq_vol',
                               kernel_initializer=weaknorm,
                               bias_initializer=weaknorm)(conv)

        # pool to get 'final' stat - pick the pooling layer matching the input
        # dimensionality (the previous code hard-coded GlobalMaxPooling3D,
        # which breaks for the 1D/2D inputs accepted by the assert above)
        GlobalMaxPooling = getattr(KL, 'GlobalMaxPooling%dD' % ndims)
        stat_mu = GlobalMaxPooling(name='mu_pooling')(stat_mu_vol)
        stat_logssq = GlobalMaxPooling(name='logssq_pooling')(stat_logssq_vol)

        # combine mu with initialization
        if init_mu is not None:
            init_mu = np.array(init_mu)
            stat_mu = KL.Lambda(lambda x: network_stat_weight * x + init_mu,
                                name='comb_mu')(stat_mu)

        # combine sigma with initialization
        if init_sigma is not None:
            init_logsigmasq = np.array([2 * np.log(f) for f in init_sigma])
            stat_logssq = KL.Lambda(
                lambda x: network_stat_weight * x + init_logsigmasq,
                name='comb_sigma')(stat_logssq)

        # unnorm loglike
        def unnorm_loglike(I, mu, logsigmasq, use_log=True):
            # NOTE(review): tf.distributions is TF1-only; TF2 requires
            # tensorflow_probability.distributions — confirm the target TF version.
            P = tf.distributions.Normal(mu, K.exp(logsigmasq / 2))
            return P.log_prob(I) if use_log else P.prob(I)

        uloglhood = KL.Lambda(lambda x: unnorm_loglike(*x),
                              name='unsup_likelihood')(
                                  [image, stat_mu, stat_logssq])

        # compute data loss as a layer, because it's a bit easier than outputting a ton of things
        def logsum(prob_ll, atl):
            # safe computation using the log sum exp trick (NOTE: this does not normalize p)
            # https://www.xarg.org/2016/06/the-log-sum-exp-trick-in-machine-learning
            logpdf = prob_ll + K.log(atl + K.epsilon())
            alpha = tf.reduce_max(logpdf, -1, keepdims=True)
            # tf.math.log works on TF >= 1.5 and TF2; the bare tf.log alias
            # was removed in TF2
            return alpha + tf.math.log(
                tf.reduce_sum(K.exp(logpdf - alpha), -1, keepdims=True) +
                K.epsilon())

        loss_vol = KL.Lambda(lambda x: logsum(*x))([uloglhood, warped_atlas])

        # initialize the keras model
        super().__init__(inputs=[image, atlas], outputs=[loss_vol, flow])

        # cache pointers to layers and tensors for future reference
        self.references = LoadableModel.ReferenceContainer()
        self.references.vxm_model = vxm_model
        self.references.uloglhood = uloglhood
        self.references.stat_mu = stat_mu
        self.references.stat_logssq = stat_logssq
def build_cifar10_discriminator(ndf=64, image_shape=(32, 32, 3)):
    """ Builds CIFAR10 DCGAN Discriminator Model
    PARAMS
    ------
    ndf: number of discriminator filters
    image_shape: 32x32x3

    RETURN
    ------
    D: keras sequential
    """
    init = initializers.RandomNormal(stddev=0.02)

    D = Sequential()

    # Conv 1 carries the input shape; each strided conv halves H and W:
    # 32x32 -> 16x16 with ndf filters.
    D.add(
        Conv2D(ndf,
               kernel_size=5,
               strides=2,
               padding='same',
               use_bias=True,
               kernel_initializer=init,
               input_shape=image_shape))
    D.add(LeakyReLU(0.2))

    # Conv 2-4: 8x8x(ndf*2) -> 4x4x(ndf*4) -> 2x2x(ndf*8), each followed by
    # batch norm and leaky ReLU.
    for mult in (2, 4, 8):
        D.add(
            Conv2D(ndf * mult,
                   kernel_size=5,
                   strides=2,
                   padding='same',
                   use_bias=True,
                   kernel_initializer=init))
        D.add(BatchNormalization())
        D.add(LeakyReLU(0.2))

    # Flatten the 2x2x(ndf*8) feature map and score with one sigmoid unit.
    D.add(Flatten())
    D.add(Dense(1, kernel_initializer=init))
    D.add(Activation('sigmoid'))

    print("\nDiscriminator")
    D.summary()

    return D
Пример #19
0
 def __init__(self):
     """Critic head: small MLP mapping a state to a scalar value estimate."""
     super().__init__('mlp_value')
     # Two 64-unit hidden layers and a 1-unit output; fixed-seed RandomNormal
     # initializers make the initial weights reproducible across runs.
     self.hidden1 = kl.Dense(64, activation='relu', kernel_initializer = ki.RandomNormal(mean=0.0, stddev=1e-2, seed=1))
     self.hidden2 = kl.Dense(64, activation = 'relu', kernel_initializer = ki.RandomNormal(mean=0.0, stddev=1e-2, seed=2))
     self.values = kl.Dense(1, name='critic_value', kernel_initializer = ki.RandomNormal(mean=0.0, stddev=1e-2, seed=3))
def build_cifar10_generator(ngf=64, z_dim=128):
    """ Builds CIFAR10 DCGAN Generator Model
    PARAMS
    ------
    ngf: number of generator filters
    z_dim: number of dimensions in latent vector

    RETURN
    ------
    G: keras sequential
    """
    init = initializers.RandomNormal(stddev=0.02)

    G = Sequential()

    # Project the latent vector to a 2x2x(ngf*8) tensor.
    G.add(
        Dense(2 * 2 * ngf * 8,
              input_shape=(z_dim, ),
              use_bias=True,
              kernel_initializer=init))
    G.add(Reshape((2, 2, ngf * 8)))
    G.add(BatchNormalization())
    G.add(LeakyReLU(0.2))

    # Three transposed-conv blocks, each doubling the resolution and halving
    # the filters: 2x2 -> 4x4x(ngf*4) -> 8x8x(ngf*2) -> 16x16x(ngf).
    for n_filters in (ngf * 4, ngf * 2, ngf):
        G.add(
            Conv2DTranspose(n_filters,
                            kernel_size=5,
                            strides=2,
                            padding='same',
                            use_bias=True,
                            kernel_initializer=init))
        G.add(BatchNormalization())
        G.add(LeakyReLU(0.2))

    # Final upsample to 32x32x3 with tanh output in [-1, 1].
    G.add(
        Conv2DTranspose(3,
                        kernel_size=5,
                        strides=2,
                        padding='same',
                        use_bias=True,
                        kernel_initializer=init))
    G.add(Activation('tanh'))

    print("\nGenerator")
    G.summary()

    return G
Пример #21
0
from sklearn.model_selection import train_test_split
from experiments.evaluate_exps import evaluate_docking
from run_baselines_for_docking import train_evaluate_baseline_for_docking
from run_mdnmp_for_docking import train_evaluate_mdnmp_for_docking
from run_gmgan_for_docking import train_evaluate_gmgan_for_docking

import tensorflow as tf

# Compare the major version numerically: the previous lexicographic string
# comparison (tf.__version__ < '2.0.0') misorders versions such as '10.0.0'.
if int(tf.__version__.split('.')[0]) < 2:
    import tflearn
    VAR_INIT = tflearn.initializations.normal(stddev=0.003, seed=42)
    VAR_INIT_DIS = tflearn.initializations.normal(stddev=0.1, seed=42)
else:
    from tensorflow.keras import initializers
    # NOTE(review): the TF1 branch uses a normal initializer for VAR_INIT while
    # this branch uses RandomUniform — confirm the asymmetry is intentional.
    VAR_INIT = initializers.RandomUniform(minval=-0.0003, maxval=0.0003, seed=42)
    VAR_INIT_DIS = initializers.RandomNormal(stddev=0.02, seed=42)


# Command-line options: number of mixture components (-m), number of
# experiment repetitions (-n), and the output directory for results (-d).
parser = OptionParser()
parser.add_option("-m", "--nmodel", dest="nmodel", type="int", default=3)
parser.add_option("-n", "--num_exp", dest="expnum", type="int", default=1)
parser.add_option("-d", "--result_dir", dest="result_dir", type="string", default="results_compare_docking")
(options, args) = parser.parse_args(sys.argv)


# Docking dataset: task queries, movement-primitive weights, and the
# start/goal configurations for each demonstration.
queries = np.loadtxt('../data/docking_queries.csv', delimiter=',')
vmps = np.loadtxt('../data/docking_weights.csv', delimiter=',')
starts = np.loadtxt('../data/docking_starts.csv', delimiter=',')
goals = np.loadtxt('../data/docking_goals.csv', delimiter=',')

Пример #22
0
def main(config="../../config.yaml", namespace=""):
    """Run a hetero federated-transfer-learning (FTL) job on nus_wide data.

    Builds a FATE pipeline (reader -> dataio -> hetero FTL -> evaluation),
    trains it, then reuses the fitted components for prediction.

    Parameters:
        config: Path to a job config file, or an already-loaded config object.
        namespace: Suffix appended to the data namespace (for test isolation).
    """
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]
    backend = config.backend
    work_mode = config.work_mode

    # guest holds labels; host holds only features (see dataio params below)
    guest_train_data = {
        "name": "nus_wide_guest",
        "namespace": f"experiment{namespace}"
    }
    host_train_data = {
        "name": "nus_wide_host",
        "namespace": f"experiment{namespace}"
    }
    pipeline = PipeLine().set_initiator(role='guest',
                                        party_id=guest).set_roles(guest=guest,
                                                                  host=host)

    reader_0 = Reader(name="reader_0")
    reader_0.get_party_instance(
        role='guest', party_id=guest).component_param(table=guest_train_data)
    reader_0.get_party_instance(
        role='host', party_id=host).component_param(table=host_train_data)

    dataio_0 = DataIO(name="dataio_0")
    dataio_0.get_party_instance(role='guest', party_id=guest).component_param(
        with_label=True, output_format="dense")
    dataio_0.get_party_instance(
        role='host', party_id=host).component_param(with_label=False)

    # batch_size=-1 means full-batch training; mode='plain' disables encryption
    hetero_ftl_0 = HeteroFTL(name='hetero_ftl_0',
                             epochs=10,
                             alpha=1,
                             batch_size=-1,
                             mode='plain')

    hetero_ftl_0.add_nn_layer(
        Dense(units=32,
              activation='sigmoid',
              kernel_initializer=initializers.RandomNormal(stddev=1.0,
                                                           dtype="float32"),
              bias_initializer=initializers.Zeros()))

    hetero_ftl_0.compile(optimizer=optimizers.Adam(lr=0.01))
    evaluation_0 = Evaluation(name='evaluation_0', eval_type="binary")

    # wire the components: reader -> dataio -> FTL -> evaluation
    pipeline.add_component(reader_0)
    pipeline.add_component(dataio_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(hetero_ftl_0,
                           data=Data(train_data=dataio_0.output.data))
    pipeline.add_component(evaluation_0,
                           data=Data(data=hetero_ftl_0.output.data))

    pipeline.compile()

    job_parameters = JobParameters(backend=backend, work_mode=work_mode)
    pipeline.fit(job_parameters)

    # predict
    # deploy required components
    pipeline.deploy_component([dataio_0, hetero_ftl_0])

    predict_pipeline = PipeLine()
    # add data reader onto predict pipeline
    predict_pipeline.add_component(reader_0)
    # add selected components from train pipeline onto predict pipeline
    # specify data source
    predict_pipeline.add_component(
        pipeline,
        data=Data(
            predict_input={pipeline.dataio_0.input.data: reader_0.output.data
                           }))
    # run predict model
    predict_pipeline.predict(job_parameters)
Пример #23
0
def dense_embedding_quantized(n_features=6,
                              n_features_cat=2,
                              number_of_pupcandis=100,
                              embedding_input_dim={0: 13, 1: 3},
                              emb_out_dim=2,
                              with_bias=True,
                              t_mode=0,
                              logit_total_bits=7,
                              logit_int_bits=2,
                              activation_total_bits=7,
                              logit_quantizer='quantized_bits',
                              activation_quantizer='quantized_relu',
                              activation_int_bits=2,
                              alpha=1,
                              use_stochastic_rounding=False,
                              units=[64, 32, 16]):
    """Build a quantized (QKeras) dense-embedding MET regression model.

    Parameters:
        n_features: Per-candidate features; the last n_features_cat are categorical.
        n_features_cat: Number of categorical features that get embeddings.
        number_of_pupcandis: Number of candidates per event.
        embedding_input_dim: Vocabulary size per categorical feature index.
        emb_out_dim: Embedding output dimension.
        with_bias: In t_mode 1, add a learned bias to the px/py inputs.
        t_mode: 0 -> global pooling + dense output head;
                1 -> per-candidate weighted sum of px/py.
        logit_* / activation_*: QKeras quantizer settings.
        units: Hidden-layer widths (one QDense block per entry).

    Returns:
        A keras Model from [continuous, pxpy, categorical...] inputs to a
        2-component output.

    NOTE: embedding_input_dim and units are mutable default arguments; they
    are only read here, but avoid mutating them in future edits.
    """
    n_dense_layers = len(units)

    # instantiate the quantizers from their string names
    logit_quantizer = getattr(qkeras.quantizers, logit_quantizer)(logit_total_bits, logit_int_bits, alpha=alpha, use_stochastic_rounding=use_stochastic_rounding)
    activation_quantizer = getattr(qkeras.quantizers, activation_quantizer)(activation_total_bits, activation_int_bits)

    inputs_cont = Input(shape=(number_of_pupcandis, n_features-2), name='input')
    pxpy = Input(shape=(number_of_pupcandis, 2), name='input_pxpy')

    # one embedding per categorical feature, concatenated with the continuous inputs
    embeddings = []
    inputs = [inputs_cont, pxpy]
    for i_emb in range(n_features_cat):
        input_cat = Input(shape=(number_of_pupcandis, 1), name='input_cat{}'.format(i_emb))
        inputs.append(input_cat)
        embedding = Embedding(
            input_dim=embedding_input_dim[i_emb],
            output_dim=emb_out_dim,
            embeddings_initializer=initializers.RandomNormal(
                mean=0,
                stddev=0.4/emb_out_dim),
            name='embedding{}'.format(i_emb))(input_cat)
        embedding = Reshape((number_of_pupcandis, emb_out_dim))(embedding)
        embeddings.append(embedding)

    x = Concatenate()([inputs_cont] + [emb for emb in embeddings])

    # quantized dense trunk
    for i_dense in range(n_dense_layers):
        x = QDense(units[i_dense], kernel_quantizer=logit_quantizer, bias_quantizer=logit_quantizer, kernel_initializer='lecun_uniform')(x)
        x = BatchNormalization(momentum=0.95)(x)
        x = QActivation(activation=activation_quantizer)(x)

    if t_mode == 0:
        x = qkeras.qpooling.QGlobalAveragePooling1D(name='pool', quantizer=logit_quantizer)(x)
        # pool size?
        # this dense head IS the model output; previously it was immediately
        # clobbered by an unconditional `outputs = x` below
        outputs = QDense(2, name='output', bias_quantizer=logit_quantizer, kernel_quantizer=logit_quantizer, activation='linear')(x)

    if t_mode == 1:
        if with_bias:
            b = QDense(2, name='met_bias', kernel_quantizer=logit_quantizer, bias_quantizer=logit_quantizer, kernel_initializer=initializers.VarianceScaling(scale=0.02))(x)
            b = QActivation(activation='linear')(b)
            pxpy = Add()([pxpy, b])
        # per-candidate scalar weight; the frozen BatchNormalization below is
        # initialised (via set_weights) to compute (w - 1)
        w = QDense(1, name='met_weight', kernel_quantizer=logit_quantizer, bias_quantizer=logit_quantizer, kernel_initializer=initializers.VarianceScaling(scale=0.02))(x)
        w = QActivation(activation='linear')(w)
        # NOTE(review): epsilon=False (== 0) looks suspicious — confirm it is
        # intentional and not a misplaced keyword
        w = BatchNormalization(trainable=False, name='met_weight_minus_one', epsilon=False)(w)
        x = Multiply()([w, pxpy])

        x = GlobalAveragePooling1D(name='output')(x)
        # only in t_mode 1 is the pooled tensor the model output
        outputs = x

    keras_model = Model(inputs=inputs, outputs=outputs)

    if t_mode == 1:
        # freeze the normalisation to gamma=1, beta=-1, mean=0, var=1 so the
        # layer computes (w - 1); guarded by t_mode because the layer does not
        # exist in t_mode 0 (get_layer would raise)
        keras_model.get_layer('met_weight_minus_one').set_weights([np.array([1.]), np.array([-1.]), np.array([0.]), np.array([1.])])

    return keras_model
Пример #24
0
    # 訓練網路模型
    model.fit(train_data,
              epochs=100,
              validation_data=valid_data,
              callbacks=[model_cbk, model_mckp])


session_num = 1
# Directory where trained model weights are saved.
model_dir = 'lab5-logs/models/'
if not os.path.exists(model_dir):
    os.makedirs(model_dir)

# The three weight-initialization schemes to compare.
weights_initialization_list = [
    initializers.RandomNormal(0, 0.01),
    initializers.glorot_normal(),
    initializers.he_normal()
]

# Train one session per initialization scheme.
for init in weights_initialization_list:
    print('--- Running training session %d' % (session_num))
    run_name = "run-%d" % session_num
    build_and_train_model(run_name, init)  # build and train the network
    session_num += 1

# Compare the training results of the three initializations.
model_1 = keras.models.load_model('lab5-logs/models/run-1-best-model.h5')
model_2 = keras.models.load_model('lab5-logs/models/run-2-best-model.h5')
model_3 = keras.models.load_model('lab5-logs/models/run-3-best-model.h5')
loss_1, acc_1 = model_1.evaluate(test_data)
Пример #25
0
def SRNet(input_shape=None):
    """
    Deep Residual Network for Steganalysis of Digital Images. M. Boroumand,
    M. Chen, J. Fridrich. http://www.ws.binghamton.edu/fridrich/Research/SRNet.pdf

    Arguments:
        input_shape -- optional (H, W, C) input shape; defaults to (512, 512, 3).

    Returns:
        An uncompiled Keras Model producing a 2-way softmax prediction.
    """

    from tensorflow.keras.models import Model
    from tensorflow.keras.layers import Dense, Dropout, Activation, Input, BatchNormalization
    from tensorflow.keras.layers import Conv2D, AveragePooling2D, GlobalAveragePooling2D
    # BUGFIX: `add` is used below but was never imported in this function.
    from tensorflow.keras.layers import add
    from tensorflow.keras import initializers
    from tensorflow.keras import regularizers

    if input_shape is None:  # identity check, not `== None`
        input_shape = (512, 512, 3)

    inputs = Input(shape=input_shape)
    x = inputs

    # Settings shared by every conv layer in the network.
    conv2d_params = {
        'padding': 'same',
        'data_format': 'channels_last',
        'bias_initializer': initializers.Constant(0.2),
        'bias_regularizer': None,
        'kernel_initializer': initializers.VarianceScaling(),
        'kernel_regularizer': regularizers.l2(2e-4),
    }

    avgpool_params = {
        'padding': 'same',
        'data_format': 'channels_last',
        'pool_size': (3, 3),
        'strides': (2, 2)
    }

    bn_params = {'momentum': 0.9, 'center': True, 'scale': True}

    # Front layers: conv + BN + ReLU.
    x = Conv2D(64, (3, 3), strides=1, **conv2d_params)(x)
    x = BatchNormalization(**bn_params)(x)
    x = Activation("relu")(x)

    x = Conv2D(16, (3, 3), strides=1, **conv2d_params)(x)
    x = BatchNormalization(**bn_params)(x)
    x = Activation("relu")(x)

    # Five residual blocks that keep spatial size and channel count.
    # (The original ended each iteration with a redundant `y = x`; the
    # shortcut is rebound at the top of every iteration anyway.)
    for _ in range(5):
        shortcut = x
        x = Conv2D(16, (3, 3), **conv2d_params)(x)
        x = BatchNormalization(**bn_params)(x)
        x = Activation("relu")(x)
        x = Conv2D(16, (3, 3), **conv2d_params)(x)
        x = BatchNormalization(**bn_params)(x)
        x = add([x, shortcut])

    # Downsampling residual blocks: strided 1x1 conv on the shortcut path,
    # average pooling (stride 2) on the main path.
    for f in [16, 64, 128, 256]:
        y = Conv2D(f, (1, 1), strides=2, **conv2d_params)(x)
        y = BatchNormalization(**bn_params)(y)
        x = Conv2D(f, (3, 3), **conv2d_params)(x)
        x = BatchNormalization(**bn_params)(x)
        x = Activation("relu")(x)
        x = Conv2D(f, (3, 3), **conv2d_params)(x)
        x = BatchNormalization(**bn_params)(x)
        x = AveragePooling2D(**avgpool_params)(x)
        x = add([x, y])

    # Final feature extraction and classifier.
    x = Conv2D(512, (3, 3), **conv2d_params)(x)
    x = BatchNormalization(**bn_params)(x)
    x = Activation("relu")(x)
    x = Conv2D(512, (3, 3), **conv2d_params)(x)
    x = BatchNormalization(**bn_params)(x)
    # BUGFIX: the rest of the network is channels_last, but this pooling was
    # channels_first, which averages over the wrong axes for channels_last
    # tensors. Pool over the spatial dims, yielding one value per channel.
    x = GlobalAveragePooling2D(data_format="channels_last")(x)

    x = Dense(2,
              kernel_initializer=initializers.RandomNormal(mean=0.,
                                                           stddev=0.01),
              bias_initializer=initializers.Constant(0.))(x)
    x = Activation('softmax')(x)

    predictions = x

    model = Model(inputs=inputs, outputs=predictions)

    return model
Пример #26
0
def init_normal(shape=(0, 0.05), seed=None):
    """An easy initializer.

    Arguments:
        shape -- (mean, stddev) pair for the normal distribution
                 (parameter name kept for backward compatibility).
        seed -- optional random seed forwarded to the initializer.

    Returns:
        A keras RandomNormal initializer.
    """
    # Tuple default instead of a list: avoids the shared-mutable-default
    # pitfall while unpacking identically.
    mean, stddev = shape
    return initializers.RandomNormal(mean=mean, stddev=stddev, seed=seed)
Пример #27
0
    # Define how many past observations we want the control agent to process each step
    # for this case, we assume to pass only the single most recent observation
    window_length = 1

    # Define an artificial neural network to be used within the agent as actor
    # (using keras sequential)
    actor = Sequential()
    # The network's input fits the observation space of the env
    actor.add(
        Flatten(input_shape=(window_length, ) + env.observation_space.shape))
    actor.add(Dense(16, activation='relu'))
    actor.add(Dense(17, activation='relu'))
    # The network output fits the action space of the env
    actor.add(
        Dense(nb_actions,
              kernel_initializer=initializers.RandomNormal(stddev=1e-5),
              activation='tanh',
              kernel_regularizer=regularizers.l2(1e-2)))
    print(actor.summary())

    # Define another artificial neural network to be used within the agent as critic
    # note that this network has two inputs
    action_input = Input(shape=(nb_actions, ), name='action_input')
    observation_input = Input(shape=(window_length, ) +
                              env.observation_space.shape,
                              name='observation_input')
    # (using keras functional API)
    flattened_observation = Flatten()(observation_input)
    x = Concatenate()([action_input, flattened_observation])
    x = Dense(32, activation='relu')(x)
    x = Dense(32, activation='relu')(x)
def create_model(input_shape, init):
    """
    CNN model with dense (concatenation) skip connections and two LSTM
    branches — one fed from an early conv block, one from the pooled CNN
    features — merged before a single sigmoid output unit.

    Arguments:
    input_shape -- the shape of our input
    init -- the weight initialization

    Returns:
    CNN model
    """
    # NOTE(review): `inp` is presumably an alias of keras.layers.Input
    # imported elsewhere in this file — confirm.
    x = inp(shape=input_shape)
    # Block 1: 3x3 then 1x1 conv, 32 filters each.
    x1 = Conv2D(32,
                3,
                activation="relu",
                kernel_initializer=init,
                bias_regularizer='l2',
                padding='same')(x)
    x1 = BatchNormalization()(x1)
    x2 = Conv2D(32,
                1,
                activation="relu",
                kernel_initializer=init,
                bias_regularizer='l2',
                padding='same')(x1)
    x2 = BatchNormalization()(x2)
    # Dense skip: concatenate the raw input with the block output.
    x3 = Concatenate()([x, x2])
    # LSTM branch #1, fed from the early conv features.
    # NOTE(review): the Reshape assumes H*W*32 of x2 is divisible by 256 —
    # confirm against the expected input_shape.
    l = Reshape((-1, 256))(x2)
    l1 = LSTM(256,
              return_sequences=True,
              kernel_initializer=initializers.RandomNormal(stddev=0.001),
              dropout=0.5,
              recurrent_dropout=0.5)(l)
    # l1 = Dropout(0.5)(l1)
    l2 = LSTM(191,
              return_sequences=False,
              go_backwards=True,
              kernel_initializer=initializers.RandomNormal(stddev=0.001),
              dropout=0.5,
              recurrent_dropout=0.5)(l1)
    l2 = Dropout(0.5)(l2)

    # Block 2: 64 filters, skip from x3.
    x4 = Conv2D(64,
                3,
                activation="relu",
                kernel_initializer=init,
                bias_regularizer='l2',
                padding='same')(x3)
    x4 = BatchNormalization()(x4)
    x5 = Conv2D(64,
                3,
                activation="relu",
                kernel_initializer=init,
                bias_regularizer='l2',
                padding='same')(x4)
    x5 = BatchNormalization()(x5)
    x6 = Concatenate()([x3, x5])

    # Block 3: 96 filters, skip from x6.
    x7 = Conv2D(96,
                3,
                activation="relu",
                kernel_initializer=init,
                bias_regularizer='l2',
                padding='same')(x6)
    x7 = BatchNormalization()(x7)
    x8 = Conv2D(96,
                3,
                activation="relu",
                kernel_initializer=init,
                bias_regularizer='l2',
                padding='same')(x7)
    x8 = BatchNormalization()(x8)
    x9 = Concatenate()([x6, x8])

    # Block 4: 128 filters, skip from x9.
    x10 = Conv2D(128,
                 3,
                 activation="relu",
                 kernel_initializer=init,
                 bias_regularizer='l2',
                 padding='same')(x9)
    x10 = BatchNormalization()(x10)
    x11 = Conv2D(128,
                 3,
                 activation="relu",
                 kernel_initializer=init,
                 bias_regularizer='l2',
                 padding='same')(x10)
    # x8 = Concatenate()([x4,x6])
    x11 = BatchNormalization()(x11)
    x12 = Concatenate()([x9, x11])

    # Collapse spatial dimensions to one feature vector per sample.
    x13 = GlobalAveragePooling2D()(x12)

    # Merge CNN features with LSTM branch #1.
    x14 = Concatenate()([x13, l2])

    # LSTM branch #2 over the merged feature vector.
    # NOTE(review): the Reshape assumes the merged vector length is
    # divisible by 128 (e.g. a single-channel input gives 321 + 191 = 512)
    # — confirm.
    x14 = Reshape((-1, 128))(x14)
    x15 = LSTM(1024,
               return_sequences=True,
               kernel_initializer=initializers.RandomNormal(stddev=0.001),
               dropout=0.5,
               recurrent_dropout=0.5)(x14)
    # x15 = Dropout(0.5)(x15)
    x16 = LSTM(1024,
               go_backwards=True,
               return_sequences=False,
               kernel_initializer=initializers.RandomNormal(stddev=0.001),
               dropout=0.5,
               recurrent_dropout=0.5)(x15)
    x17 = Dropout(0.5)(x16)
    # Single sigmoid unit: binary classification output.
    x18 = Dense(1, activation='sigmoid', kernel_initializer=init)(x17)

    model = Model(inputs=x, outputs=x18)

    return model
Пример #29
0
 def __init__(self,
              width,
              depth,
              num_anchors=9,
              separable_conv=True,
              freeze_bn=False,
              detect_quadrangle=False,
              **kwargs):
     """Build the box-regression head: `depth` conv layers + predictor conv.

     Parameters:
         width: Filters in each of the `depth` intermediate conv layers.
         depth: Number of intermediate conv layers before the predictor.
         num_anchors: Anchors per spatial position. Default is 9.
         separable_conv: If True use SeparableConv2D layers with
             VarianceScaling depthwise/pointwise initializers; otherwise
             plain Conv2D with a RandomNormal(0, 0.01) kernel initializer.
         freeze_bn: Accepted for API compatibility; only referenced by the
             commented-out BatchNormalization variant below.
         detect_quadrangle: If True predict 9 values per anchor instead of
             4 (axis-aligned box).
         kwargs: Forwarded to the parent constructor.
     """
     super(BoxNet, self).__init__(**kwargs)
     self.width = width
     self.depth = depth
     self.num_anchors = num_anchors
     self.separable_conv = separable_conv
     self.detect_quadrangle = detect_quadrangle
     # 9 values describe a quadrangle, 4 an axis-aligned box.
     num_values = 9 if detect_quadrangle else 4
     # Options shared by every conv layer in this head.
     options = {
         'kernel_size': 3,
         'strides': 1,
         'padding': 'same',
         'bias_initializer': 'zeros',
     }
     if separable_conv:
         kernel_initializer = {
             'depthwise_initializer': initializers.VarianceScaling(),
             'pointwise_initializer': initializers.VarianceScaling(),
         }
         options.update(kernel_initializer)
         self.convs = [
             layers.SeparableConv2D(filters=width,
                                    name=f'{self.name}/box-{i}',
                                    **options) for i in range(depth)
         ]
         self.head = layers.SeparableConv2D(filters=num_anchors *
                                            num_values,
                                            name=f'{self.name}/box-predict',
                                            **options)
     else:
         kernel_initializer = {
             'kernel_initializer':
             initializers.RandomNormal(mean=0.0, stddev=0.01, seed=None)
         }
         options.update(kernel_initializer)
         self.convs = [
             layers.Conv2D(filters=width,
                           name=f'{self.name}/box-{i}',
                           **options) for i in range(depth)
         ]
         self.head = layers.Conv2D(filters=num_anchors * num_values,
                                   name=f'{self.name}/box-predict',
                                   **options)
     # One BatchNormalization per (conv layer i, index j in 3..7 —
     # presumably feature-pyramid levels, confirm). MOMENTUM and EPSILON
     # are module-level constants defined elsewhere in this file.
     self.bns = [[
         layers.BatchNormalization(momentum=MOMENTUM,
                                   epsilon=EPSILON,
                                   name=f'{self.name}/box-{i}-bn-{j}')
         for j in range(3, 8)
     ] for i in range(depth)]
     # self.bns = [[BatchNormalization(freeze=freeze_bn, name=f'{self.name}/box-{i}-bn-{j}') for j in range(3, 8)]
     #             for i in range(depth)]
     # Swish activation shared by all conv layers (attribute name is
     # historical; the activation is swish, not ReLU).
     self.relu = layers.Lambda(lambda x: tf.nn.swish(x))
     self.reshape = layers.Reshape((-1, num_values))
     self.level = 0
Пример #30
0
def create_model(input_shape, init):
    """
    CNN model: densely-connected Conv2D/BatchNorm blocks, global average
    pooling, a reshaped two-layer LSTM head, and a single sigmoid output.

    Arguments:
        input_shape -- the shape of our input
        init -- the weight initialization

    Returns:
        CNN model
    """

    # NOTE(review): `inp` is presumably an alias of keras.layers.Input
    # imported elsewhere in this file — confirm.
    x = inp(shape=input_shape)
    # Block 1: 3x3 then 1x1 conv, 32 filters each, skip from the raw input.
    x1 = Conv2D(32,
                3,
                activation="relu",
                kernel_initializer=init,
                bias_regularizer='l2',
                padding='same')(x)
    x1 = BatchNormalization()(x1)
    x2 = Conv2D(32,
                1,
                activation="relu",
                kernel_initializer=init,
                bias_regularizer='l2',
                padding='same')(x1)
    x2 = BatchNormalization()(x2)
    x3 = Concatenate()([x, x2])

    # Block 2: 64 filters, skip from x3.
    x4 = Conv2D(64,
                3,
                activation="relu",
                kernel_initializer=init,
                bias_regularizer='l2',
                padding='same')(x3)
    x4 = BatchNormalization()(x4)
    x5 = Conv2D(64,
                3,
                activation="relu",
                kernel_initializer=init,
                bias_regularizer='l2',
                padding='same')(x4)
    x5 = BatchNormalization()(x5)
    x6 = Concatenate()([x3, x5])

    # Block 3: 96 filters, skip from x6.
    x7 = Conv2D(96,
                3,
                activation="relu",
                kernel_initializer=init,
                bias_regularizer='l2',
                padding='same')(x6)
    x7 = BatchNormalization()(x7)
    x8 = Conv2D(96,
                3,
                activation="relu",
                kernel_initializer=init,
                bias_regularizer='l2',
                padding='same')(x7)
    x8 = BatchNormalization()(x8)
    x9 = Concatenate()([x6, x8])

    # Block 4: 128 filters, skip from x9.
    x10 = Conv2D(128,
                 3,
                 activation="relu",
                 kernel_initializer=init,
                 bias_regularizer='l2',
                 padding='same')(x9)
    x10 = BatchNormalization()(x10)
    x11 = Conv2D(128,
                 3,
                 activation="relu",
                 kernel_initializer=init,
                 bias_regularizer='l2',
                 padding='same')(x10)
    x11 = BatchNormalization()(x11)
    x12 = Concatenate()([x9, x11])

    # Collapse spatial dimensions to one feature vector per sample.
    x13 = GlobalAveragePooling2D()(x12)

    # LSTM head over the pooled features.
    # NOTE(review): the Reshape assumes the pooled vector length is
    # divisible by 107 (e.g. a single-channel input gives 321 = 3 * 107)
    # — confirm against the expected input_shape.
    x14 = Flatten()(x13)
    x14 = Reshape((-1, 107))(x14)
    x15 = LSTM(1024,
               return_sequences=True,
               kernel_initializer=initializers.RandomNormal(stddev=0.001),
               dropout=0.5,
               recurrent_dropout=0.5)(x14)
    x16 = LSTM(1024,
               go_backwards=True,
               return_sequences=False,
               kernel_initializer=initializers.RandomNormal(stddev=0.001),
               dropout=0.5,
               recurrent_dropout=0.5)(x15)
    x17 = Dropout(0.5)(x16)
    # Single sigmoid unit: binary classification output.
    x18 = Dense(1, activation='sigmoid', kernel_initializer=init)(x17)

    model = Model(inputs=x, outputs=x18)

    return model
    '''