Code Example #1
  def create_model(self, n_dim, r):
    # load inputs
    X, _, _ = self.inputs
    K.set_session(self.sess)

    with tf.name_scope('generator'):
      x = X
      L = self.layers
      # dim/layer: 4096, 2048, 1024, 512, 256, 128,  64,  32,
      n_filters = [128, 384, 512, 512, 512, 512, 512, 512]
      n_filtersizes = [65, 33, 17,  9,  9,  9,  9, 9, 9]
      downsampling_l = []

      print('building model...')

      # downsampling layers
      for l, nf, fs in zip(range(L), n_filters, n_filtersizes):
        with tf.name_scope('downsc_conv%d' % l):
          x = (Convolution1D(nb_filter=nf, filter_length=fs, 
                  activation=None, border_mode='same', init=orthogonal_init,
                  subsample_length=2))(x)
          # if l > 0: x = BatchNormalization(mode=2)(x)
          x = LeakyReLU(0.2)(x)
          print('D-Block: ', x.get_shape())
          downsampling_l.append(x)

      # bottleneck layer
      with tf.name_scope('bottleneck_conv'):
          x = (Convolution1D(nb_filter=n_filters[-1], filter_length=n_filtersizes[-1], 
                  activation=None, border_mode='same', init=orthogonal_init,
                  subsample_length=2))(x)
          x = Dropout(p=0.5)(x)
          x = LeakyReLU(0.2)(x)

      # upsampling layers
      for l, nf, fs, l_in in list(reversed(list(zip(range(L), n_filters, n_filtersizes, downsampling_l)))):
        with tf.name_scope('upsc_conv%d' % l):
          # (-1, n/2, 2f)
          x = (Convolution1D(nb_filter=2*nf, filter_length=fs, 
                  activation=None, border_mode='same', init=orthogonal_init))(x)
          x = Dropout(p=0.5)(x)
          x = Activation('relu')(x)
          # (-1, n, f)
          x = SubPixel1D(x, r=2) 
          # (-1, n, 2f)
          x = K.concatenate(tensors=[x, l_in], axis=2)
          print('U-Block: ', x.get_shape())

      # final conv layer
      with tf.name_scope('lastconv'):
        x = Convolution1D(nb_filter=2, filter_length=9, 
                activation=None, border_mode='same', init=normal_init)(x)    
        x = SubPixel1D(x, r=2) 
        print(x.get_shape())

      g = merge([x, X], mode='sum')
    
    return g
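
SubPixel1D is referenced throughout these examples but never defined in them. Below is a minimal sketch of a 1-D sub-pixel (periodic shuffle) upscaling op, assuming the input layout (batch, width, r*channels) used above; the exact channel-grouping convention of the original helper may differ.

import tensorflow as tf

def SubPixel1D(x, r):
    """Hypothetical sketch: (batch, width, r*channels) -> (batch, r*width, channels).

    Output time step w*r + i takes the i-th block of channels//r channels of input frame w.
    """
    with tf.name_scope('subpixel'):
        batch = tf.shape(x)[0]
        width = tf.shape(x)[1]
        channels = int(x.get_shape()[-1])
        assert channels % r == 0
        return tf.reshape(x, [batch, r * width, channels // r])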
Code Example #2
def unet3D(x_in,
           img_shape, out_im_chans,
           nf_enc=[64, 64, 128, 128, 256, 256, 512],
           nf_dec=None,
           layer_prefix='unet',
           n_convs_per_stage=1,
        ):
    ks = 3
    x = x_in

    encodings = []
    encoding_vol_sizes = []
    for i in range(len(nf_enc)):
        for j in range(n_convs_per_stage):
            x = Conv3D(
                nf_enc[i],
                kernel_size=ks,
                strides=(1, 1, 1), padding='same',
                name='{}_enc_conv3D_{}_{}'.format(layer_prefix, i, j + 1))(x)
            x = LeakyReLU(0.2)(x)

        encodings.append(x)
        encoding_vol_sizes.append(np.asarray(x.get_shape().as_list()[1:-1]))

        if i < len(nf_enc) - 1:
            x = MaxPooling3D(pool_size=(2, 2, 2), padding='same', name='{}_enc_maxpool_{}'.format(layer_prefix, i))(x)

    if nf_dec is None:
        nf_dec = list(reversed(nf_enc[1:]))

    for i in range(len(nf_dec)):
        curr_shape = x.get_shape().as_list()[1:-1]

        # only do upsample if we are not yet at max resolution
        if np.any(curr_shape < list(img_shape[:len(curr_shape)])):
            us = (2, 2, 2)
            x = UpSampling3D(size=us, name='{}_dec_upsamp_{}'.format(layer_prefix, i))(x)

        # just concatenate the final layer here
        if i <= len(encodings) - 2:
            x = _pad_or_crop_to_shape_3D(x, np.asarray(x.get_shape().as_list()[1:-1]), encoding_vol_sizes[-i-2])
            x = Concatenate(axis=-1)([x, encodings[-i-2]])

        for j in range(n_convs_per_stage):
            x = Conv3D(nf_dec[i],
                       kernel_size=ks, strides=(1, 1, 1), padding='same',
                       name='{}_dec_conv3D_{}_{}'.format(layer_prefix, i, j))(x)
            x = LeakyReLU(0.2)(x)


    y = Conv3D(out_im_chans, kernel_size=1, padding='same',
               name='{}_dec_conv3D_final'.format(layer_prefix))(x)  # add your own activation after this model

    # add your own activation after this model
    return y
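
A minimal sketch of wiring the unet3D above into a trainable model. The volume size, channel counts, and import paths are assumptions, and the snippet presumes that numpy, the Keras layers used above, and the _pad_or_crop_to_shape_3D helper (sketched after Code Example #5) are already in scope.

from keras.layers import Input, Activation
from keras.models import Model

vol_shape = (64, 64, 64)                  # hypothetical volume size
x_in = Input(shape=vol_shape + (1,))      # single-channel 3-D input
y = unet3D(x_in, img_shape=vol_shape, out_im_chans=2, n_convs_per_stage=2)
y = Activation('softmax')(y)              # "add your own activation after this model"
model = Model(inputs=x_in, outputs=y)
model.summary()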
Code Example #3
  def create_model(self, n_dim, r):
    # load inputs
    X, _, _ = self.inputs
    K.set_session(self.sess)

    with tf.name_scope('generator'):
      x = X
      L = self.layers
      n_filters = [  128,  256,  512, 512, 512, 512, 512, 512]
      n_filtersizes = [65, 33, 17,  9,  9,  9,  9, 9, 9]
      downsampling_l = []

      print 'building model...'

      # downsampling layers
      for l, nf, fs in zip(range(L), n_filters, n_filtersizes):
        with tf.name_scope('downsc_conv%d' % l):
          conv1d = Conv1D(filters=nf, kernel_size=fs, padding='same', kernel_initializer='orthogonal', strides=2)
          x = conv1d(x)
          x = LeakyReLU(0.2)(x)
          print 'D-Block: ', x.get_shape()
          downsampling_l.append(x)

      # bottleneck layer
      with tf.name_scope('bottleneck_conv'):
          conv1d = Conv1D(filters=n_filters[-1], kernel_size=n_filtersizes[-1], padding='same', kernel_initializer='orthogonal', strides=2)
          x = conv1d(x)
          x = Dropout(rate=0.5)(x)
          x = LeakyReLU(0.2)(x)

      # upsampling layers
      for l, nf, fs, l_in in reversed(zip(range(L), n_filters, n_filtersizes, downsampling_l)):
        with tf.name_scope('upsc_conv%d' % l):
          conv1d = Conv1D(filters=2*nf, kernel_size=fs, padding='same', kernel_initializer='orthogonal')
          x = (conv1d)(x)
          x = Dropout(rate=0.5)(x)
          x = Activation('relu')(x)
          x = SubPixel1D(x, r=2)
          x = Concatenate()([x, l_in])
          print 'U-Block: ', x.get_shape()

      # final conv layer
      with tf.name_scope('lastconv'):
        x = Conv1D(filters=2, kernel_size=9, padding='same', kernel_initializer='random_normal')(x)
        x = SubPixel1D(x, r=2) 
        print x.get_shape()

      g = Add()([x, X])

    return g
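
This variant targets the Keras 2 layer API while keeping Python 2 print statements. The imports it appears to rely on are roughly the following (an assumption; adjust to the actual project layout). SubPixel1D is the project-specific upscaler sketched after Code Example #1.

import tensorflow as tf
from keras import backend as K
from keras.layers import Conv1D, Dropout, Activation, LeakyReLU, Concatenate, Add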
Code Example #4
    def build_critic(self):     #ResNet

        X_input = Input(shape=self.img_shape)
        X = X_input
        num_conv_layers = 6
        num_res_blocks = 4
        num_channels = [32, 64, 128, 256, 256, 512, 512, 512, 512]
        for i in range(num_conv_layers):
            X = Conv2D(num_channels[i], kernel_size=3, strides=2, padding="same")(X)
            X = LeakyReLU(alpha=0.2)(X)
            if num_conv_layers - i <= num_res_blocks:
                ResX = X
                X = Conv2D(X.get_shape().as_list()[-1], kernel_size=3, strides=1, padding="same")(X)
                X = LeakyReLU(alpha=0.2)(X)
                X = Conv2D(X.get_shape().as_list()[-1], kernel_size=3, strides=1, padding="same")(X)
                X = Add()([X, ResX])
                X = LeakyReLU(alpha=0.2)(X)
        X = Conv2D(int(num_channels[num_conv_layers]/4), kernel_size=1, strides=1, padding="same")(X)
        X = LeakyReLU(alpha=0.2)(X)
        X = Conv2D(1, kernel_size=1, strides=1, padding="same")(X)
     
        model = Model(inputs=X_input, outputs=X)
        model.summary()
        return model
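
build_critic only reads self.img_shape, so a minimal sketch for exercising it standalone could look like this; the import paths and the 128x128x3 resolution are assumptions.

from keras.layers import Input, Conv2D, LeakyReLU, Add
from keras.models import Model

class _Cfg(object):
    img_shape = (128, 128, 3)   # assumed critic input resolution

critic = build_critic(_Cfg())   # prints the summary and returns the Keras model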
Code Example #5
def unet3D(
    x_in,
    img_shape,
    out_im_chans,
    nf_enc=[64, 64, 128, 128, 256, 256, 512],
    nf_dec=None,
    regularizer=None,
    initializer=None,
    layer_prefix='unet',
    n_convs_per_stage=1,
    include_residual=False,
    use_maxpool=True,
    max_time_downsample=None,
    n_tasks=1,
    use_dropout=False,
    do_unpool=False,
    do_last_conv=True,
):
    ks = 3
    if max_time_downsample is None:
        max_time_downsample = len(
            nf_enc)  # downsample in time all the way down

        encoding_im_sizes = np.asarray([(
            int(np.ceil(img_shape[0] / 2.0**i)),
            int(np.ceil(img_shape[1] / 2.0**i)),
            int(np.ceil(img_shape[2] / 2.0**i)),
        ) for i in range(0,
                         len(nf_enc) + 1)])
    else:
        encoding_im_sizes = np.asarray([(
            int(np.ceil(img_shape[0] / 2.0**i)),
            int(np.ceil(img_shape[1] / 2.0**i)),
            max(int(np.ceil(img_shape[2] / 2.0**(max_time_downsample))),
                int(np.ceil(img_shape[2] / 2.0**i))),
        ) for i in range(0,
                         len(nf_enc) + 1)])

    reg_params = {}
    if regularizer == 'l1':
        reg = regularizers.l1(1e-6)
    else:
        reg = None

    if initializer == 'zeros':
        reg_params['kernel_initializer'] = initializers.Zeros()

    x = x_in

    encodings = []
    encoding_im_sizes = []
    for i in range(len(nf_enc)):
        if not use_maxpool and i > 0:
            x = LeakyReLU(0.2)(x)

        for j in range(n_convs_per_stage):
            if nf_enc[i] is not None:  # in case we don't want to convolve at max resolution
                x = Conv3D(nf_enc[i],
                           kernel_regularizer=reg,
                           kernel_size=ks,
                           strides=(1, 1, 1),
                           padding='same',
                           name='{}_enc_conv3D_{}_{}'.format(
                               layer_prefix, i, j + 1))(x)
            #if use_dropout:
            #	x = Dropout(0.2)(x)

            if j == 0 and include_residual:
                residual_input = x
            elif j == n_convs_per_stage - 1 and include_residual:
                x = Add()([residual_input, x])

            x = LeakyReLU(0.2)(x)

        encodings.append(x)
        encoding_im_sizes.append(np.asarray(x.get_shape().as_list()[1:-1]))

        # only downsample if we haven't reached the max
        if i >= max_time_downsample:
            ds = (2, 2, 1)
        else:
            ds = (2, 2, 2)

        if i < len(nf_enc) - 1:
            if use_maxpool:
                x = MaxPooling3D(pool_size=ds,
                                 padding='same',
                                 name='{}_enc_maxpool_{}'.format(
                                     layer_prefix, i))(x)
                #x, pool_idxs = Lambda(lambda x:._max_pool_3d_with_argmax(x, ksize=ks, strides=(2, 2, 2), padding='same'), name='{}_enc_maxpool3dwithargmax_{}'.format(layer_prefix, i))(x)
            else:
                x = Conv3D(nf_enc[i],
                           kernel_size=ks,
                           strides=ds,
                           padding='same',
                           name='{}_enc_conv3D_{}'.format(layer_prefix, i))(x)

    if nf_dec is None:
        nf_dec = list(reversed(nf_enc[1:]))

    decoder_outputs = []
    x_encoded = x
    print(encoding_im_sizes)
    print(nf_dec)
    for ti in range(n_tasks):
        decoding_im_sizes = []
        x = x_encoded
        for i in range(len(nf_dec)):
            curr_shape = x.get_shape().as_list()[1:-1]

            print('Current shape {}, img shape {}'.format(
                x.get_shape().as_list(), img_shape))
            # only do upsample if we are not yet at max resolution
            if np.any(curr_shape < list(img_shape[:len(curr_shape)])):
                # TODO: fix this for time
                '''
                if i < len(nf_dec) - max_time_downsample + 1 \
                         or curr_shape[-1] >= encoding_im_sizes[-i-2][-1]:  # if we are already at the correct time scale
                    us = (2, 2, 1)
                else:
                '''
                us = (2, 2, 2)
                #decoding_im_sizes.append(encoding_im_sizes[-i-1] * np.asarray(us))

                x = UpSampling3D(size=us,
                                 name='{}_dec{}_upsamp_{}'.format(
                                     layer_prefix, ti, i))(x)

            # just concatenate the final layer here
            if i <= len(encodings) - 2:
                x = _pad_or_crop_to_shape_3D(
                    x, np.asarray(x.get_shape().as_list()[1:-1]),
                    encoding_im_sizes[-i - 2])
                x = Concatenate(axis=-1)([x, encodings[-i - 2]])
                #x = LeakyReLU(0.2)(x)
            residual_input = x

            for j in range(n_convs_per_stage):
                x = Conv3D(nf_dec[i],
                           kernel_regularizer=reg,
                           kernel_size=ks,
                           strides=(1, 1, 1),
                           padding='same',
                           name='{}_dec{}_conv3D_{}_{}'.format(
                               layer_prefix, ti, i, j))(x)
                if use_dropout and i < 2:
                    x = Dropout(0.2)(x)
                if j == 0 and include_residual:
                    residual_input = x
                elif j == n_convs_per_stage - 1 and include_residual:
                    x = Add()([residual_input, x])
                x = LeakyReLU(0.2)(x)

        if do_last_conv:
            y = Conv3D(out_im_chans,
                       kernel_size=1,
                       padding='same',
                       kernel_regularizer=reg,
                       name='{}_dec{}_conv3D_final'.format(layer_prefix, ti))(
                           x)  # add your own activation after this model
        else:
            y = x
        decoder_outputs.append(y)
    # add your own activation after this model
    if n_tasks == 1:
        return y
    else:
        return decoder_outputs
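
_pad_or_crop_to_shape_3D is called by the decoders above but never defined in these examples. A plausible reconstruction, assuming it symmetrically zero-pads and/or crops the three spatial dimensions to the target size, is sketched below.

import numpy as np
from keras.layers import ZeroPadding3D, Cropping3D

def _pad_or_crop_to_shape_3D(x, in_shape, target_shape):
    # Hypothetical helper: make the spatial dims of x (currently in_shape) match target_shape.
    delta = np.asarray(target_shape) - np.asarray(in_shape)

    pad = np.maximum(delta, 0)
    if np.any(pad > 0):
        x = ZeroPadding3D(padding=tuple((int(p // 2), int(p - p // 2)) for p in pad))(x)

    crop = np.maximum(-delta, 0)
    if np.any(crop > 0):
        x = Cropping3D(cropping=tuple((int(c // 2), int(c - c // 2)) for c in crop))(x)
    return x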
Code Example #6
def unet2D(
    x_in,
    input_shape,
    out_im_chans,
    nf_enc=[64, 64, 128, 128, 256, 256, 512],
    nf_dec=None,
    regularizer=None,
    initializer=None,
    layer_prefix='unet',
    n_convs_per_stage=1,
    use_residuals=False,
    use_maxpool=False,
    concat_at_stages=None,
    do_last_conv=True,
    ks=3,
):

    reg_params = {}
    if regularizer == 'l1':
        reg = regularizers.l1(1e-6)
    else:
        reg = None

    if initializer == 'zeros':
        reg_params['kernel_initializer'] = initializers.Zeros()

    x = x_in
    encodings = []
    for i in range(len(nf_enc)):
        if not use_maxpool and i > 0:
            x = LeakyReLU(0.2)(x)

        for j in range(n_convs_per_stage):
            if nf_enc[i] is not None:  # in case we don't want to convolve at the first resolution
                x = Conv2D(nf_enc[i],
                           kernel_regularizer=reg,
                           kernel_size=ks,
                           strides=(1, 1),
                           padding='same',
                           name='{}_enc_conv2D_{}_{}'.format(
                               layer_prefix, i, j + 1))(x)

            if concat_at_stages and concat_at_stages[i] is not None:
                x = Concatenate(axis=-1)([x, concat_at_stages[i]])

            if j == 0 and use_residuals:
                residual_input = x
            elif j == n_convs_per_stage - 1 and use_residuals:
                x = Add()([residual_input, x])
            x = LeakyReLU(0.2)(x)

        if i < len(nf_enc) - 1:
            encodings.append(x)
            if use_maxpool:
                x = MaxPooling2D(pool_size=(2, 2),
                                 padding='same',
                                 name='{}_enc_maxpool_{}'.format(
                                     layer_prefix, i))(x)
            else:
                x = Conv2D(nf_enc[i],
                           kernel_size=ks,
                           strides=(2, 2),
                           padding='same',
                           name='{}_enc_conv2D_{}'.format(layer_prefix, i))(x)

    print('Encodings to concat later: {}'.format(encodings))
    if nf_dec is None:
        nf_dec = list(reversed(nf_enc[1:]))
    print('Decoder channels: {}'.format(nf_dec))

    for i in range(len(nf_dec)):
        curr_shape = x.get_shape().as_list()[1:-1]

        # if we're not at full resolution, keep upsampling
        if np.any(list(curr_shape[:2]) < list(input_shape[:2])):
            x = UpSampling2D(size=(2, 2),
                             name='{}_dec_upsamp_{}'.format(layer_prefix,
                                                            i))(x)

        # if we still have things to concatenate, do that
        if (i + 1) <= len(encodings):
            curr_shape = x.get_shape().as_list()[1:-1]
            concat_with_shape = encodings[-i - 1].get_shape().as_list()[1:-1]
            x = _pad_or_crop_to_shape(x, curr_shape, concat_with_shape)
            x = Concatenate()([x, encodings[-i - 1]])

        residual_input = x

        for j in range(n_convs_per_stage):
            x = Conv2D(nf_dec[i],
                       kernel_regularizer=reg,
                       kernel_size=ks,
                       strides=(1, 1),
                       padding='same',
                       name='{}_dec_conv2D_{}_{}'.format(layer_prefix, i,
                                                         j))(x)
            if j == 0 and use_residuals:
                residual_input = x
            elif j == n_convs_per_stage - 1 and use_residuals:
                x = Add()([residual_input, x])
            x = LeakyReLU(0.2)(x)

    #x = Concatenate()([x, encodings[0]])
    '''
    for j in range(n_convs_per_stage - 1):
        x = Conv2D(out_im_chans,
                   kernel_regularizer=reg,
                   kernel_size=ks, strides=(1, 1), padding='same',
                   name='{}_dec_conv2D_last_{}'.format(layer_prefix, j))(x)
        x = LeakyReLU(0.2)(x)
    '''
    if do_last_conv:
        y = Conv2D(out_im_chans,
                   kernel_size=1,
                   padding='same',
                   kernel_regularizer=reg,
                   name='{}_dec_conv2D_final'.format(layer_prefix))(
                       x)  # add your own activation after this model
    else:
        y = x

    # add your own activation after this model
    return y
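
unet2D relies on the 2-D counterpart _pad_or_crop_to_shape, which is likewise undefined here. Assuming the same symmetric pad-or-crop behaviour as the 3-D sketch above:

import numpy as np
from keras.layers import ZeroPadding2D, Cropping2D

def _pad_or_crop_to_shape(x, in_shape, target_shape):
    # Hypothetical 2-D analogue of _pad_or_crop_to_shape_3D.
    delta = np.asarray(target_shape)[:2] - np.asarray(in_shape)[:2]
    pad = np.maximum(delta, 0)
    if np.any(pad > 0):
        x = ZeroPadding2D(padding=tuple((int(p // 2), int(p - p // 2)) for p in pad))(x)
    crop = np.maximum(-delta, 0)
    if np.any(crop > 0):
        x = Cropping2D(cropping=tuple((int(c // 2), int(c - c // 2)) for c in crop))(x)
    return x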
Code Example #7
  def create_model(self, n_dim, r):
    # load inputs
    X, _, _ = self.inputs
    K.set_session(self.sess)

    with tf.compat.v1.name_scope('generator'):
      x = X
      L = self.layers
      n_filters = [  128,  256,  512, 512, 512, 512, 512, 512]
      n_blocks = [ 128, 64, 32, 16, 8]
      n_filtersizes = [65, 33, 17,  9,  9,  9,  9, 9, 9]
      downsampling_l = []

      print('building model...')

      def _make_normalizer(x_in, n_filters, n_block):
        """applies an lstm layer on top of x_in"""
        x_shape = tf.shape(input=x_in)

        n_steps = x_shape[1] // int(n_block)  # will be 32 at training

        # first, apply standard conv layer to reduce the dimension
        # input of (-1, 4096, 128) becomes (-1, 32, 128)
        # input of (-1, 512, 512) becomes (-1, 32, 512)

        x_in_down = (MaxPooling1D(pool_size=int(n_block), padding='valid'))(x_in)

        # pooling to reduce dimension
        x_shape = tf.shape(input=x_in_down)

        x_rnn = LSTM(units = n_filters, return_sequences = True)(x_in_down)

        # output: (-1, n_steps, n_filters)
        return x_rnn

      def _apply_normalizer(x_in, x_norm, n_filters, n_block):
        x_shape = tf.shape(input=x_in)
        n_steps = x_shape[1] // int(n_block)  # will be 32 at training

        # reshape input into blocks
        x_in = tf.reshape(x_in, shape=(-1, n_steps, int(n_block), n_filters))
        x_norm = tf.reshape(x_norm, shape=(-1, n_steps, 1, n_filters))

        # multiply
        x_out = x_norm * x_in

        # return to original shape
        x_out = tf.reshape(x_out, shape=x_shape)

        return x_out


      # downsampling layers
      for l, nf, fs in zip(list(range(L)), n_filters, n_filtersizes):
        with tf.compat.v1.name_scope('downsc_conv%d' % l):
          x = (Conv1D(filters=nf, kernel_size=fs, dilation_rate = DRATE,
                  activation=None, padding='same', kernel_initializer=Orthogonal()))(x)
          x = (MaxPooling1D(pool_size=self.pool_size, padding='valid', strides=self.strides))(x)
          x = LeakyReLU(0.2)(x)

          # create and apply the normalizer
          nb = 128 / (2**l)

          params_before = np.sum([np.prod(v.get_shape().as_list()) for v in tf.compat.v1.trainable_variables()])
          x_norm = _make_normalizer(x, nf, nb)
          params_after = np.sum([np.prod(v.get_shape().as_list()) for v in tf.compat.v1.trainable_variables()])

          x = _apply_normalizer(x, x_norm, nf, nb)

          print('D-Block: ', x.get_shape())
          downsampling_l.append(x)

      # bottleneck layer
      with tf.compat.v1.name_scope('bottleneck_conv'):
          x = (Conv1D(filters=n_filters[-1], kernel_size=n_filtersizes[-1], dilation_rate = DRATE,
                  activation=None, padding='same', kernel_initializer=Orthogonal()))(x)
          x = (MaxPooling1D(pool_size=self.pool_size, padding='valid', strides=self.strides))(x)
          x = Dropout(rate=0.5)(x)
          x = LeakyReLU(0.2)(x)

          # create and apply the normalizer
          nb = 128 / (2**L)
          x_norm = _make_normalizer(x, n_filters[-1], nb)
          x = _apply_normalizer(x, x_norm, n_filters[-1], nb)

      # upsampling layers
      for l, nf, fs, l_in in reversed(list(zip(list(range(L)), n_filters, n_filtersizes, downsampling_l))):
        with tf.compat.v1.name_scope('upsc_conv%d' % l):
          # (-1, n/2, 2f)
          x = (Conv1D(filters=2*nf, kernel_size=fs, dilation_rate = DRATE,
                  activation=None, padding='same', kernel_initializer=Orthogonal()))(x)

          x = Dropout(rate=0.5)(x)
          x = Activation('relu')(x)
          # (-1, n, f)
          x = SubPixel1D(x, r=2)

          # create and apply the normalizer

          x_norm = _make_normalizer(x, nf, nb)
          x = _apply_normalizer(x, x_norm, nf, nb)
          # (-1, n, 2f)
          x = Concatenate()([x, l_in])
          print('U-Block: ', x.get_shape())

      # final conv layer
      with tf.compat.v1.name_scope('lastconv'):
        x = Conv1D(filters=2, kernel_size=9,
                activation=None, padding='same', kernel_initializer=RandomNormal(stddev=1e-3))(x)
        x = SubPixel1D(x, r=2)

      g = Add()([x, X])
    return g
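
Code Example #7 references several names it does not define: DRATE, SubPixel1D, the initializers, and self.pool_size / self.strides. The values and import paths below are assumptions for illustration only.

import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import (Conv1D, MaxPooling1D, LSTM, Dropout,
                                     Activation, LeakyReLU, Concatenate, Add)
from tensorflow.keras.initializers import Orthogonal, RandomNormal
from tensorflow.keras import backend as K

DRATE = 2   # assumed dilation rate of the atrous convolutions
# self.pool_size and self.strides are presumably 2, so every D-block halves the time axis.
# SubPixel1D is the project-specific upscaler sketched after Code Example #1.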
Code Example #8
def audiounet_processing(x_inp, params):
    n_dim = int(x_inp.get_shape()[1])
    x = x_inp
    L = params['layers']
    r = params['r']
    additive_skip_connection = params['additive_skip_connection']
    n_filtersizes = params['n_filtersizes']
    # dim/layer is always: n_dim/2, n_dim/4, n_dim/8, ...
    # The number of channels, on the other hand, depends on n_filters.
    if n_dim == 4096:
        print('n_dim = 4096')
        n_filters = params['n_filters_spectral_branch']
    elif n_dim == 8192:
        print('n_dim = 8192')
        n_filters = params['n_filters_time_branch']
    else:
        print('I need other n_dim')
        return None
    downsampling_l = []

    # downsampling layers
    for l, nf, fs in zip(range(L), n_filters, n_filtersizes):
        with tf.name_scope('downsc_conv%d' % l):
            x = (Convolution1D(
                nb_filter=nf,
                filter_length=fs,
                activation=None,
                border_mode='same',
                input_dim=(n_dim, 1),
                subsample_length=2,
                kernel_initializer=tf.orthogonal_initializer()))(
                    x)  #init=orthogonal_init,
            # if l > 0: x = BatchNormalization(mode=2)(x)
            x = LeakyReLU(0.2)(x)
            print('D-Block: {}'.format(x.get_shape()))
            downsampling_l.append(x)
    #print(x)

    # bottleneck layer
    with tf.name_scope('bottleneck_conv'):
        x = (Convolution1D(nb_filter=n_filters[-1],
                           filter_length=n_filtersizes[-1],
                           activation=None,
                           border_mode='same',
                           subsample_length=2,
                           kernel_initializer=tf.orthogonal_initializer()))(
                               x)  #init=orthogonal_init,
        x = Dropout(p=0.5)(x)
        x = LeakyReLU(0.2)(x)
        print('B-Block: {}'.format(x.get_shape()))
    # upsampling layers
    for l, nf, fs, l_in in list(
            zip(range(L), n_filters, n_filtersizes, downsampling_l))[::-1]:
        #print(l, nf, fs, l_in)
        with tf.name_scope('upsc_conv%d' % l):
            # (-1, n/2, 2f)
            x = (Convolution1D(
                nb_filter=2 * nf,
                filter_length=fs,
                activation=None,
                border_mode='same',
                kernel_initializer=tf.orthogonal_initializer()))(
                    x)  #init=orthogonal_init,
            x = Dropout(p=0.5)(x)
            x = Activation('relu')(x)
            # (-1, n, f)
            x = SubPixel1D(x, r=2)
            # (-1, n, 2f)
            x = K.concatenate(tensors=[x, l_in], axis=2)
            print('U-Block: {}'.format(x.get_shape()))

    # final conv layer
    with tf.name_scope('lastconv'):
        x = Convolution1D(nb_filter=2,
                          filter_length=9,
                          activation=None,
                          border_mode='same',
                          kernel_initializer=tf.orthogonal_initializer())(
                              x)  #init=orthogonal_init,
        x = SubPixel1D(x, r=2)

    if additive_skip_connection == True:
        x = tf.add(x, x_inp)

    return x  # x is (?, 8192). // with tf.reshape(x, [-1, n_dim]) I would add a channel at the end -> it would become (?, 8192, 1)
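
A hypothetical params dict for audiounet_processing, with key names taken from the code above; the concrete values and the Input shape are assumptions (the time branch expects n_dim == 8192, the spectral branch n_dim == 4096).

from keras.layers import Input

params = {
    'layers': 4,
    'r': 2,
    'additive_skip_connection': True,
    'n_filtersizes': [65, 33, 17, 9],
    'n_filters_time_branch': [128, 256, 512, 512],
    'n_filters_spectral_branch': [128, 256, 512, 512],
}

x_inp = Input(shape=(8192, 1))             # time-branch input
y = audiounet_processing(x_inp, params)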
Code Example #9
def create_model():
    X = Input(shape=(64, 128, 1))

    with tf.name_scope('generator'):
        X2 = Reshape((1, -1, 1))(X)

        x = X2
        L = 4
        n_filters = [128, 256, 512, 512, 512, 512, 512, 512]
        n_filtersizes = [65, 33, 17, 9, 9, 9, 9, 9, 9]
        downsampling_l = []

        print 'building model...'
        print 'Input: %s' % x.get_shape()

        # downsampling layers
        for l, nf, fs in zip(range(L), n_filters, n_filtersizes):
            with tf.name_scope('downsc_conv%d' % l):
                x = Conv2D(filters=nf,
                           kernel_size=(1, fs),
                           padding='same',
                           kernel_initializer='orthogonal',
                           strides=2)(x)
                x = LeakyReLU(0.2)(x)
                print 'D-Block-%d: %s' % (l, x.get_shape())
                downsampling_l.append(x)

        # bottleneck layer
        with tf.name_scope('bottleneck_conv'):
            x = Conv2D(filters=n_filters[-1],
                       kernel_size=(1, n_filtersizes[-1]),
                       padding='same',
                       kernel_initializer='orthogonal',
                       strides=2)(x)
            x = Dropout(rate=0.5)(x)
            x = LeakyReLU(0.2)(x)
            print 'B-Block: ', x.get_shape()

        # upsampling layers
        for l, nf, fs, l_in in reversed(
                zip(range(L), n_filters, n_filtersizes, downsampling_l)):
            with tf.name_scope('upsc_conv%d' % l):
                x = Conv2D(filters=nf * 2,
                           kernel_size=(1, fs),
                           padding='same',
                           kernel_initializer='orthogonal')(x)
                x = Dropout(rate=0.5)(x)
                x = Activation('relu')(x)
                x = subpixel1D(x, r=2)

                # CoreML does not support 3-D concatenate(axis=-1), so the following error happened:
                # >> raise ValueError('Only channel and sequence concatenation are supported.')
                # workaround: temporary reshape to 4-d, before concat back to 3-d

                #print 'concat', x.shape, l_in.shape
                s = (1, -1, int(x.shape[-1]))
                x = Reshape(s)(x)
                l_in = Reshape(s)(l_in)
                x = Concatenate(axis=-1)([x, l_in])
                s = (1, -1, int(x.shape[-1]))
                x = Reshape(s)(x)

                print 'U-Block-%d: %s' % (l, x.get_shape())

        # final conv layer
        with tf.name_scope('lastconv'):
            x = Conv2D(filters=2,
                       kernel_size=(1, 9),
                       padding='same',
                       kernel_initializer='random_normal')(x)
            x = subpixel1D(x, r=2)
            print 'Last-Block-1: %s' % x.get_shape()

        x = Add()([x, X2])

    model = Model(inputs=X, outputs=x)
    adam = Adam(lr=3e-4, beta_1=0.9, beta_2=0.999)
    model.compile(optimizer=adam,
                  loss=mean_sqrt_l2_error,
                  metrics=[mean_sqrt_l2_error, signal_noise_rate])
    return model
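
The compile step above references two project-specific functions that are not defined in these examples. The sketches below assume mean_sqrt_l2_error is the root-mean-square reconstruction error and signal_noise_rate an SNR-style metric in dB; the real definitions may differ.

from keras import backend as K

def mean_sqrt_l2_error(y_true, y_pred):
    # assumed: square root of the mean squared error, with a small epsilon for stability
    return K.sqrt(K.mean(K.square(y_pred - y_true)) + 1e-6)

def signal_noise_rate(y_true, y_pred):
    # assumed: 10 * log10(signal power / residual power)
    num = K.mean(K.square(y_true))
    den = K.mean(K.square(y_pred - y_true)) + K.epsilon()
    return 10.0 * K.log(num / den) / K.log(10.0)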
Code Example #10
def tfilm_processing(x_inp, params):
    n_dim = int(x_inp.get_shape()[1])
    DRATE = 2
    # load inputs
    x = x_inp
    L = params['layers']
    r = params['r']
    additive_skip_connection = params['additive_skip_connection']
    n_filtersizes = params['n_filtersizes']
    # dim/layer is always: n_dim/2, n_dim/4, n_dim/8, ...
    # The number of channels, on the other hand, depends on n_filters.
    if n_dim == 4096:
        print('n_dim = 4096')
        n_filters = params['n_filters_spectral_branch']
    elif n_dim == 8192:
        print('n_dim = 8192')
        n_filters = params['n_filters_time_branch']
    else:
        print('I need other n_dim')
        return None
    downsampling_l = []

    # downsampling layers
    for l, nf, fs in zip(range(L), n_filters, n_filtersizes):
        with tf.name_scope('downsc_conv%d' % l):
            x = (AtrousConvolution1D(
                nb_filter=nf,
                filter_length=fs,
                atrous_rate=DRATE,
                activation=None,
                border_mode='same',
                subsample_length=1,
                kernel_initializer=tf.orthogonal_initializer()))(
                    x)  #init=orthogonal_init
            x = (MaxPooling1D(pool_length=2, border_mode='valid'))(x)
            x = LeakyReLU(0.2)(x)
            # create and apply the normalizer
            nb = 128 / (2**l)
            params_before = np.sum([
                np.prod(v.get_shape().as_list())
                for v in tf.trainable_variables()
            ])
            x_norm = _make_normalizer(x, nf, nb)
            params_after = np.sum([
                np.prod(v.get_shape().as_list())
                for v in tf.trainable_variables()
            ])
            x = _apply_normalizer(x, x_norm, nf, nb)
            print('D-Block: {}'.format(x.get_shape()))
            downsampling_l.append(x)

    # bottleneck layer
    with tf.name_scope('bottleneck_conv'):
        x = (AtrousConvolution1D(
            nb_filter=n_filters[-1],
            filter_length=n_filtersizes[-1],
            atrous_rate=DRATE,
            activation=None,
            border_mode='same',
            subsample_length=1,
            kernel_initializer=tf.orthogonal_initializer()))(
                x)  #init=orthogonal_init
        x = (MaxPooling1D(pool_length=2, border_mode='valid'))(x)
        x = Dropout(p=0.5)(x)
        x = LeakyReLU(0.2)(x)
        # create and apply the normalizer
        nb = 128 / (2**L)
        x_norm = _make_normalizer(x, n_filters[-1], nb)
        x = _apply_normalizer(x, x_norm, n_filters[-1], nb)
        print('B-Block: {}'.format(x.get_shape()))

    # upsampling layers
    for l, nf, fs, l_in in list(
            zip(range(L), n_filters, n_filtersizes, downsampling_l))[::-1]:
        with tf.name_scope('upsc_conv%d' % l):
            x = (AtrousConvolution1D(
                nb_filter=2 * nf,
                filter_length=fs,
                atrous_rate=DRATE,
                activation=None,
                border_mode='same',
                kernel_initializer=tf.orthogonal_initializer()))(
                    x)  #init=orthogonal_init
            x = Dropout(p=0.5)(x)
            x = Activation('relu')(x)
            x = SubPixel1D(x, r=2)
            # create and apply the normalizer
            x_norm = _make_normalizer(x, nf, nb)
            x = _apply_normalizer(x, x_norm, nf, nb)
            x = K.concatenate(tensors=[x, l_in], axis=2)
            print('U-Block: {}'.format(x.get_shape()))

    with tf.name_scope('lastconv'):
        x = Convolution1D(nb_filter=2,
                          filter_length=9,
                          activation=None,
                          border_mode='same',
                          kernel_initializer=tf.keras.initializers.normal())(
                              x)  #, init=normal_init
        x = SubPixel1D(x, r=2)

    if additive_skip_connection == True:
        x = tf.add(x, x_inp)

    return x  # x is (?, 8192). // with tf.reshape(x, [-1, n_dim]) I would add a channel at the end -> it would become (?, 8192, 1)
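
Unlike Code Examples #7 and #11, this snippet calls _make_normalizer / _apply_normalizer without defining them. The sketch below is consistent with the inline versions elsewhere in this collection, written against the current Keras argument names and with integer block sizes assumed.

import tensorflow as tf
from keras.layers import MaxPooling1D, LSTM

def _make_normalizer(x_in, n_filters, n_block):
    # Summarise each block of n_block time steps with an LSTM (TFiLM-style modulation).
    x_down = MaxPooling1D(pool_size=int(n_block), padding='valid')(x_in)  # (-1, n_steps, n_filters)
    return LSTM(units=n_filters, return_sequences=True)(x_down)

def _apply_normalizer(x_in, x_norm, n_filters, n_block):
    # Scale every block of x_in by its corresponding normalizer activations.
    x_shape = tf.shape(x_in)
    n_steps = x_shape[1] // int(n_block)
    x_in = tf.reshape(x_in, shape=(-1, n_steps, int(n_block), n_filters))
    x_norm = tf.reshape(x_norm, shape=(-1, n_steps, 1, n_filters))
    return tf.reshape(x_norm * x_in, shape=x_shape)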
Code Example #11
    def create_model(self, n_dim, r):
        # load inputs
        X, _, _ = self.inputs
        K.set_session(self.sess)

        with tf.name_scope('generator'):
            x = X
            L = self.layers
            n_filters = [128, 256, 512, 512, 512, 512, 512, 512]
            n_blocks = [128, 64, 32, 16, 8]
            n_filtersizes = [65, 33, 17, 9, 9, 9, 9, 9, 9]
            downsampling_l = []

            print 'building model...'

            def _make_normalizer(x_in, n_filters, n_block):
                """applies an lstm layer on top of x_in"""
                x_shape = tf.shape(x_in)
                n_steps = x_shape[1] / n_block  # will be 32 at training

                # first, apply standard conv layer to reduce the dimension
                # input of (-1, 4096, 128) becomes (-1, 32, 128)
                # input of (-1, 512, 512) becomes (-1, 32, 512)

                x_in_down = (MaxPooling1D(pool_length=n_block,
                                          border_mode='valid'))(x_in)

                # pooling to reduce dimension
                x_shape = tf.shape(x_in_down)

                x_rnn = LSTM(output_dim=n_filters,
                             return_sequences=True)(x_in_down)

                # output: (-1, n_steps, n_filters)
                return x_rnn

            def _apply_normalizer(x_in, x_norm, n_filters, n_block):
                x_shape = tf.shape(x_in)
                n_steps = x_shape[1] / n_block  # will be 32 at training

                # reshape input into blocks
                x_in = tf.reshape(x_in,
                                  shape=(-1, n_steps, n_block, n_filters))
                x_norm = tf.reshape(x_norm, shape=(-1, n_steps, 1, n_filters))

                # multiply
                x_out = x_norm * x_in

                # return to original shape
                x_out = tf.reshape(x_out, shape=x_shape)

                return x_out

            # downsampling layers
            for l, nf, fs in zip(range(L), n_filters, n_filtersizes):
                with tf.name_scope('downsc_conv%d' % l):
                    x = (AtrousConvolution1D(nb_filter=nf,
                                             filter_length=fs,
                                             atrous_rate=DRATE,
                                             activation=None,
                                             border_mode='same',
                                             init=orthogonal_init,
                                             subsample_length=1))(x)
                    x = (MaxPooling1D(pool_length=2, border_mode='valid'))(x)
                    x = LeakyReLU(0.2)(x)

                    # create and apply the normalizer
                    nb = 128 / (2**l)

                    params_before = np.sum([
                        np.prod(v.get_shape().as_list())
                        for v in tf.trainable_variables()
                    ])
                    x_norm = _make_normalizer(x, nf, nb)
                    params_after = np.sum([
                        np.prod(v.get_shape().as_list())
                        for v in tf.trainable_variables()
                    ])

                    x = _apply_normalizer(x, x_norm, nf, nb)

                    print 'D-Block: ', x.get_shape()
                    downsampling_l.append(x)

            # bottleneck layer
            with tf.name_scope('bottleneck_conv'):
                x = (AtrousConvolution1D(nb_filter=n_filters[-1],
                                         filter_length=n_filtersizes[-1],
                                         atrous_rate=DRATE,
                                         activation=None,
                                         border_mode='same',
                                         init=orthogonal_init,
                                         subsample_length=1))(x)
                x = (MaxPooling1D(pool_length=2, border_mode='valid'))(x)
                x = Dropout(p=0.5)(x)
                x = LeakyReLU(0.2)(x)

                # create and apply the normalizer
                nb = 128 / (2**L)
                x_norm = _make_normalizer(x, n_filters[-1], nb)
                x = _apply_normalizer(x, x_norm, n_filters[-1], nb)

            # upsampling layers
            for l, nf, fs, l_in in reversed(
                    zip(range(L), n_filters, n_filtersizes, downsampling_l)):
                with tf.name_scope('upsc_conv%d' % l):
                    # (-1, n/2, 2f)
                    x = (AtrousConvolution1D(nb_filter=2 * nf,
                                             filter_length=fs,
                                             atrous_rate=DRATE,
                                             activation=None,
                                             border_mode='same',
                                             init=orthogonal_init))(x)

                    x = Dropout(p=0.5)(x)
                    x = Activation('relu')(x)
                    # (-1, n, f)
                    x = SubPixel1D(x, r=2)

                    # create and apply the normalizer
                    x_norm = _make_normalizer(x, nf, nb)
                    x = _apply_normalizer(x, x_norm, nf, nb)
                    # (-1, n, 2f)
                    x = merge([x, l_in], mode='concat', concat_axis=-1)
                    print 'U-Block: ', x.get_shape()

            # final conv layer
            with tf.name_scope('lastconv'):
                x = Convolution1D(nb_filter=2,
                                  filter_length=9,
                                  activation=None,
                                  border_mode='same',
                                  init=normal_init)(x)
                x = SubPixel1D(x, r=2)

            g = merge([x, X], mode='sum')
        return g
Code Example #12
  def create_model(self, n_dim, r):
    # load inputs
    X, _, _ = self.inputs
    K.set_session(self.sess)

    with tf.name_scope('generator'):
      x = X
      L = self.layers
      # dim/layer: 4096, 2048, 1024, 512, 256, 128,  64,  32,
      # n_filters = [  64,  128,  256, 384, 384, 384, 384, 384]
      n_filters = [  128,  256,  512, 512, 512, 512, 512, 512]
      # n_filters = [  256,  512,  512, 512, 512, 1024, 1024, 1024]
      # n_filtersizes = [129, 65,   33,  17,  9,  9,  9, 9]
      # n_filtersizes = [31, 31,   31,  31,  31,  31,  31, 31]
      n_filtersizes = [65, 33, 17,  9,  9,  9,  9, 9, 9]
      downsampling_l = []

      print 'building model...'

      # downsampling layers
      for l, nf, fs in zip(range(L), n_filters, n_filtersizes):
        with tf.name_scope('downsc_conv%d' % l):
          '''x = (Convolution1D(nb_filter=nf, filter_length=fs, 
                  activation=None, border_mode='same', init=orthogonal_init,
                  subsample_length=2))(x)'''
          x = Conv1D(filters=nf, kernel_size=fs, activation=None, padding='same', kernel_initializer='Orthogonal', strides=2)(x)
          # if l > 0: x = BatchNormalization(mode=2)(x)
          x = LeakyReLU(0.2)(x)
          print 'D-Block: ', x.get_shape()
          downsampling_l.append(x)

      # bottleneck layer
      with tf.name_scope('bottleneck_conv'):
          '''x = (Convolution1D(nb_filter=n_filters[-1], filter_length=n_filtersizes[-1], 
                  activation=None, border_mode='same', init=orthogonal_init,
                  subsample_length=2))(x)'''
          x = Conv1D(filters=n_filters[-1], kernel_size=n_filtersizes[-1], activation=None, padding='same', kernel_initializer='Orthogonal', strides=2)(x)
          #x = Dropout(p=0.5)(x)
          x = Dropout(rate=0.5)(x)
          # x = BatchNormalization(mode=2)(x)
          x = LeakyReLU(0.2)(x)

      # upsampling layers
      for l, nf, fs, l_in in reversed(zip(range(L), n_filters, n_filtersizes, downsampling_l)):
        with tf.name_scope('upsc_conv%d' % l):
          # (-1, n/2, 2f)
          '''x = (Convolution1D(nb_filter=2*nf, filter_length=fs, 
                  activation=None, border_mode='same', init=orthogonal_init))(x)'''
          x = Conv1D(filters=2*nf, kernel_size=fs, activation=None, padding='same', kernel_initializer='Orthogonal')(x)
          # x = BatchNormalization(mode=2)(x)
          #x = Dropout(p=0.5)(x)
          x = Dropout(rate=0.5)(x)
          x = Activation('relu')(x)
          # (-1, n, f)
          x = SubPixel1D(x, r=2) 
          # (-1, n, 2f)

          '''changed merge to the new keras.layers.concatenate'''
          x = concatenate([x, l_in], axis=-1)
          #x = merge([x, l_in], mode='concat', concat_axis=-1) 
          print 'U-Block: ', x.get_shape()
          #print 'U-Block: ', x.output_shape

      # final conv layer
      with tf.name_scope('lastconv'):
        '''x = Convolution1D(nb_filter=2, filter_length=9, 
                activation=None, border_mode='same', init=normal_init)(x)'''
        x = Conv1D(filters=2, kernel_size=9, activation=None, padding='same', kernel_initializer='RandomNormal')(x)    
        x = SubPixel1D(x, r=2) 
        print x.get_shape()

      #g = merge([x, X], mode='sum')
      g = add([x, X])

    return g
Code Example #13
def unet3D(x_in,
           img_shape, out_im_chans,
           nf_enc=[64, 64, 128, 128, 256, 256, 512],
           nf_dec=None,
           regularizer=None, initializer=None, layer_prefix='unet',
           n_convs_per_stage=1,
           include_residual=False, use_maxpool=True,
           max_time_downsample=None,
           n_tasks=1,
           use_dropout=False,
           do_unpool=False,
            do_last_conv=True,
        ):
    ks = 3
    if max_time_downsample is None:
        max_time_downsample = len(nf_enc)  # downsample in time all the way down

        encoding_im_sizes = np.asarray([(
                    int(np.ceil(img_shape[0] / 2.0 ** i)),
                    int(np.ceil(img_shape[1] / 2.0 ** i)),
                    int(np.ceil(img_shape[2] / 2.0 ** i)),
                ) for i in range(0, len(nf_enc) + 1)])
    else:
        encoding_im_sizes = np.asarray([(
                    int(np.ceil(img_shape[0] / 2.0 ** i)),
                    int(np.ceil(img_shape[1] / 2.0 ** i)),
                    max(int(np.ceil(img_shape[2] / 2.0 ** (max_time_downsample))), int(np.ceil(img_shape[2] / 2.0 ** i))),
                ) for i in range(0, len(nf_enc) + 1)])

    reg_params = {}
    if regularizer == 'l1':
        reg = regularizers.l1(1e-6)
    else:
        reg = None

    if initializer == 'zeros':
        reg_params['kernel_initializer'] = initializers.Zeros()

    x = x_in

    encodings = []
    encoding_im_sizes = []
    for i in range(len(nf_enc)):
        if not use_maxpool and i > 0:
            x = LeakyReLU(0.2)(x)

        for j in range(n_convs_per_stage):
            if nf_enc[i] is not None:  # in case we don't want to convolve at max resolution
                x = Conv3D(
                    nf_enc[i],
                    kernel_regularizer=reg, kernel_size=ks,
                    strides=(1, 1, 1), padding='same',
                    name='{}_enc_conv3D_{}_{}'.format(layer_prefix, i, j + 1))(x)
            #if use_dropout:
            #	x = Dropout(0.2)(x)

            if j == 0 and include_residual:
                residual_input = x
            elif j == n_convs_per_stage - 1 and include_residual:
                x = Add()([residual_input, x])

            x = LeakyReLU(0.2)(x)

        encodings.append(x)
        encoding_im_sizes.append(np.asarray(x.get_shape().as_list()[1:-1]))

        # only downsample if we haven't reached the max
        if i >= max_time_downsample:
            ds = (2, 2, 1)
        else:
            ds = (2, 2, 2)

        if i < len(nf_enc) - 1:
            if use_maxpool:
                x = MaxPooling3D(pool_size=ds, padding='same', name='{}_enc_maxpool_{}'.format(layer_prefix, i))(x)
                #x, pool_idxs = Lambda(lambda x:._max_pool_3d_with_argmax(x, ksize=ks, strides=(2, 2, 2), padding='same'), name='{}_enc_maxpool3dwithargmax_{}'.format(layer_prefix, i))(x)
            else:
                x = Conv3D(nf_enc[i], kernel_size=ks, strides=ds,  padding='same', name='{}_enc_conv3D_{}'.format(layer_prefix, i))(x)

    if nf_dec is None:
        nf_dec = list(reversed(nf_enc[1:]))

    decoder_outputs = []
    x_encoded = x
    print(encoding_im_sizes)
    print(nf_dec)
    for ti in range(n_tasks):
        decoding_im_sizes = []
        x = x_encoded
        for i in range(len(nf_dec)):
            curr_shape = x.get_shape().as_list()[1:-1]

            print('Current shape {}, img shape {}'.format(x.get_shape().as_list(), img_shape))
            # only do upsample if we are not yet at max resolution
            if np.any(curr_shape < list(img_shape[:len(curr_shape)])):
                # TODO: fix this for time
                '''
                if i < len(nf_dec) - max_time_downsample + 1 \
                         or curr_shape[-1] >= encoding_im_sizes[-i-2][-1]:  # if we are already at the correct time scale
                    us = (2, 2, 1)
                else:
                '''
                us = (2, 2, 2)
                #decoding_im_sizes.append(encoding_im_sizes[-i-1] * np.asarray(us))

                x = UpSampling3D(size=us, name='{}_dec{}_upsamp_{}'.format(layer_prefix, ti, i))(x)

            # just concatenate the final layer here
            if i <= len(encodings) - 2:
                x = _pad_or_crop_to_shape_3D(x, np.asarray(x.get_shape().as_list()[1:-1]), encoding_im_sizes[-i-2])
                x = Concatenate(axis=-1)([x, encodings[-i-2]])
                #x = LeakyReLU(0.2)(x)
            residual_input = x

            for j in range(n_convs_per_stage):
                x = Conv3D(nf_dec[i],
                           kernel_regularizer=reg,
                           kernel_size=ks, strides=(1, 1, 1), padding='same',
                           name='{}_dec{}_conv3D_{}_{}'.format(layer_prefix, ti, i, j))(x)
                if use_dropout and i < 2:
                    x = Dropout(0.2)(x)
                if j == 0 and include_residual:
                    residual_input = x
                elif j == n_convs_per_stage - 1 and include_residual:
                    x = Add()([residual_input, x])
                x = LeakyReLU(0.2)(x)


        if do_last_conv:
            y = Conv3D(out_im_chans, kernel_size=1, padding='same', kernel_regularizer=reg,
                       name='{}_dec{}_conv3D_final'.format(layer_prefix, ti))(x)  # add your own activation after this model
        else:
            y = x
        decoder_outputs.append(y)
    # add your own activation after this model
    if n_tasks == 1:
        return y
    else:
        return decoder_outputs
Code Example #14
def unet2D(x_in,
           input_shape, out_im_chans,
           nf_enc=[64, 64, 128, 128, 256, 256, 512],
           nf_dec=None,
           regularizer=None, initializer=None, layer_prefix='unet',
           n_convs_per_stage=1,
           use_residuals=False,
           use_maxpool=False,
           concat_at_stages=None,
           do_last_conv=True,
           ks=3,
           ):

    reg_params = {}
    if regularizer == 'l1':
        reg = regularizers.l1(1e-6)
    else:
        reg = None

    if initializer == 'zeros':
        reg_params['kernel_initializer'] = initializers.Zeros()

    x = x_in
    encodings = []
    for i in range(len(nf_enc)):
        if not use_maxpool and i > 0:
            x = LeakyReLU(0.2)(x)

        for j in range(n_convs_per_stage):
            if nf_enc[i] is not None:  # in case we don't want to convolve at the first resolution
                x = Conv2D(nf_enc[i],
                           kernel_regularizer=reg, kernel_size=ks,
                           strides=(1, 1), padding='same',
                           name='{}_enc_conv2D_{}_{}'.format(layer_prefix, i, j + 1))(x)

            if concat_at_stages and concat_at_stages[i] is not None:
                x = Concatenate(axis=-1)([x, concat_at_stages[i]])

            if j == 0 and use_residuals:
                residual_input = x
            elif j == n_convs_per_stage - 1 and use_residuals:
                x = Add()([residual_input, x])
            x = LeakyReLU(0.2)(x)

        if i < len(nf_enc) - 1:
            encodings.append(x)
            if use_maxpool:
                x = MaxPooling2D(pool_size=(2, 2), padding='same',
                                 name='{}_enc_maxpool_{}'.format(layer_prefix, i))(x)
            else:
                x = Conv2D(nf_enc[i], kernel_size=ks, strides=(2, 2), padding='same',
                           name='{}_enc_conv2D_{}'.format(layer_prefix, i))(x)

    print('Encodings to concat later: {}'.format(encodings))
    if nf_dec is None:
        nf_dec = list(reversed(nf_enc[1:]))
    print('Decoder channels: {}'.format(nf_dec))

    for i in range(len(nf_dec)):
        curr_shape = x.get_shape().as_list()[1:-1]

        # if we're not at full resolution, keep upsampling
        if np.any(list(curr_shape[:2]) < list(input_shape[:2])):
            x = UpSampling2D(size=(2, 2), name='{}_dec_upsamp_{}'.format(layer_prefix, i))(x)

        # if we still have things to concatenate, do that
        if (i + 1) <= len(encodings):
            curr_shape = x.get_shape().as_list()[1:-1]
            concat_with_shape = encodings[-i - 1].get_shape().as_list()[1:-1]
            x = _pad_or_crop_to_shape(x, curr_shape, concat_with_shape)
            x = Concatenate()([x, encodings[-i - 1]])

        residual_input = x

        for j in range(n_convs_per_stage):
            x = Conv2D(nf_dec[i],
                       kernel_regularizer=reg,
                       kernel_size=ks, strides=(1, 1), padding='same',
                       name='{}_dec_conv2D_{}_{}'.format(layer_prefix, i, j))(x)
            if j == 0 and use_residuals:
                residual_input = x
            elif j == n_convs_per_stage - 1 and use_residuals:
                x = Add()([residual_input, x])
            x = LeakyReLU(0.2)(x)

    #x = Concatenate()([x, encodings[0]])
    '''
    for j in range(n_convs_per_stage - 1):
        x = Conv2D(out_im_chans,
                   kernel_regularizer=reg,
                   kernel_size=ks, strides=(1, 1), padding='same',
                   name='{}_dec_conv2D_last_{}'.format(layer_prefix, j))(x)
        x = LeakyReLU(0.2)(x)
    '''
    if do_last_conv:
        y = Conv2D(out_im_chans, kernel_size=1, padding='same', kernel_regularizer=reg,
               name='{}_dec_conv2D_final'.format(layer_prefix))(x)  # add your own activation after this model
    else:
        y = x

    # add your own activation after this model
    return y
Code Example #15
    def create_model(self, n_dim, r):
        # load inputs
        X, _, _ = self.inputs
        tf.compat.v1.keras.backend.set_session(self.sess)

        with tf.name_scope('generator'):
            x = X
            L = self.layers
            # dim/layer: 4096, 2048, 1024, 512, 256, 128,  64,  32,
            # n_filters = [  64,  128,  256, 384, 384, 384, 384, 384]
            n_filters = [128, 256, 512, 512, 512, 512, 512, 512]
            # n_filters = [  256,  512,  512, 512, 512, 1024, 1024, 1024]
            # n_filtersizes = [129, 65,   33,  17,  9,  9,  9, 9]
            # n_filtersizes = [31, 31,   31,  31,  31,  31,  31, 31]
            n_filtersizes = [65, 33, 17, 9, 9, 9, 9, 9, 9]
            downsampling_l = []

            print('building model...')

            # downsampling layers
            for l, nf, fs in zip(range(L), n_filters, n_filtersizes):
                with tf.name_scope('downsc_conv%d' % l):
                    x = (Conv1D(filters=nf,
                                kernel_size=fs,
                                activation=None,
                                padding='same',
                                kernel_initializer="Orthogonal",
                                strides=2))(x)
                    # if l > 0: x = BatchNormalization(mode=2)(x)
                    x = LeakyReLU(0.2)(x)
                    print('D-Block: ', x.get_shape())
                    downsampling_l.append(x)

            # bottleneck layer
            with tf.name_scope('bottleneck_conv'):
                x = (Conv1D(filters=n_filters[-1],
                            kernel_size=n_filtersizes[-1],
                            activation=None,
                            padding='same',
                            kernel_initializer="Orthogonal",
                            strides=2))(x)
                x = Dropout(rate=0.5)(x)
                # x = BatchNormalization(mode=2)(x)
                x = LeakyReLU(0.2)(x)

            # upsampling layers
            for l, nf, fs, l_in in reversed(
                    list(
                        zip(range(L), n_filters, n_filtersizes,
                            downsampling_l))):
                with tf.name_scope('upsc_conv%d' % l):
                    # (-1, n/2, 2f)
                    x = (Conv1D(filters=2 * nf,
                                kernel_size=fs,
                                activation=None,
                                padding='same',
                                kernel_initializer="Orthogonal"))(x)
                    # x = BatchNormalization(mode=2)(x)
                    x = Dropout(rate=0.5)(x)
                    x = Activation('relu')(x)
                    # (-1, n, f)
                    x = SubPixel1D(x, r=2)
                    x = Concatenate(axis=-1)([x, l_in])
                    # (-1, n, 2f)

                    print('U-Block: ', x.get_shape())

            # final conv layer
            with tf.name_scope('lastconv'):
                x = Conv1D(filters=2,
                           kernel_size=9,
                           activation=None,
                           padding='same',
                           kernel_initializer=normal_init)(x)
                x = SubPixel1D(x, r=2)
                print(x.get_shape())

            g = add([x, X])

        return g
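
normal_init (used just above) and orthogonal_init (used in the Keras 1 variants) are project-specific initializers that never appear in these examples. A plausible Keras 2 style sketch, with the 1e-3 standard deviation being an assumption:

from tensorflow.keras.initializers import Orthogonal, RandomNormal

def orthogonal_init(shape, dtype=None):
    return Orthogonal()(shape, dtype=dtype)

def normal_init(shape, dtype=None):
    return RandomNormal(stddev=1e-3)(shape, dtype=dtype)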