Example no. 1
0
    def __init__(self, filters=1, kernel_size=80, rank=1, strides=1, padding='valid',
                 data_format='channels_last', dilation_rate=1, activation=None, use_bias=True,
                 fsHz=1000.,
                 fc_initializer=initializers.RandomUniform(minval=10, maxval=400),
                 n_order_initializer=initializers.constant(4.),
                 amp_initializer=initializers.constant(10 ** 5),
                 beta_initializer=initializers.RandomNormal(mean=30, stddev=6),
                 bias_initializer='zeros',
                 **kwargs):
        """Configure a 1-D gammatone convolution layer.

        Normalizes the convolution hyper-parameters via ``conv_utils``,
        resolves the initializers for the gammatone parameters
        (center frequency, order, amplitude, beta) and the bias, and
        precomputes the time axis ``self.t`` covering one kernel at
        sampling rate ``fsHz``.
        """
        super(Conv1D_gammatone, self).__init__(**kwargs)

        # Normalized convolution hyper-parameters.
        self.rank = rank
        self.filters = filters
        self.kernel_size_ = kernel_size  # raw (un-normalized) kernel length
        self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank, 'kernel_size')
        self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, rank, 'dilation_rate')
        self.activation = activations.get(activation)
        self.use_bias = use_bias

        # Resolve all initializers (gammatone parameters + bias).
        self.bias_initializer = initializers.get(bias_initializer)
        self.fc_initializer = initializers.get(fc_initializer)
        self.n_order_initializer = initializers.get(n_order_initializer)
        self.amp_initializer = initializers.get(amp_initializer)
        self.beta_initializer = initializers.get(beta_initializer)

        # Inputs are expected to have rank `rank + 2`
        # (e.g. batch, steps, channels for rank 1).
        self.input_spec = InputSpec(ndim=self.rank + 2)

        # Time axis spanning one kernel: samples spaced 1/fsHz apart,
        # shaped (n_samples, 1) for later broadcasting.
        self.fsHz = fsHz
        sample_period = 1 / float(fsHz)
        time_axis = tf.range(start=0, limit=kernel_size / float(fsHz),
                             delta=sample_period, dtype=K.floatx())
        self.t = tf.expand_dims(input=time_axis, axis=-1)
Example no. 2
0
    def __init__(self, hyper_p=None):
        """Build and compile the VAE from its encoder and decoder halves.

        Merges ``hyper_p`` over ``self.Default`` into the instance's
        attributes, wires the recognition (encoder) and generator
        (decoder) models into a single ``vae_model``, and compiles it
        with a mask-aware loss.

        Parameters
        ----------
        hyper_p : dict, optional
            Hyper-parameter overrides merged on top of ``self.Default``.
            Defaults to an empty dict.
        """
        # `None` sentinel instead of a mutable `{}` default: a shared
        # dict default would be one object reused across all calls.
        if hyper_p is None:
            hyper_p = {}
        self.__dict__.update(self.Default, **hyper_p)

        if K.backend() != 'tensorflow':
            print('backend is ', K.backend())

        self.init_method = initializers.RandomNormal(mean=0.0,
                                                     stddev=0.05,
                                                     seed=None)

        # Load pipelines of the 2 connected sub-models.
        self.recognition = self.encoder()
        self.generator = self.decoder()

        # Define the main network inputs. `vae_input` (not `input`)
        # avoids shadowing the builtin.
        vae_input = Input(shape=(self.inp_shape, ),
                          dtype='float32',
                          name='VAE_input')
        mask_input = Input(shape=(self.inp_shape, ),
                           dtype='float32',
                           name='mask_input')

        self.z_mu, self.z_var = self.recognition(vae_input)
        vae_out = self.generator([self.z_mu, self.z_var])

        # Generate vae_model by connecting recognition_model to
        # generator_model.
        self.vae_model = Model(inputs=[vae_input, mask_input],
                               outputs=vae_out,
                               name='VAE')

        # Bind the mask tensor now so Keras sees the conventional
        # two-argument (y_true, y_pred) loss signature.
        custom_loss = partial(self.custom_loss, mask_input)
        custom_loss.__name__ = "masked_bce"

        method = getattr(optimizers, self.optimiz)
        self.vae_model.compile(optimizer=method(lr=self.lr_rate),
                               loss=custom_loss)
Example no. 3
0
        x_test[i] = data.reshape(dim, dim, 1)
        y_test[i] = object_file['label']
        # one_hot_labels = keras.utils.to_categorical(object_file['label'], num_classes=2)
        # y_test[i]=one_hot_labels

    training_data = objectnessgenerator.DataGenerator(filelist, mylist,
                                                      trainingfolder, winubu,
                                                      dim)

    model = Sequential()

    model.add(
        Conv2D(32, (6, 6),
               activation='relu',
               input_shape=(64, 64, 1),
               kernel_initializer=initializers.RandomNormal(stddev=0.001)))
    model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    model.add(BatchNormalization())
    #model.add(Dropout(0.1))
    model.add(
        Conv2D(64, (3, 3),
               activation='relu',
               kernel_initializer=initializers.RandomNormal(stddev=0.001)))
    model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    model.add(BatchNormalization())
    #model.add(Dropout(0.1))

    model.add(
        Conv2D(128, (3, 3),
               activation='relu',
               kernel_initializer=initializers.RandomNormal(stddev=0.001)))