    def __init__(self, conf):
        self.conf = conf
            
        self.hps = self.conf['hps']
        self.nn_arch = self.conf['nn_arch']
        self.model_loading = self.conf['model_loading']

        if self.model_loading:
            self.digit_classificaton_model = load_model(self.MODEL_PATH, custom_objects={'RBM': RBM})
            self.digit_classificaton_model.summary()
            self.rbm = self.digit_classificaton_model.get_layer('rbm_1')
        else:        
            # Design the model.
            input_image = Input(shape=(self.IMAGE_SIZE,))
            x = Lambda(lambda x: x/255)(input_image)
            
            # RBM layer.
            # The RBM layer's name must match the name looked up by get_layer()
            # in the model-loading branch above.
            self.rbm = RBM(self.conf['rbm_hps'], self.nn_arch['output_dim'], name='rbm')
            x = self.rbm(x)
            
            # Softmax layer.
            output = Dense(10, activation='softmax')(x)
            
            # Create a model.
            self.digit_classificaton_model = Model(inputs=[input_image], outputs=[output])
            
            opt = optimizers.Adam(lr=self.hps['lr'],
                                  beta_1=self.hps['beta_1'],
                                  beta_2=self.hps['beta_2'],
                                  decay=self.hps['decay'])
            
            self.digit_classificaton_model.compile(optimizer=opt, loss='categorical_crossentropy')
            self.digit_classificaton_model.summary() 
Example #2
    def create(self):
        """
            Creates the VGG16 network architecture and loads the pretrained weights.

            Args:   None
            Returns:   None
        """
        model = self.model = Sequential()
#        model.add(Lambda(vgg_preprocess, input_shape=(3,224,224), output_shape=(3,224,224)))
        model.add(Lambda(vgg_preprocess, input_shape=(224,224, 3)))

        self.ConvBlock(2, 64)
        self.ConvBlock(2, 128)
        self.ConvBlock(3, 256)
        self.ConvBlock(3, 512)
        self.ConvBlock(3, 512)

        model.add(Flatten())
        self.FCBlock()
        self.FCBlock()
        model.add(Dense(1000, activation='softmax'))

        fname = 'vgg16_weights_tf_dim_ordering_tf_kernels.h5'
        model.load_weights(get_file(fname, self.FILE_PATH+fname, cache_subdir='models'))
Example #3
def res_block(x_in, filters, scaling):
    x = Conv2D(filters, 3, padding='same', activation='relu')(x_in)
    x = Conv2D(filters, 3, padding='same')(x)
    if scaling:
        x = Lambda(lambda t: t * scaling)(x)
    x = Add()([x_in, x])
    return x
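A minimal usage sketch (not from the original source) showing how res_block can be chained into a small super-resolution trunk; the tf.keras import paths, the 64-filter width, and the 0.1 residual scaling are assumptions:

from tensorflow.keras.layers import Input, Conv2D
from tensorflow.keras.models import Model

x_in = Input(shape=(None, None, 3))            # RGB image of arbitrary size
b = Conv2D(64, 3, padding='same')(x_in)        # shallow feature extraction
for _ in range(8):                             # residual blocks with residual scaling
    b = res_block(b, filters=64, scaling=0.1)
out = Conv2D(3, 3, padding='same')(b)
sr_trunk = Model(x_in, out)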
Example #4
    def initialize(self, inputs):
        scale = 8
        num_filters = 32
        num_residual_blocks = 32
        res_block_expansion = 6

        # main branch (revise padding)
        m = conv2d_weight_norm(inputs, num_filters, 3, padding='valid')
        for i in range(num_residual_blocks):
            m = self.res_block_b(m,
                                 num_filters,
                                 res_block_expansion,
                                 kernel_size=3,
                                 scaling=None)
        m = Lambda(lambda x: tf.pad(
            x, tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]), 'SYMMETRIC'))(m)
        m = conv2d_weight_norm(m, 3 * scale**2, 3, padding='same')
        m = self.SubpixelConv2D(scale)(m)

        # skip branch
        #         s = Lambda(lambda x: tf.pad(x, tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]])))(inputs)
        s = conv2d_weight_norm(inputs, 3 * scale**2, 5, padding='same')
        s = self.SubpixelConv2D(scale)(s)

        x = Add()([m, s])
        print(x.shape)
        return x
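The snippet relies on conv2d_weight_norm and SubpixelConv2D helpers that are not shown. As a hedged sketch (assuming tf and Lambda are in scope as above; the original may differ), SubpixelConv2D is presumably a thin Lambda wrapper around depth-to-space:

    def SubpixelConv2D(self, scale):
        # Rearranges (batch, H, W, C * scale**2) into (batch, H * scale, W * scale, C).
        return Lambda(lambda x: tf.nn.depth_to_space(x, scale))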
Example #5
def upsampler(x, scale, num_feats, name):
    up_name = name
    _scale = int(math.log(scale,2))
    for i in range(_scale):
        x = Conv2D(num_feats*4, 3, padding='same', name=up_name+'/up'+str(i+1)+'/conv')(x)
        x = Lambda(pixel_shuffle(scale=2), name=up_name+'/up'+str(i+1)+'/pixel_shuffle')(x)
    return x
Example #6
def default_n_linear(num_outputs):
    img_in = Input(shape=(120, 160, 3), name='img_in')
    x = img_in
    x = Cropping2D(cropping=((60, 0), (0, 0)))(x)  # trim 60 pixels off top
    x = Lambda(lambda x: x / 127.5 - 1.)(x)  # normalize and re-center
    x = Convolution2D(24, (5, 5), strides=(2, 2), activation='relu')(x)
    x = Convolution2D(32, (5, 5), strides=(2, 2), activation='relu')(x)
    x = Convolution2D(64, (5, 5), strides=(1, 1), activation='relu')(x)
    x = Convolution2D(64, (3, 3), strides=(1, 1), activation='relu')(x)
    x = Convolution2D(64, (3, 3), strides=(1, 1), activation='relu')(x)

    x = Flatten(name='flattened')(x)
    x = Dense(100, activation='relu')(x)
    x = Dropout(.1)(x)
    x = Dense(50, activation='relu')(x)
    x = Dropout(.1)(x)

    outputs = []

    for i in range(num_outputs):
        outputs.append(
            Dense(1, activation='linear', name='n_outputs' + str(i))(x))

    model = Model(inputs=[img_in], outputs=outputs)

    model.compile(optimizer='adam', loss='mse')

    return model
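A small usage sketch (not part of the original code); the two-output configuration, e.g. steering and throttle, is an assumption:

import numpy as np

model = default_n_linear(num_outputs=2)
dummy_frame = np.zeros((1, 120, 160, 3), dtype=np.float32)
steering, throttle = model.predict(dummy_frame)   # a (1, 1) array per output head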
Example #7
    def init_model(self, input_shape: int, n_actions: int):
        inp = Input(shape=(input_shape, ))
        layer_shared1 = Dense(64, activation='relu')(inp)
        layer_shared1 = BatchNormalization()(layer_shared1)
        layer_shared2 = Dense(64, activation='relu')(layer_shared1)
        layer_shared2 = BatchNormalization()(layer_shared2)

        layer_v1 = Dense(64, activation='relu')(layer_shared2)
        layer_v1 = BatchNormalization()(layer_v1)
        layer_a1 = Dense(64, activation='relu')(layer_shared2)
        layer_a1 = BatchNormalization()(layer_a1)
        # the value layer output is a single scalar per state
        layer_v2 = Dense(1, activation='linear')(layer_v1)
        # The advantage function subtracts the value of the state from the Q
        # function to obtain a relative measure of the importance of each action.
        layer_a2 = Dense(n_actions, activation='linear')(layer_a1)

        # The Q layer combines the value and advantage streams.
        # A Lambda layer lets an arbitrary expression be applied to Keras tensors:
        # https://keras.io/api/layers/core_layers/lambda/
        # The combination follows equation (9) of https://arxiv.org/pdf/1511.06581.pdf:
        # Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a)), with the mean taken per sample over actions.
        layer_q = Lambda(lambda x: x[0] + x[1] - K.mean(x[1], axis=1, keepdims=True),
                         output_shape=(n_actions, ))([layer_v2, layer_a2])

        self.model = Model(inp, layer_q)
        self.model.compile(optimizer=Adam(lr=self.alpha), loss='mse')
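A NumPy illustration (not part of the original class) of what the combining Lambda computes, following equation (9): the advantage mean is taken per sample over the action axis, so adding a constant to every advantage leaves Q unchanged:

import numpy as np

v = np.array([[10.0], [2.0]])                       # V(s), shape (batch, 1)
a = np.array([[1.0, 2.0, 3.0], [0.0, 0.0, 3.0]])    # A(s, a), shape (batch, n_actions)
q = v + (a - a.mean(axis=1, keepdims=True))         # V broadcasts over the action axis
# q == [[ 9., 10., 11.],
#       [ 1.,  1.,  4.]]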
Example #8
def build_model():
    """
        Build keras model
    """
    # Note: the Lambda layer can cause problems with some tools.
    # It may be necessary to remove it; in that case, increase the
    # number of epochs. Even so, that will not be enough to finish
    # the first track.
    model = Sequential()
    model.add(Lambda(lambda x: (x / 127.5) - 1., input_shape=(160, 320, 3)))
    model.add(
        Cropping2D(cropping=((70, 25), (0, 0)), input_shape=(160, 320, 3)))
    model.add(Conv2D(8, 9, strides=(4, 4), padding="same", activation="elu"))
    model.add(Conv2D(16, 5, strides=(2, 2), padding="same", activation="elu"))
    model.add(Conv2D(32, 4, strides=(1, 1), padding="same", activation="elu"))
    model.add(Flatten())
    model.add(Dropout(.6))
    model.add(Dense(1024, activation="elu"))
    model.add(Dropout(.3))
    model.add(Dense(1))

    #ada = optimizers.Adagrad(lr=0.001)
    model.compile(loss="mse",
                  optimizer="adam",
                  metrics=['accuracy', 'mean_squared_error'])

    return model
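If the Lambda layer has to be removed, as the note above warns, the same scaling can be done in the data pipeline instead. A minimal sketch (an assumption about how batches are fed, not part of the original project):

import numpy as np

def normalize_batch(images):
    # Same mapping the Lambda layer performs: uint8 pixels to the range [-1, 1].
    return images.astype(np.float32) / 127.5 - 1.0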
Example #9
def step(x):
    """Computes step (Heaviside) of x element-wise.
       H(x) = 0 if x<=0
       H(x) = 1 if x>0

    # Arguments
        x: Functional object.

    # Returns
        A new functional object.
    """
    validate_functional(x)

    lmbd = []
    for i in range(len(x.outputs)):
        lmbd.append(
            Lambda(
                lambda x: K.cast(K.greater(x, 0.0), x.dtype), 
                name=graph_unique_name("step")
            )
        )
        
    Functional = x.get_class()
    res = Functional(
        inputs = x.inputs.copy(),
        outputs = _apply_operation(lmbd, x),
        layers = lmbd
    )
    return res
Example #10
    def test_2nd_order_gradient_through_updated_model(self):
        # Given
        initial_model = Sequential([
            Dense(1, use_bias=False, kernel_initializer='ones', input_shape=(1,)),
            Lambda(lambda x: x ** 2)
        ])
        x = np.array([[3]])

        updated_model = clone_model(initial_model)

        # When
        with tf.GradientTape() as outer_tape:
            take_n_gradient_step(
                initial_model,
                updated_model,
                n_step=1,
                alpha=1.0,
                loss=(lambda y, p: p),
                data_x=x,
                data_y=x
            )
            yp = updated_model(x)
        grad_of_grads = outer_tape.gradient(yp, initial_model.trainable_variables)

        # Then
        self.assertEqual(5202, grad_of_grads[0])
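The expected value 5202 can be checked by hand. The model computes (w*x)**2 with w = 1 and x = 3, and the loss is the prediction itself, so the inner step gives w' = w - alpha * 2*w*x**2; differentiating (w'*x)**2 with respect to the original w reproduces the constant (plain-Python check, illustration only):

w, x, alpha = 1.0, 3.0, 1.0
w_updated = w - alpha * 2 * w * x**2          # 1 - 18 = -17
# d/dw [(w_updated * x)**2], using d(w_updated)/dw = 1 - 2*x**2:
grad = 2 * (w_updated * x) * (1 - 2 * x**2) * x
print(grad)                                   # 5202.0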
Example #11
    def compile_model(self, optimizer_name, optimizer_args, rdrop_alpha=None):
        logger.info("compiling model...")
        with self.get_scope():
            classify_output = Input(shape=(self.label_num,) if self.multi_label else (), name='classify_output', dtype=tf.float32)
            inputs = self.nn_model.inputs
            output = self.nn_model.output
            loss_input = [classify_output, output]
            if rdrop_alpha:
                output1 = self.nn_model(inputs)
                loss_input.append(output1)
                output = Lambda(function=lambda x: sum(x) / len(x), name="avg_pool_layer")([output, output1])
            self.train_model = Model(inputs + [classify_output], output, name="train_model")


        loss_layer = build_classify_loss_layer(multi_label=self.multi_label, rdrop_alpha=rdrop_alpha)
        loss = loss_layer(loss_input)
        self.train_model.add_loss(loss)

        accuracy_func = binary_accuracy if self.multi_label else sparse_categorical_accuracy
        metric_layer = MetricLayer(accuracy_func, name="metric_layer")
        accuracy = metric_layer([classify_output, output])
        self.train_model.add_metric(accuracy, aggregation="mean", name="accuracy")

        optimizer = OptimizerFactory.create(optimizer_name, optimizer_args)
        self.train_model.compile(optimizer=optimizer)
        logger.info("training model's summary:")
        self.train_model.summary(print_fn=logger.info)
        self._update_model_dict("train", self.train_model)
Example #12
    def upsample(self, x_in, name_up):
        x = Conv2D(self.n_filters, kernel_size=3, padding='same',
                   kernel_initializer=self.init_kernel)(x_in)
        x = Lambda(self.pixel_shuffle(scale=2))(x)
        x = LeakyReLU(0.2)(x)

        return x
Example #13
def Mildnet_vgg16():
    vgg_model = VGG16(weights="imagenet",
                      include_top=False,
                      input_shape=(224, 224, 3))

    for layer in vgg_model.layers[:10]:
        layer.trainable = False

    intermediate_layer_outputs = get_layers_output_by_name(
        vgg_model,
        ["block1_pool", "block2_pool", "block3_pool", "block4_pool"])
    convnet_output = GlobalAveragePooling2D()(vgg_model.output)
    for layer_name, output in intermediate_layer_outputs.items():
        output = GlobalAveragePooling2D()(output)
        convnet_output = concatenate([convnet_output, output])

    convnet_output = Dense(2048, activation='relu')(convnet_output)
    convnet_output = Dropout(0.6)(convnet_output)
    convnet_output = Dense(2048, activation='relu')(convnet_output)
    convnet_output = Lambda(lambda x: K.l2_normalize(x, axis=1))(
        convnet_output)

    final_model = tf.keras.models.Model(inputs=vgg_model.input,
                                        outputs=convnet_output)

    return final_model
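get_layers_output_by_name is not defined in this snippet. A plausible sketch (an assumption; the original helper may differ) that maps each requested layer name to its symbolic output tensor:

def get_layers_output_by_name(model, layer_names):
    return {name: model.get_layer(name).output for name in layer_names}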
Example #14
def Mildnet_mobilenet():
    vgg_model = MobileNet(weights=None,
                          include_top=False,
                          input_shape=(224, 224, 3))
    intermediate_layer_outputs = get_layers_output_by_name(
        vgg_model, [
            "conv_dw_1_relu", "conv_dw_2_relu", "conv_dw_4_relu",
            "conv_dw_6_relu", "conv_dw_12_relu"
        ])
    convnet_output = GlobalAveragePooling2D()(vgg_model.output)
    for layer_name, output in intermediate_layer_outputs.items():
        output = GlobalAveragePooling2D()(output)
        convnet_output = concatenate([convnet_output, output])

    convnet_output = Dense(1024, activation='relu')(convnet_output)
    convnet_output = Dropout(0.5)(convnet_output)
    convnet_output = Dense(1024, activation='relu')(convnet_output)
    convnet_output = Lambda(lambda x: K.l2_normalize(x, axis=1))(
        convnet_output)

    first_input = Input(shape=(224, 224, 3))
    second_input = Input(shape=(224, 224, 3))

    final_model = tf.keras.models.Model(
        inputs=[first_input, second_input, vgg_model.input],
        outputs=convnet_output)

    return final_model
Example #15
    def Create_CNN(self, inp, name_suffix):
        """
        """
        x = self.embedding(inp)
        if self.emb_dropout > 0:
            x = SpatialDropout1D(self.emb_dropout)(x)
        # if self.char_split:
        #     # First conv layer
        #     x = Conv1D(filters=128, kernel_size=3, strides=2, padding="same")(x)

        cnn_list = []
        rnn_list = []
        for filter_size in self.filter_size:
            if filter_size > 0:
                conc = self.ConvBlock(x, filter_size)
                cnn_list.append(conc)
        for rnn_unit in self.rnn_units:
            if rnn_unit > 0:
                rnn_maps = Bidirectional(GRU(rnn_unit, return_sequences=True, \
                            dropout=self.rnn_input_dropout, recurrent_dropout=self.rnn_state_dropout))(x)
                conc = self.pooling_blend(rnn_maps)
                rnn_list.append(conc)

        conc_list = cnn_list + rnn_list
        if len(conc_list) == 1:
            conc = Lambda(lambda x: x,
                          name='RCNN_CONC' + name_suffix)(conc_list)
        else:
            conc = Concatenate(name='RCNN_CONC' + name_suffix)(conc_list)
        return conc
Example #16
def _apply_function(x, fname, **kwargs):
    """Apply `fname` function to x element-wise.

    # Arguments
        x: Functional object.

    # Returns
        A new functional object.
    """
    validate_functional(x)

    fun = get_activation(fname)
    lmbd = []
    for i in range(len(x.outputs)):
        lmbd.append(
            Lambda(
                lambda x: fun(x, **kwargs),
                name=graph_unique_name("{}".format(fname))
            )
        )
    Functional = x.get_class()
    res = Functional(
        inputs = x.inputs.copy(),
        outputs = _apply_operation(lmbd, x),
        layers = lmbd
    )
    return res
Example #17
    def Create_CNN(self):
        """
        """
        inp = Input(shape=(self.max_len, ))
        embedding = Embedding(self.max_token,
                              self.embedding_dim,
                              weights=[self.embedding_weight],
                              trainable=not self.fix_wv_model)
        x = embedding(inp)
        if self.emb_dropout > 0:
            x = SpatialDropout1D(self.emb_dropout)(x)
        # if self.char_split:
        #     # First conv layer
        #     x = Conv1D(filters=128, kernel_size=3, strides=2, padding="same")(x)

        cnn_list = []
        rnn_list = []
        for filter_size in self.filter_size:
            if filter_size > 0:
                conc = self.ConvBlock(x, filter_size)
                cnn_list.append(conc)
        for rnn_unit in self.context_vector_dim:
            if rnn_unit > 0:
                rnn_maps = Bidirectional(GRU(rnn_unit, return_sequences=True, \
                            dropout=self.rnn_input_dropout, recurrent_dropout=self.rnn_state_dropout))(x)
                conc = self.pooling_blend(rnn_maps)
                rnn_list.append(conc)

        conc_list = cnn_list + rnn_list
        if len(conc_list) == 1:
            conc = Lambda(lambda x: x, name='RCNN_CONC')(conc_list)
        else:
            conc = Concatenate(name='RCNN_CONC')(conc_list)

        # conc = self.pooling_blend(x)
        if self.separate_label_layer:
            for i in range(self.num_classes):
                full_connect = self.full_connect_layer(conc)
                proba = Dense(1, activation="sigmoid")(full_connect)
                if i == 0:
                    outp = proba
                else:
                    outp = concatenate([outp, proba], axis=1)
        else:
            if self.hidden_dim[0] > 0:
                full_connect = self.full_connect_layer(conc)
            else:
                full_connect = conc
            # full_conv_0 = self.act_blend(full_conv_pre_act_0)
            # full_conv_pre_act_1 = Dense(self.hidden_dim[1])(full_conv_0)
            # full_conv_1 = self.act_blend(full_conv_pre_act_1)
            # flat = Flatten()(conc)
            outp = Dense(6, activation="sigmoid")(full_connect)

        model = Model(inputs=inp, outputs=outp)
        # print (model.summary())
        model.compile(optimizer="adam",
                      loss="binary_crossentropy",
                      metrics=["accuracy"])
        return model
Example #18
def outer(a, b):
    """outer product of two `Functional` objects.

    # Arguments
        a, b: outer(a,b)
        Note that at least one of them should be of type Functional.

    # Returns
        A Functional.
    """
    validate_functional(a)
    validate_functional(b)
    layers = []
    outputs = []
    for a_out in a.outputs:
        for b_out in b.outputs:
            a_shape = a_out.shape.as_list()
            b_shape = b_out.shape.as_list()
            a_exp = len(a_shape)
            b_exp = 1 if b_shape[0] is None else 0
            name = graph_unique_name("outer")
            layers.append(
                Lambda(lambda ys: multiply(expand_dims(ys[0], a_exp),
                                           expand_dims(ys[1], b_exp)),
                       name=name))
            net_output = layers[-1]([a_out, b_out])
            layers.append(Flatten())
            outputs.append(layers[-1](net_output))
    # return the functional
    assert a.get_class() == b.get_class()
    Functional = a.get_class()
    res = Functional(inputs=unique_tensors(a.inputs.copy() + b.inputs.copy()),
                     outputs=outputs,
                     layers=layers)
    return res
Example #19
def standardUnet(width, height, chann, nc):    

    inputs = Input((height, width, chann))
    # image normalization between 0 and 1
    s = Lambda(lambda x: x / 255) (inputs)


    c1 = Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (s)
    #c1 = Dropout(0.1) (c1)
    c1 = Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (c1)
    p1 = MaxPooling2D((2, 2)) (c1)

    c2 = Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (p1)
    #c2 = Dropout(0.1) (c2)
    c2 = Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (c2)
    p2 = MaxPooling2D((2, 2)) (c2)

    c3 = Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (p2)
    #c3 = Dropout(0.2) (c3)
    c3 = Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (c3)
    p3 = MaxPooling2D((2, 2)) (c3)

    c4 = Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (p3)
    #c4 = Dropout(0.2) (c4)
    c4 = Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (c4)
    p4 = MaxPooling2D(pool_size=(2, 2)) (c4)

    c5 = Conv2D(256, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (p4)
    #c5 = Dropout(0.3) (c5)
    c5 = Conv2D(256, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (c5)

    u6 = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same') (c5)
    u6 = concatenate([u6, c4])
    c6 = Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (u6)
    #c6 = Dropout(0.2) (c6)
    c6 = Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (c6)

    u7 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same') (c6)
    u7 = concatenate([u7, c3])
    c7 = Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (u7)
    #c7 = Dropout(0.2) (c7)
    c7 = Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (c7)

    u8 = Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same') (c7)
    u8 = concatenate([u8, c2])
    c8 = Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (u8)
    #c8 = Dropout(0.1) (c8)
    c8 = Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (c8)

    u9 = Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same') (c8)
    u9 = concatenate([u9, c1], axis=3)
    c9 = Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (u9)
    #c9 = Dropout(0.1) (c9)
    c9 = Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (c9)

    outputs = Conv2D(nc, (1, 1), activation='softmax') (c9)

    model = Model(inputs=inputs, outputs=outputs)

    return model
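A minimal compile-and-inspect sketch for the U-Net above; the 256x256 RGB input, the two classes, and the loss choice (which depends on how the masks are encoded) are assumptions:

model = standardUnet(width=256, height=256, chann=3, nc=2)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()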
Example #20
def getitem(x, item):
    """returns specific item of a tensor (Functional).

    # Arguments
        item: Item list.

    # Returns
        A new functional object.
    """
    validate_functional(x)

    in_item = item
    print(in_item)
    if not isinstance(in_item, tuple):
        in_item = (in_item, )
    print(in_item)

    itms = (slice(None, None, None), )
    for it in in_item:
        itms += (slice(it, it + 1) if isinstance(it, int) else it, )

    lmbd = []
    ys = []
    for y in x.outputs:
        l = Lambda(lambda xx: xx[itms], name=graph_unique_name("slice"))
        lmbd.append(l)
        ys.append(l(y))

    Functional = x.get_class()
    res = Functional(inputs=x.inputs.copy(), outputs=ys, layers=lmbd)
    return res
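The slice construction above can be traced with plain Python (illustration only): a batch slice is prepended and integer indices are widened to length-1 slices so the indexed dimension is kept:

in_item = (2, slice(0, 3))
itms = (slice(None, None, None), )
for it in in_item:
    itms += (slice(it, it + 1) if isinstance(it, int) else it, )
print(itms)  # (slice(None, None, None), slice(2, 3, None), slice(0, 3, None))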
Example #21
    def __init__(self,
                 input_data_shape,
                 num_classes,
                 model_name='resnet50',
                 trainable_layers_amount=1):
        super(ImageModel, self).__init__()

        self.num_classes = num_classes
        self.model_name = model_name
        self.trainable_layers_amount = trainable_layers_amount
        self.input_data_shape = input_data_shape

        if self.model_name == 'resnet50':
            self.base_model = ResNet50(include_top=False,
                                       weights='imagenet',
                                       input_tensor=None,
                                       input_shape=self.input_data_shape)
            # Avoid training layers in resnet model.
            layers = self.base_model.layers
            print("Layers name")
            for layer in layers:
                print(layer.name)
                layer.trainable = False
            print("Making layers trainable")
            for layer in layers[-trainable_layers_amount:]:
                print(layer.name)
                layer.trainable = True

        x0 = Input(shape=self.input_data_shape)
        x1 = Lambda(preprocess_input, output_shape=self.input_data_shape)(x0)
        x2 = self.base_model(x1)
        x3 = GlobalAveragePooling2D()(x2)
        x4 = Dense(1024, activation='relu')(x3)
        x5 = Dense(num_classes, activation='softmax', name='softmax')(x4)
        self.model = Model(inputs=x0, outputs=x5)
Example #22
def dot(f, other):
    """Dot product of two `Functional` objects.

    # Arguments
        f: Functional object.
        other: A python number or a tensor or a functional object.

    # Returns
        A Functional.
    """
    validate_functional(f)
    validate_functional(other)
    assert len(f.outputs) == len(other.outputs)

    outputs = []
    layers = []
    for fl, fr in zip(f.outputs, other.outputs):
        assert fl.shape.as_list() == fr.shape.as_list(),\
            'Expected equal dimensions for output of functionals. '
        l = Lambda(lambda x: K.reshape(
            tf.math.reduce_sum(x * fr, list(range(1, len(fl.shape)))), [-1, 1]
        ),
                   name=graph_unique_name("dot"))
        layers += [l]
        outputs += [l(fl)]

    inputs = to_list(f.inputs) + to_list(other.inputs)
    Functional = f.get_class()
    res = Functional(inputs=unique_tensors(inputs),
                     outputs=outputs,
                     layers=layers)
    return res
Example #23
    def initialize(self, inputs):
        filter_set_1 = self.conv_prelu(inputs, 96, (3, 3))
        filter_set_2 = self.conv_prelu(filter_set_1, 76, (3, 3))
        filter_set_3 = self.conv_prelu(filter_set_2, 65, (3, 3))
        filter_set_4 = self.conv_prelu(filter_set_3, 55, (3, 3))
        filter_set_5 = self.conv_prelu(filter_set_4, 47, (3, 3))
        filter_set_6 = self.conv_prelu(filter_set_5, 39, (3, 3))
        filter_set_7 = self.conv_prelu(filter_set_6, 32, (3, 3))

        concat = Concatenate(axis=3)([
            filter_set_1, filter_set_2, filter_set_3, filter_set_4,
            filter_set_5, filter_set_6, filter_set_7
        ])

        a1 = self.conv_prelu(concat, 64, (1, 1))
        b1 = self.conv_prelu(concat, 32, (1, 1))
        b2 = self.conv_prelu(b1, 32, (1, 1))

        concat_2 = Concatenate(axis=3)([a1, b2])

        input_depth = int(concat_2.shape[-1])
        scale = 8
        conv = self.conv_prelu(concat_2, scale * scale * input_depth, (3, 3))
        l = Lambda(lambda x: tf.depth_to_space(x, scale))(conv)
        l = Conv2D(3, (1, 1), padding='same')(l)

        upsampling = UpSampling2D(size=(8, 8),
                                  interpolation='bilinear')(inputs)

        add = Add()([upsampling, l])

        return add
Example #24
def diag_part(f):
    """Diag_part operation returns diagonal part of outputs of (None,N,N) functional.

    # Arguments
        f: Functional object.

    # Returns
        A Functional.
    """
    validate_functional(f)

    lmbd = []
    outputs = []
    for o in f.outputs:
        assert len(o.shape) == 3, \
            'Expected output dimension to be (None, N, N)'
        dim = o.shape[-1]
        l = Lambda(lambda x: tf.linalg.diag_part(x),
                   name=graph_unique_name("diag_part"))
        lmbd += [l]
        outputs += [l(o)]

    Functional = f.get_class()
    res = Functional(inputs=f.inputs.copy(), outputs=outputs, layers=lmbd)
    return res
Example #25
    def __init__(self):
        super().__init__([
            Input(shape=(100, 100, 3)),
            Flatten(),
            Dense(100),
            Lambda(lambda x: tf.math.l2_normalize(x, axis=1))
        ])
Example #26
def monomial(f, p=10):
    """Apply monomial feature transformation to the `Variable` objects..

    # Arguments
        p: (Int, list) monial powers to be considered.

    # Returns
        A Functional.
    """
    validate_variable(f)
    if isinstance(p, int):
        p = list(range(1, p + 1))
    else:
        assert isinstance(p, list)
    layers = []
    outputs = []
    for fi in f.outputs:
        f_dim = fi.shape.as_list()
        tile_dim = (len(f_dim) - 1) * [1] + [len(p)]
        layers.append(
            Lambda(lambda ys: tf_pow(tf_tile(ys, tile_dim), p),
                   name=graph_unique_name("monomials")))
        outputs.append(layers[-1](fi))

    Functional = f.get_class()
    res = Functional(inputs=unique_tensors(f.inputs.copy()),
                     outputs=outputs,
                     layers=layers)

    return res
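A NumPy illustration (not library code) of the tile-then-power trick used in the Lambda above, for p = [1, 2, 3] applied to a single scalar feature:

import numpy as np

x = np.array([[2.0]])                     # one sample, one feature
tiled = np.tile(x, (1, 3))                # [[2., 2., 2.]] -- one copy per power
powers = tiled ** np.array([1, 2, 3])     # [[2., 4., 8.]] -- the monomial features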
Example #27
    def call(self, inputs, mask=None, **kwargs):

        input_fw = inputs
        input_bw = inputs
        for i in range(self.layers):
            output_fw = self.fw_lstm[i](input_fw)
            output_bw = self.bw_lstm[i](input_bw)
            output_bw = Lambda(lambda x: K.reverse(x, 1),
                               mask=lambda inputs, mask: mask)(output_bw)

            if i >= self.layers - self.res_layers:
                output_fw += input_fw
                output_bw += input_bw
            input_fw = output_fw
            input_bw = output_bw

        output_fw = input_fw
        output_bw = input_bw

        if self.merge_mode == "fw":
            output = output_fw
        elif self.merge_mode == "bw":
            output = output_bw
        elif self.merge_mode == 'concat':
            output = K.concatenate([output_fw, output_bw])
        elif self.merge_mode == 'sum':
            output = output_fw + output_bw
        elif self.merge_mode == 'ave':
            output = (output_fw + output_bw) / 2
        elif self.merge_mode == 'mul':
            output = output_fw * output_bw
        elif self.merge_mode is None:
            output = [output_fw, output_bw]

        return output
Example #28
def MultiTaskLayer(x, derive_root, input_type):
    classes_key = 30 if input_type.startswith(
        'spelling') else 24  # Major keys: 0-11, Minor keys: 12-23
    classes_degree = 21  # 7 degrees * 3: regular, diminished, augmented
    classes_root = 35 if input_type.startswith(
        'spelling') else 12  # the twelve notes without enharmonic duplicates
    classes_quality = 12  # ['M', 'm', 'd', 'a', 'M7', 'm7', 'D7', 'd7', 'h7', 'Gr+6', 'It+6', 'Fr+6']
    classes_inversion = 4  # root position, 1st, 2nd, and 3rd inversion (the last only for seventh chords)

    o_key = TimeDistributed(Dense(classes_key, activation='softmax'),
                            name='key')(x)
    z = Concatenate()([x, o_key])
    o_dg1 = TimeDistributed(Dense(classes_degree, activation='softmax'),
                            name='degree_1')(z)
    o_dg2 = TimeDistributed(Dense(classes_degree, activation='softmax'),
                            name='degree_2')(z)
    o_qlt = TimeDistributed(Dense(classes_quality, activation='softmax'),
                            name='quality')(x)
    o_inv = TimeDistributed(Dense(classes_inversion, activation='softmax'),
                            name='inversion')(x)
    if derive_root and input_type.startswith('pitch'):
        o_roo = Lambda(find_root_pitch, name='root')([o_key, o_dg1, o_dg2])
    else:
        o_roo = TimeDistributed(Dense(classes_root, activation='softmax'),
                                name='root')(x)
    return [o_key, o_dg1, o_dg2, o_qlt, o_inv, o_roo]
Example #29
    def build(self, input_shape):
        self.T = Dense(units=input_shape[-1],
                       activation='sigmoid',
                       bias_initializer=self.bias)
        self.H = Dense(units=input_shape[-1], activation='relu')
        self.cary_gate = Lambda(lambda x: 1.0 - x,
                                output_shape=(input_shape[-1], ))
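build() above only creates the sub-layers. A hedged sketch (an assumption, not the original class) of a matching call() for a highway layer, where the output is T(x)*H(x) + (1 - T(x))*x:

    def call(self, x):
        t = self.T(x)              # transform gate, values in (0, 1)
        h = self.H(x)              # candidate transformation
        c = self.cary_gate(t)      # carry gate = 1 - t, the Lambda defined in build()
        return t * h + c * x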
Example #30
    def build_model(self, pos_mode=0, use_mask=False, active_layers=999):
        v_input = Input(shape=(self.seq_len, self.d_feature), name='v_input')
        d0 = TimeDistributed(Dense(self.d_model))(v_input)
        pos_input = Input(shape=(self.seq_len, ),
                          dtype='int32',
                          name='pos_input')
        if pos_mode == 0:  # use fixed pos embedding
            pos_embedding = Embedding(self.seq_len, self.d_model, trainable=False,
                                      weights=[GetPosEncodingMatrix(self.seq_len, self.d_model)])
            p0 = pos_embedding(pos_input)
        elif pos_mode == 1:  # use trainable pos embedding
            pos_embedding = Embedding(self.seq_len, self.d_model)
            p0 = pos_embedding(pos_input)
        else:  # no pos embedding
            p0 = None
        if p0 is not None:
            combine_input = Add()([d0, p0])
        else:
            combine_input = d0  # no pos
        sub_mask = None
        if use_mask:
            sub_mask = Lambda(GetSubMask)(pos_input)
        enc_output = self.encoder(combine_input,
                                  mask=sub_mask,
                                  active_layers=active_layers)
        # score
        time_score_dense1 = TimeDistributed(
            Dense(self.d_model, activation='tanh'))(enc_output)
        time_score_dense2 = TimeDistributed(Dense(1))(time_score_dense1)
        flat = Flatten()(time_score_dense2)
        score_output = Activation(activation='softmax')(flat)
        self.model = Model([pos_input, v_input], score_output)
        return self.model
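GetSubMask and GetPosEncodingMatrix are helpers from the surrounding project and are not shown here. A hypothetical stand-in for GetSubMask, assuming the usual lower-triangular look-ahead mask so position i only attends to positions <= i (the original may differ):

import tensorflow as tf

def get_sub_mask_sketch(pos_input):
    seq_len = tf.shape(pos_input)[1]
    ones = tf.ones((seq_len, seq_len))
    mask = tf.linalg.band_part(ones, -1, 0)   # keep the lower triangle, zero elsewhere
    return tf.expand_dims(mask, 0)            # add a broadcastable batch axis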