Code Example #1
def RCL_block(l, activation_function=PReLU(), features=64, kernel_size=kernelSz, name="RCL"):
    #INITIAL CONVOLUTION BLOCK
    conv1 = Conv1D(features, kernel_size, border_mode='same',name=name)
    stack1 = conv1(l)
    stack2 = activation_function(stack1)
    stack3 = BatchNormalization()(stack2)

    #UNROLLED RECURRENT BLOCK(s)
    conv2 = Conv1D(features, kernel_size, border_mode='same', init = 'he_normal')
    stack4 = conv2(stack3)
    stack5 = merge([stack1, stack4], mode='sum')
    stack6 = activation_function(stack5)
    stack7 = BatchNormalization()(stack6)

    conv3 = Convolution1D_tied(features, kernel_size, border_mode='same', tied_to = conv2)
    stack8 = conv3(stack7)
    stack9 = merge([stack1, stack8], mode='sum')
    stack10 = activation_function(stack9)
    stack11 = BatchNormalization()(stack10)

    conv4 = Convolution1D_tied(features, kernel_size, border_mode='same', tied_to = conv2)
    stack12 = conv4(stack11)
    stack13 = merge([stack1, stack12], mode='sum')
    stack14 = activation_function(stack13)
    stack15 = BatchNormalization()(stack14)

    return stack15
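
Note: the block above unrolls a recurrent convolutional layer: one feed-forward convolution (conv1) plus repeated recurrent convolutions whose weights are shared through the custom Convolution1D_tied layer (tied_to=conv2). In the Keras 2 functional API no custom layer is needed, since reusing a single layer instance already ties its weights. A minimal sketch under that assumption (input shape, kernel size and step count are illustrative):

from keras.layers import Input, Conv1D, PReLU, BatchNormalization, add
from keras.models import Model

def rcl_block(l, features=64, kernel_size=3, steps=3):
    ff = Conv1D(features, kernel_size, padding='same')(l)  # feed-forward response
    x = BatchNormalization()(PReLU()(ff))
    conv_r = Conv1D(features, kernel_size, padding='same',
                    kernel_initializer='he_normal')        # one instance = tied weights
    for _ in range(steps):
        x = BatchNormalization()(PReLU()(add([ff, conv_r(x)])))
    return x

inp = Input(shape=(128, 64))
model = Model(inp, rcl_block(inp))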
Code Example #2
File: testFConv.py | Project: aasensio/DNHazel
    def defineNetwork(self):
        print("Setting up network...")

        conv = [None] * self.n_conv_layers
        deconv = [None] * self.n_conv_layers

        inputs = Input(shape=(self.nx, self.ny, self.n_diversity))
        conv[0] = Convolution2D(self.n_filters, 3, 3, activation='relu', subsample=(self.stride,self.stride), border_mode='same', init='he_normal')(inputs)
        for i in range(self.n_conv_layers-1):
            conv[i+1] = Convolution2D(self.n_filters, 3, 3, activation='relu', subsample=(self.stride,self.stride), border_mode='same', init='he_normal')(conv[i])

        deconv[0] = Deconvolution2D(self.n_filters, 3, 3, activation='relu', output_shape=(self.batch_size, self.nx, self.ny,self.n_filters), subsample=(self.stride,self.stride), border_mode='same', init='he_normal')(conv[-1])
        for i in range(self.n_conv_layers-1):
            if (i % self.skip_frequency == 0):
                x = Deconvolution2D(self.n_filters, 3, 3, output_shape=(self.batch_size,self.nx, self.ny,self.n_filters), activation='relu', subsample=(self.stride,self.stride), border_mode='same', init='he_normal')(deconv[i])                
                x = merge([conv[self.n_conv_layers-i-2], x], mode='sum')
                deconv[i+1] = Activation('relu')(x)

            else:
                deconv[i+1] = Deconvolution2D(self.n_filters, 3, 3, output_shape=(self.batch_size,self.nx, self.ny,self.n_filters), activation='relu', subsample=(self.stride,self.stride), border_mode='same', init='he_normal')(deconv[i])

        x = Deconvolution2D(1, 1, 1, output_shape=(self.batch_size,self.nx, self.ny, 1), activation='linear', subsample=(self.stride,self.stride), border_mode='same', init='he_normal')(deconv[-1])

        focused = Lambda(lambda x: x[:,:,:,0:1], output_shape=(self.nx, self.ny, 1))(inputs)

        final = merge([x, focused], 'sum')
        self.model = Model(input=inputs, output=final)

        self.model.load_weights("{0}_weights.hdf5".format(self.root))
Code Example #3
def inception_stem(input):
    c = Convolution2D(32, 3, 3, activation='relu', subsample=(2, 2))(input)
    # c = Convolution2D(32, 3, 3, activation='relu', subsample=(1, 1))(input)
    c = Convolution2D(32, 3, 3, activation='relu', )(c)
    c = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(c)

    c1 = MaxPooling2D((3, 3), strides=(2, 2))(c)
    c2 = Convolution2D(64, 3, 3, activation='relu', subsample=(2, 2))(c)

    m = merge([c1, c2], mode='concat', concat_axis=1)

    c1 = Convolution2D(64, 1, 1, activation='relu', border_mode='same')(m)
    c1 = Convolution2D(96, 3, 3, activation='relu', )(c1)

    c2 = Convolution2D(64, 1, 1, activation='relu', border_mode='same')(m)
    c2 = Convolution2D(64, 7, 1, activation='relu', border_mode='same')(c2)
    c2 = Convolution2D(64, 1, 7, activation='relu', border_mode='same')(c2)
    c2 = Convolution2D(96, 3, 3, activation='relu', border_mode='valid')(c2)

    m2 = merge([c1, c2], mode='concat', concat_axis=1)

    p1 = MaxPooling2D((3,3), strides=(2, 2), )(m2)
    p2 = Convolution2D(96, 3, 3, activation='relu', subsample=(2,2))(m2)

    m3 = merge([p1, p2], mode='concat', concat_axis=1)
    m3 = BatchNormalization()(m3)
    m3 = Activation('relu')(m3)
    return m3
Code Example #4
def Inception_A(Input):
    Input = Activation('relu')(Input)
    path1 = Convolution2D(32 // nb_filters_reduction_factor,
                          1, 1, border_mode='same', activation='relu')(Input)
    path1 = Convolution2D(48 // nb_filters_reduction_factor,
                          3, 3, border_mode='same', activation='relu')(path1)
    path1 = Convolution2D(64 // nb_filters_reduction_factor,
                          3, 3, border_mode='same', activation='relu')(path1)

    path2 = Convolution2D(32 // nb_filters_reduction_factor,
                          1, 1, border_mode='same', activation='relu')(Input)
    path2 = Convolution2D(32 // nb_filters_reduction_factor,
                          3, 3, border_mode='same', activation='relu')(path2)

    path3 = Convolution2D(32 // nb_filters_reduction_factor,
                          1, 1, border_mode='same', activation='relu')(Input)

    out = merge([path3, path1, path2], mode='concat', concat_axis=channel_axis)
    out = Convolution2D(384 // nb_filters_reduction_factor,
                        1, 1, border_mode='same')(out)
    out = Lambda(lambda x: x * alpha)(out)

    output = merge([out, Input], mode='sum')
    output = BatchNormalization(axis=channel_axis)(output)
    output = Activation('relu')(output)

    return output
Code Example #5
File: ActorCritic.py | Project: aravindr93/RL-tasks
    def __init__(self, state_dim, action_dim):
        ''' Create the Learner Networks | Callable structures: net1, net2, actor (plus best_net for bookkeeping)
            net1 --> holds the actor fixed and trains the critic to match target values
            net2 --> holds the critic fixed and trains the actor to produce the action that maximizes the Q value
            best_net --> keeps track of the best network found so far
            actor --> predicts the action given a state
        '''

        # Make placeholders
        self.t_state = Input(shape=(state_dim,))
        self.t_exploration = Input(shape=(action_dim,))

        a1 = Dense(A_UNITS[0], activation=A_ACT[0])(self.t_state)
        a2 = Dense(A_UNITS[1], activation=A_ACT[1])(a1)
        a3 = Dense(output_dim=action_dim, activation=A_ACT[2])(a2)
        m1 = merge([self.t_exploration, a3], mode='sum')
        m2 = merge([self.t_state, m1], mode='concat')
        c1 = Dense(C_UNITS[0], activation=C_ACT[0])(m2)
        c2 = Dense(C_UNITS[1], activation=C_ACT[1])(c1)
        c3 = Dense(output_dim=1, activation=C_ACT[2])(c2)

        self.actor = Model(input=[self.t_state], output=[a3])
        self.net1  = Model(input=[self.t_state, self.t_exploration], output=[c3])
        self.net2  = Model(input=[self.t_state, self.t_exploration], output=[c3])

        # Compile networks
        opt_set1 = RMSprop(lr=LEARN_RATE)
        self.net1.compile(optimizer=opt_set1, loss='mse')
        opt_set2 = SGD(lr=LEARN_RATE)
        self.net2.compile(optimizer=opt_set2, loss='nmo')  # NB: 'nmo' is not a built-in Keras loss; it must be supplied as a custom objective

        # Collect layers of actor and critic
        self.alayers = [1,2,3]
        self.clayers = [7,8,9]
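
Note: the wiring described in the docstring (exploration noise summed onto the actor output, then concatenated with the state and fed to the critic) translates directly to Keras 2. A minimal sketch with illustrative sizes and activations, not the project's actual constants:

from keras.layers import Input, Dense, add, concatenate
from keras.models import Model

state_dim, action_dim = 4, 2
t_state = Input(shape=(state_dim,))
t_exploration = Input(shape=(action_dim,))
a = Dense(32, activation='relu')(t_state)
a = Dense(32, activation='relu')(a)
action = Dense(action_dim, activation='tanh')(a)
noisy_action = add([t_exploration, action])       # was: merge(..., mode='sum')
critic_in = concatenate([t_state, noisy_action])  # was: merge(..., mode='concat')
q = Dense(32, activation='relu')(critic_in)
q = Dense(1, activation='linear')(q)
actor = Model(t_state, action)
critic = Model([t_state, t_exploration], q)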
Code Example #6
File: ActorCritic.py | Project: aravindr93/RL-tasks
    def __init__(self, state_dim, action_dim, param_dim):
        ''' Create the target networks | Two callable structures: actor and critic '''

        # Make placeholders
        self.t_state = Input(shape=(state_dim,))
        self.t_exploration = Input(shape=(action_dim,))
        self.t_param = Input(shape=(param_dim,))
        self.t_target = Input(shape=(1,))

        a1 = Dense(10, activation='relu')(self.t_state)
        a2 = Dense(10, activation='relu')(a1)
        a3 = Dense(output_dim=action_dim, activation='linear')(a2)
        m1 = merge([self.t_exploration, a3], mode='sum')
        m2 = merge([self.t_state, m1], mode='concat')
        c1 = Dense(10, activation='relu')(m2)
        c2 = Dense(5, activation='tanh')(c1)
        p1 = Dense(10, activation='relu')(self.t_param)
        p2 = Dense(5, activation='tanh')(p1)
        m3 = merge([c2, p2], mode='concat')
        c3 = Dense(10, activation='relu')(m3)
        c4 = Dense(5, activation='relu')(c3)
        c5 = Dense(output_dim=1, activation='linear')(c4)

        self.actor  = Model(input=[self.t_state], output=[a3])
        self.critic = Model(input=[self.t_state, self.t_exploration, self.t_param], output=[c5])
Code Example #7
File: util_model.py | Project: gmaher/tcl_code
def I2INet3D(input_shape=(64,64,64,1), Nfilters=32, l2_reg=0.0):

    inp = Input(shape=input_shape)

    #First convolution layer
    x = Conv3D(Nfilters,(3,3,3), padding='same',activation='relu')(inp)
    x = Conv3D(Nfilters,(3,3,3), padding='same',activation='relu')(x)
    out_1 = x
    x = AveragePooling3D()(x)

    #second convolution layer
    Nfilter2 = 4*Nfilters
    x = Conv3D(Nfilter2,(3,3,3), padding='same',activation='relu')(x)
    x = Conv3D(Nfilter2,(3,3,3), padding='same',activation='relu')(x)
    out_2 = x
    x = AveragePooling3D()(x)

    #third convolution layer
    Nfilter3 = 8*Nfilters
    x = Conv3D(Nfilter3,(3,3,3), padding='same',activation='relu')(x)
    x = Conv3D(Nfilter3,(3,3,3), padding='same',activation='relu')(x)
    x = Conv3D(Nfilter3,(3,3,3), padding='same',activation='relu')(x)
    out_3 = x
    x = AveragePooling3D()(x)

    #fourth convolution layer
    Nfilter4 = 16*Nfilters
    x = Conv3D(Nfilter4,(3,3,3), padding='same',activation='relu')(x)
    x = Conv3D(Nfilter4,(3,3,3), padding='same',activation='relu')(x)
    x = Conv3D(Nfilter4,(3,3,3), padding='same',activation='relu')(x)
    out_4 = UpSampling3D()(x)

    ####################
    # Second branch
    ####################
    s = merge([out_3,out_4], mode='concat', concat_axis=4)
    x = Conv3D(Nfilter4, (1,1,1), padding='same',activation='relu')(s)
    x = Conv3D(Nfilter3, (3,3,3), padding='same',activation='relu')(x)
    x = Conv3D(Nfilter3, (3,3,3), padding='same',activation='relu')(x)
    s_out_1 = Conv3D(1, (1,1,1), padding='same',activation='sigmoid')(x)

    #second upsample
    x = UpSampling3D()(x)
    s = merge([out_2,x], mode='concat', concat_axis=4)
    x = Conv3D(Nfilter3, (1,1,1), padding='same',activation='relu')(s)
    x = Conv3D(Nfilter2, (3,3,3), padding='same',activation='relu')(x)
    x = Conv3D(Nfilter2, (3,3,3), padding='same',activation='relu')(x)
    s_out_2 = Conv3D(1, (1,1,1), padding='same',activation='sigmoid')(x)

    #final upsample
    x = UpSampling3D()(x)
    s = merge([out_1,x], mode='concat', concat_axis=4)
    x = Conv3D(Nfilter2, (1,1,1), padding='same',activation='relu')(s)
    x = Conv3D(Nfilters, (3,3,3), padding='same',activation='relu')(x)
    x = Conv3D(Nfilters, (3,3,3), padding='same',activation='relu')(x)
    s_out_3 = Conv3D(1, (1,1,1), padding='same',activation='sigmoid')(x)

    i2i = Model(inp,[s_out_3,s_out_2,s_out_1])

    return i2i
Code Example #8
 def __init__(self):
     '''
     Constructor
     '''
     super().__init__()
     # input tensor
     lr_image = Input(shape=(None,None,4))
     # contraction part
     conv1 = Convolution2D(64, 7, 7, dim_ordering='tf', init=he_normal, activation=relu, border_mode='same', subsample=(2, 2))(lr_image)       
     conv2 = Convolution2D(128, 5, 5, dim_ordering='tf', init=he_normal, activation=relu, border_mode='same', subsample=(2, 2))(conv1)          
     # extension part
     # upconvolution level 2
     upconv2 = UpSampling2D(size=(2,2), dim_ordering='tf')(conv1)
     upconv2 = Convolution2D(128, 4, 4, dim_ordering='tf', init=he_normal, activation=relu, border_mode='same')(upconv2)
     upconv2 = Convolution2D(128, 3, 3, dim_ordering='tf', init=he_normal, activation=relu, border_mode='same')(upconv2)
     # upconvolution level 1
     upconv1 = merge([conv2, upconv2], mode='concat', concat_axis=-1)
     # Note: concat_axis=-1 is the last axis, i.e. the channel axis in 'tf' dim ordering
     upconv1 = UpSampling2D(size=(2,2), dim_ordering='tf')(upconv1)
     upconv1 = Convolution2D(64, 4, 4, dim_ordering='tf', init=he_normal, activation=relu, border_mode='same')(upconv1)
     upconv1 = Convolution2D(64, 3, 3, dim_ordering='tf', init=he_normal, activation=relu, border_mode='same')(upconv1)
     # upconvolution level 0 (sr)
     sr_prediction = merge([conv1, upconv1], mode='concat', concat_axis=-1)
     # Note: concat_axis=-1 is the last axis, as above
     sr_prediction = UpSampling2D(size=(2,2), dim_ordering='tf')(sr_prediction)
     sr_prediction = Convolution2D(4, 4, 4, dim_ordering='tf', init=he_normal, activation=relu, border_mode='same')(sr_prediction)
     sr_prediction = Convolution2D(4, 3, 3, dim_ordering='tf', init=he_normal, activation=relu, border_mode='same')(sr_prediction)
     # configuration
     self.model = Model(input=lr_image, output=sr_prediction)
     self.model.compile(optimizer='rmsprop', loss='mse')
Code Example #9
    def compile(self, optimizer, metrics=[]):
        metrics += [mean_q]  # register default metrics

        # Create target V model. We don't need targets for mu or L.
        self.target_V_model = clone_model(self.V_model, self.custom_model_objects)
        self.target_V_model.compile(optimizer='sgd', loss='mse')

        # Build combined model.
        observation_shape = self.V_model.input._keras_shape[1:]
        a_in = Input(shape=(self.nb_actions,), name='action_input')
        o_in = Input(shape=observation_shape, name='observation_input')
        L_out = self.L_model([a_in, o_in])
        V_out = self.V_model(o_in)
        mu_out = self.mu_model(o_in)
        A_out = NAFLayer(self.nb_actions)(merge([L_out, mu_out, a_in], mode='concat'))
        combined_out = merge([A_out, V_out], mode='sum')
        combined = Model(input=[a_in, o_in], output=combined_out)

        # Compile combined model.
        if self.target_model_update < 1.:
            # We use the `AdditionalUpdatesOptimizer` to efficiently soft-update the target model.
            updates = get_soft_target_model_updates(self.target_V_model, self.V_model, self.target_model_update)
            optimizer = AdditionalUpdatesOptimizer(optimizer, updates)
        
        def clipped_mse(y_true, y_pred):
            delta = K.clip(y_true - y_pred, self.delta_range[0], self.delta_range[1])
            return K.mean(K.square(delta), axis=-1)
        
        combined.compile(loss=clipped_mse, optimizer=optimizer, metrics=metrics)
        self.combined_model = combined

        self.compiled = True
Code Example #10
    def residual_block(l, increase_dim=False, first=False, filters=16):
        if increase_dim:
            first_stride = (2,2)
        else:
            first_stride = (1,1)

        if first:
            pre_act = l
        else:
            # BN -> ReLU
            bn = BatchNormalization(axis=1)(l)
            pre_act = Activation('relu')(bn)

        conv_1 = Convolution2D(filters, 3,3, init='he_normal', border_mode='same', subsample=first_stride, activation='linear')(pre_act)
        bn_1 = BatchNormalization(axis=1)(conv_1)
        relu_1 = Activation('relu')(bn_1)
        conv_2 = Convolution2D(filters, 3,3, init='he_normal', border_mode='same', activation='linear')(relu_1)

        # add shortcut
        if increase_dim:
            # projection shortcut
            projection = Convolution2D(filters, 1,1, subsample=(2,2), border_mode='same', activation='linear')(pre_act)
            block = merge([conv_2, projection], mode='sum')
        else:
            block = merge([conv_2, pre_act], mode='sum')

        return block
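
Note: for reference, a self-contained Keras 2 version of the same pre-activation residual block, with add() replacing merge(mode='sum'); the channels-last input shape is illustrative:

from keras.layers import Input, Conv2D, BatchNormalization, Activation, add
from keras.models import Model

def residual_block(l, increase_dim=False, first=False, filters=16):
    first_stride = (2, 2) if increase_dim else (1, 1)
    # BN -> ReLU pre-activation, skipped for the very first block
    pre_act = l if first else Activation('relu')(BatchNormalization()(l))
    x = Conv2D(filters, (3, 3), strides=first_stride, padding='same',
               kernel_initializer='he_normal')(pre_act)
    x = Activation('relu')(BatchNormalization()(x))
    x = Conv2D(filters, (3, 3), padding='same', kernel_initializer='he_normal')(x)
    if increase_dim:
        # projection shortcut to match the downsampled feature map
        shortcut = Conv2D(filters, (1, 1), strides=(2, 2), padding='same')(pre_act)
        return add([x, shortcut])
    return add([x, pre_act])

inp = Input(shape=(32, 32, 16))
model = Model(inp, residual_block(inp, first=True))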
Code Example #11
File: test_core.py | Project: BigeyeDestroyer/keras
def test_merge_mask_2d():
    from keras.layers import Input, merge, Masking
    from keras.models import Model
    import numpy as np

    rand = lambda *shape: np.asarray(np.random.random(shape) > 0.5, dtype='int32')

    # inputs
    input_a = Input(shape=(3,))
    input_b = Input(shape=(3,))

    # masks
    masked_a = Masking(mask_value=0)(input_a)
    masked_b = Masking(mask_value=0)(input_b)

    # three different types of merging
    merged_sum = merge([masked_a, masked_b], mode='sum')
    merged_concat = merge([masked_a, masked_b], mode='concat', concat_axis=1)
    merged_concat_mixed = merge([masked_a, input_b], mode='concat', concat_axis=1)

    # test sum
    model_sum = Model([input_a, input_b], [merged_sum])
    model_sum.compile(loss='mse', optimizer='sgd')
    model_sum.fit([rand(2, 3), rand(2, 3)], [rand(2, 3)], nb_epoch=1)

    # test concatenation
    model_concat = Model([input_a, input_b], [merged_concat])
    model_concat.compile(loss='mse', optimizer='sgd')
    model_concat.fit([rand(2, 3), rand(2, 3)], [rand(2, 6)], nb_epoch=1)

    # test concatenation with masked and non-masked inputs
    model_concat = Model([input_a, input_b], [merged_concat_mixed])
    model_concat.compile(loss='mse', optimizer='sgd')
    model_concat.fit([rand(2, 3), rand(2, 3)], [rand(2, 6)], nb_epoch=1)
Code Example #12
File: inception.py | Project: 99sbr/Machine-Learning
def Stem(Input):
    print(Input)
    x = Convolution2D(32 // nb_filters_reduction_factor,
                      3, 3, subsample=(1, 1))(Input)
    x = Convolution2D(32 // nb_filters_reduction_factor, 3, 3)(x)
    x = Convolution2D(32 // nb_filters_reduction_factor, 3, 3)(x)
    x = Convolution2D(64 // nb_filters_reduction_factor,
                      3, 3, border_mode='same')(x)

    path1 = MaxPooling2D((3, 3), strides=(1, 1))(x)  # changed
    path2 = Convolution2D(96 // nb_filters_reduction_factor,
                          3, 3, subsample=(1, 1))(x)  # changed
    y = merge([path1, path2], mode='concat')

    a = Convolution2D(64 // nb_filters_reduction_factor,
                      1, 1, border_mode='same')(y)
    a = Convolution2D(64 // nb_filters_reduction_factor,
                      3, 1, border_mode='same')(a)
    a = Convolution2D(64 // nb_filters_reduction_factor,
                      1, 3, border_mode='same')(a)
    a = Convolution2D(96 // nb_filters_reduction_factor,
                      3, 3, border_mode='valid')(a)

    b = Convolution2D(64 // nb_filters_reduction_factor,
                      1, 1, border_mode='same')(y)
    b = Convolution2D(96 // nb_filters_reduction_factor,
                      3, 3, border_mode='valid')(b)

    z = merge([a, b], mode='concat')
    z1 = MaxPooling2D((3, 3), strides=(2, 2))(z)
    z2 = Convolution2D(192 // nb_filters_reduction_factor, 3,
                       3, subsample=(2, 2), border_mode='valid')(z)

    c = merge([z1, z2], mode='concat', concat_axis=-1)
    return c
Code Example #13
def block_inception_c(input):
    if K.image_dim_ordering() == "th":
        channel_axis = 1
    else:
        channel_axis = -1

    branch_0 = conv2d_bn(input, 256, 1, 1)

    branch_1 = conv2d_bn(input, 384, 1, 1)
    branch_10 = conv2d_bn(branch_1, 256, 1, 3)
    branch_11 = conv2d_bn(branch_1, 256, 3, 1)
    branch_1 = merge([branch_10, branch_11], mode='concat', concat_axis=channel_axis)


    branch_2 = conv2d_bn(input, 384, 1, 1)
    branch_2 = conv2d_bn(branch_2, 448, 3, 1)
    branch_2 = conv2d_bn(branch_2, 512, 1, 3)
    branch_20 = conv2d_bn(branch_2, 256, 1, 3)
    branch_21 = conv2d_bn(branch_2, 256, 3, 1)
    branch_2 = merge([branch_20, branch_21], mode='concat', concat_axis=channel_axis)

    branch_3 = AveragePooling2D((3, 3), strides=(1, 1), border_mode='same')(input)
    branch_3 = conv2d_bn(branch_3, 256, 1, 1)

    x = merge([branch_0, branch_1, branch_2, branch_3], mode='concat', concat_axis=channel_axis)
    return x
Code Example #14
    def get_similarities(self):
        if self._models is None:
            self._models = self.build()
            assert len(self._models) == 2, 'build() should make question and answer language models'

        if self._similarities is None:
            question_model, answer_model = self._models

            answers_use_question = len(answer_model.internal_input_shapes) == 2

            question = question_model(self.question, self.answer_good)

            if answers_use_question:
                good = answer_model([self.question, self.answer_good])
                bad = answer_model([self.question, self.answer_bad])
            else:
                good = answer_model([self.answer_good])
                bad = answer_model([self.answer_bad])

            similarity = self.get_similarity()
            good_sim = merge([question, good], mode=similarity, output_shape=lambda x: x[:-1])
            bad_sim = merge([question, bad], mode=similarity, output_shape=lambda x: x[:-1])

            self._similarities = [good_sim, bad_sim]

        return self._similarities
Code Example #15
File: inceptionv4.py | Project: titu1994/ML-Tools
def inception_stem(input):
    # Input shape is 299 x 299 x 3 (tf) or 3 x 299 x 299 (th)
    c = Convolution2D(32, 3, 3, activation='relu', subsample=(2,2))(input)
    c = Convolution2D(32, 3, 3, activation='relu', )(c)
    c = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(c)

    c1 = MaxPooling2D((3,3), strides=(2,2))(c)
    c2 = Convolution2D(96, 3, 3, activation='relu', subsample=(2,2))(c)

    m = merge([c1, c2], mode='concat', concat_axis=1)

    c1 = Convolution2D(64, 1, 1, activation='relu', border_mode='same')(m)
    c1 = Convolution2D(96, 3, 3, activation='relu', )(c1)

    c2 = Convolution2D(64, 1, 1, activation='relu', border_mode='same')(m)
    c2 = Convolution2D(64, 7, 1, activation='relu', border_mode='same')(c2)
    c2 = Convolution2D(64, 1, 7, activation='relu', border_mode='same')(c2)
    c2 = Convolution2D(96, 3, 3, activation='relu', border_mode='valid')(c2)

    m2 = merge([c1, c2], mode='concat', concat_axis=1)

    p1 = MaxPooling2D((3,3), strides=(2,2), )(m2)
    p2 = Convolution2D(192, 3, 3, activation='relu', subsample=(2,2))(m2)

    m3 = merge([p1, p2], mode='concat', concat_axis=1)
    return m3
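
Note: in Keras 2 the legacy merge() call is split into per-mode functions: concatenate() for mode='concat' (axis instead of concat_axis), add() for 'sum', multiply() for 'mul', average() for 'ave', and dot() for 'dot'. A minimal sketch of the first stem reduction above in that style (channels-last, so axis=-1 rather than the snippet's concat_axis=1):

from keras.layers import Input, Conv2D, MaxPooling2D, concatenate
from keras.models import Model

inp = Input(shape=(299, 299, 3))
c = Conv2D(32, (3, 3), strides=(2, 2), activation='relu')(inp)
c = Conv2D(32, (3, 3), activation='relu')(c)
c = Conv2D(64, (3, 3), activation='relu', padding='same')(c)
c1 = MaxPooling2D((3, 3), strides=(2, 2))(c)
c2 = Conv2D(96, (3, 3), strides=(2, 2), activation='relu')(c)
m = concatenate([c1, c2], axis=-1)  # was: merge([c1, c2], mode='concat', concat_axis=1)
model = Model(inp, m)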
Code Example #16
def get_unet():
    inputs = Input((1, img_rows, img_cols))
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(inputs)
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool1)
    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool2)
    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(pool3)
    conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(pool4)
    conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(conv5)

    pool5 = MaxPooling2D(pool_size=(2, 2))(conv5)
    pool5_flat = Flatten()(pool5)
    dense1 = Dense(1024, activation='relu')(pool5_flat)
    dense2 = Dense(512, activation='relu')(dense1)
    dense3 = Dense(1, activation='sigmoid', name='has_mask_output')(dense2)  # was fed dense1, leaving dense2 unused

    up6 = merge([UpSampling2D(size=(2, 2))(conv5), conv4], mode='concat', concat_axis=1)
    conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(up6)
    conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv6)

    up7 = merge([UpSampling2D(size=(2, 2))(conv6), conv3], mode='concat', concat_axis=1)
    conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(up7)
    conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv7)

    up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2], mode='concat', concat_axis=1)
    conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up8)
    conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv8)

    up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1], mode='concat', concat_axis=1)
    conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up9)
    conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv9)

    conv10 = Convolution2D(1, 1, 1, activation='sigmoid')(conv9)

    conv10_flat = Flatten()(conv10)

    dense3_repeat = Flatten()(RepeatVector(img_rows * img_cols)(dense3))

    merge_output = merge([conv10_flat, dense3_repeat], mode='mul')

    out = Reshape((1, img_rows, img_cols), name='main_output')(merge_output)

    model = Model(input=inputs, output=[out, dense3])

    model.compile(optimizer=Adam(lr=1e-5),
        loss={'main_output': dice_coef_loss, 'has_mask_output': 'binary_crossentropy'},
        metrics={'main_output': dice_coef, 'has_mask_output': 'accuracy'})

    return model
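
Note: the closing lines implement an output gate: the scalar has_mask prediction is broadcast with RepeatVector and multiplied into the flattened mask (merge mode='mul'), so the segmentation output is suppressed when no mask is predicted. A stripped-down Keras 2 sketch of just that gating trick, with illustrative layer sizes:

from keras.layers import (Input, Conv2D, Dense, Flatten, RepeatVector,
                          Reshape, multiply)
from keras.models import Model

img_rows = img_cols = 64
inputs = Input((img_rows, img_cols, 1))
mask = Conv2D(1, (1, 1), activation='sigmoid')(
    Conv2D(8, (3, 3), padding='same', activation='relu')(inputs))
has_mask = Dense(1, activation='sigmoid', name='has_mask_output')(Flatten()(inputs))
gate = Flatten()(RepeatVector(img_rows * img_cols)(has_mask))  # broadcast the scalar
gated = multiply([Flatten()(mask), gate])                      # was: merge(..., mode='mul')
out = Reshape((img_rows, img_cols, 1), name='main_output')(gated)
model = Model(inputs, [out, has_mask])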
Code Example #17
    def build(self):
        assert self.config['question_len'] == self.config['answer_len']

        question = self.question
        answer = self.get_answer()

        # add embedding layers
        embedding = Embedding(self.config['n_words'], self.model_params.get('n_embed_dims', 100))
        question_embedding = embedding(question)
        answer_embedding = embedding(answer)

        # turn off layer updating
        embedding.params = []
        embedding.updates = []

        # dropout
        dropout = Dropout(0.25)
        question_dropout = dropout(question_embedding)
        answer_dropout = dropout(answer_embedding)

        # dense
        dense = TimeDistributed(Dense(self.model_params.get('n_hidden', 200), activation='tanh'))
        question_dense = dense(question_dropout)
        answer_dense = dense(answer_dropout)

        # regularization
        question_dense = ActivityRegularization(l2=0.0001)(question_dense)
        answer_dense = ActivityRegularization(l2=0.0001)(answer_dense)

        # dropout
        question_dropout = dropout(question_dense)
        answer_dropout = dropout(answer_dense)

        # cnn
        cnns = [Convolution1D(filter_length=filter_length,
                              nb_filter=self.model_params.get('nb_filters', 1000),
                              activation=self.model_params.get('conv_activation', 'relu'),
                              border_mode='same') for filter_length in [2, 3, 5, 7]]
        question_cnn = merge([cnn(question_dropout) for cnn in cnns], mode='concat')
        answer_cnn = merge([cnn(answer_dropout) for cnn in cnns], mode='concat')

        # regularization
        question_cnn = ActivityRegularization(l2=0.0001)(question_cnn)
        answer_cnn = ActivityRegularization(l2=0.0001)(answer_cnn)

        # dropout
        question_dropout = dropout(question_cnn)
        answer_dropout = dropout(answer_cnn)

        # maxpooling
        maxpool = Lambda(lambda x: K.max(x, axis=1, keepdims=False), output_shape=lambda x: (x[0], x[2]))
        question_pool = maxpool(question_dropout)
        answer_pool = maxpool(answer_dropout)

        # activation
        activation = Activation('tanh')
        question_output = activation(question_pool)
        answer_output = activation(answer_pool)

        return question_output, answer_output
Code Example #18
 def build_model(self):
     lstm_branch = []
     input_branch = []
     for i in range(self.layers_number):
         main_input = Input(shape=(self.lstm_timesteps, self.input_dim), name="main_input_" + str(i))
         input_branch.append(main_input)
         lstm_out = LSTM(self.lstm_hidden, return_sequences=True)(main_input)
         # auxiliary_input = Input(batch_shape=(self.batch_size,1,self.lstm_timesteps), name='auxiliary_input'+str(i))
         auxiliary_input = Input(shape=(1, self.lstm_timesteps), name="auxiliary_input" + str(i))
         input_branch.append(auxiliary_input)
         """
         x1 = Merge([lstm_out, auxiliary_input], mode=lambda x, y: (x*y).sum(axis=0),
                 name='merge_lstm_auxi'+str(i))
         """
         x1 = merge([auxiliary_input, lstm_out], mode="dot", dot_axes=[2, 1], name="merge_lstm_auxi" + str(i))
         assert x1
         flatten = Reshape((self.lstm_hidden,))(x1)
         c_input = Input(shape=(6,), name="c_input" + str(i))
         input_branch.append(c_input)
         x2 = merge([flatten, c_input], mode="concat")
         x2 = Dense(
             self.lstm_hidden, activation="relu", W_regularizer=l2(0.001), activity_regularizer=activity_l2(0.001)
         )(x2)
         assert x2
         lstm_branch.append(x2)
     lstm_all_out = merge(lstm_branch, mode="sum", name="lstm_all_out")
     """
     dense_relu = Dense(self.lstm_hidden, activation='relu', W_regularizer=l2(0.001),
             activity_regularizer=activity_l2(0.001))(lstm_all_out)
     """
     final_loss = Dense(self.output_dim, name="main_output")(lstm_all_out)
     self.model = Model(input_branch, output=final_loss)
     self.model.compile(loss="mean_squared_error", optimizer="adagrad")
     plot(self.model, to_file="multiple_model.png", show_shapes=True)
Code Example #19
    def qmodel(self, trainable):
        inp_s = Input( shape=(xp.STATE_DIM,),  name="inp_s")  # batch (None, ...) added automatically
        inp_a = Input( shape=(xp.ACTION_DIM,), name="inp_a")

        a1 = Dense(640, activation='relu', W_regularizer=l2(0.01))
        a2 = Dense(640, activation='relu', W_regularizer=l2(0.01))
        #a3 = Dense(320, activation='relu', W_regularizer=l2(0.01), activity_regularizer=activity_l2(0.01))
        a_out = Dense(1, W_regularizer=l2(0.01)) #W_constraint=nonneg())

        v1 = Dense(640, activation='relu', W_regularizer=l2(0.01))
        v2 = Dense(640, activation='relu', W_regularizer=l2(0.01))
        v_out = Dense(1, W_regularizer=l2(0.01))

        gaussian = Lambda(gaussian_of_x, output_shape=gaussian_of_x_shape)
        parabolized_action = merge( [
            a_out( a2(a1( merge([inp_s, inp_a], mode='concat') )) ),
            gaussian(inp_a)
            ], mode='mul')
        out_tensor = a_out( a2(a1( merge([inp_s, inp_a], mode='concat') )) )
        #out_tensor = merge( [
        #    parabolized_action,
        #    v_out( v2(v1(inp_s)) )
        #    ], mode='sum' )

        Qmod = Model( input=[inp_s,inp_a], output=out_tensor )
        Qmod.compile(loss='mse', optimizer=Adam(lr=0.0005, beta_2=0.9999))
        if not trainable:
            for layer in [v1,v2,v_out, a1,a2,a_out]:
                layer.trainable = False  # the model is already compiled (which is where this flag is read), so this keeps the Q layers fixed while the policy is learned
        return Qmod
Code Example #20
File: keras_models.py | Project: Xls1994/DeepLearning
    def build(self):
        assert self.config['question_len'] == self.config['answer_len']

        question = self.question
        answer = self.get_answer()

        # add embedding layers
        weights = np.load(self.config['initial_embed_weights'])
        embedding = Embedding(input_dim=self.config['n_words'],
                              output_dim=weights.shape[1],
                              weights=[weights])
        question_embedding = embedding(question)
        answer_embedding = embedding(answer)

        # cnn
        cnns = [Convolution1D(filter_length=filter_length,
                              nb_filter=500,
                              activation='tanh',
                              border_mode='same') for filter_length in [2, 3, 5, 7]]
        question_cnn = merge([cnn(question_embedding) for cnn in cnns], mode='concat')
        answer_cnn = merge([cnn(answer_embedding) for cnn in cnns], mode='concat')

        # maxpooling
        maxpool = Lambda(lambda x: K.max(x, axis=1, keepdims=False), output_shape=lambda x: (x[0], x[2]))
        maxpool.supports_masking = True
        enc = Dense(100, activation='tanh')
        question_pool = enc(maxpool(question_cnn))
        answer_pool = enc(maxpool(answer_cnn))

        return question_pool, answer_pool
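
Note: the pattern here is one shared embedding, a bank of Convolution1D layers with different filter widths applied to both question and answer (weight sharing again comes from reusing the layer instances), concatenation, and max-over-time pooling via a Lambda. A compact Keras 2 sketch of the same encoder (vocabulary size, embedding width and sequence length are illustrative):

from keras import backend as K
from keras.layers import Input, Embedding, Conv1D, Lambda, Dense, concatenate
from keras.models import Model

seq = Input(shape=(40,), dtype='int32')
emb = Embedding(input_dim=5000, output_dim=100)(seq)
# one Conv1D per filter width; applying these same instances to a second
# input would share their weights, as the snippet does for question/answer
convs = [Conv1D(filters=500, kernel_size=k, activation='tanh', padding='same')(emb)
         for k in (2, 3, 5, 7)]
feats = concatenate(convs)                          # was: merge(..., mode='concat')
pooled = Lambda(lambda x: K.max(x, axis=1))(feats)  # max over time
enc = Dense(100, activation='tanh')(pooled)
model = Model(seq, enc)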
Code Example #21
File: keras_models.py | Project: Xls1994/DeepLearning
    def build(self):
        question = self.question
        answer = self.get_answer()

        # add embedding layers
        weights = np.load(self.config['initial_embed_weights'])
        embedding = Embedding(input_dim=self.config['n_words'],
                              output_dim=weights.shape[1],
                              # mask_zero=True,
                              weights=[weights])
        question_embedding = embedding(question)
        answer_embedding = embedding(answer)

        # question rnn part
        f_rnn = LSTM(141, return_sequences=True, consume_less='mem')
        b_rnn = LSTM(141, return_sequences=True, consume_less='mem', go_backwards=True)
        question_f_rnn = f_rnn(question_embedding)
        question_b_rnn = b_rnn(question_embedding)

        # maxpooling
        maxpool = Lambda(lambda x: K.max(x, axis=1, keepdims=False), output_shape=lambda x: (x[0], x[2]))
        maxpool.supports_masking = True
        question_pool = merge([maxpool(question_f_rnn), maxpool(question_b_rnn)], mode='concat', concat_axis=-1)

        # answer rnn part
        from attention_lstm import AttentionLSTMWrapper
        f_rnn = AttentionLSTMWrapper(f_rnn, question_pool, single_attention_param=True)
        b_rnn = AttentionLSTMWrapper(b_rnn, question_pool, single_attention_param=True)

        answer_f_rnn = f_rnn(answer_embedding)
        answer_b_rnn = b_rnn(answer_embedding)
        answer_pool = merge([maxpool(answer_f_rnn), maxpool(answer_b_rnn)], mode='concat', concat_axis=-1)

        return question_pool, answer_pool
Code Example #22
File: model.py | Project: kadarakos/hieratt
def VGG19_hieratt(query_in_size, query_embed_size, nb_classes):
    """Stack hierarchical attention on pre-trained VGG19.
    Requires https://github.com/fchollet/deep-learning-models"""

    base_model = VGG19(weights='imagenet')    
    input_image = base_model.input
    input_question = Input(shape=(query_in_size,))     # question vector
    
    # Model up to 3rd block
    f_1 = Model(input=input_image, output=base_model.get_layer('block3_pool').output)
    f_1 = f_1(input_image)
    f_1 = Reshape((256, 28*28))(f_1)
    f_1 = Permute((2,1))(f_1)


    q_1   = Dense(query_embed_size, activation='relu')(input_question)  # Encode question
    # Add question embedding to each feature column
    q_1   = RepeatVector(28*28)(q_1)
    q_f   = merge([f_1, q_1], 'concat')
    # Estimate and apply attention per feature
    att_1 = TimeDistributedDense(1, activation="sigmoid")(q_f)
    att_1 = Lambda(repeat_1, output_shape=(28*28, 256))(att_1)
    att_1 = merge([f_1, att_1], 'mul')
    # Reshape to the original feature map from previous layer
    att_1 = Permute((2,1))(att_1)
    f_1_att = Reshape((256, 28, 28))(att_1)


    model = Model(input=[input_image, input_question], output=f_1_att)
    print(model.summary())
    return model
Code Example #23
    def create_model(self, height=32, width=32, channels=3, load_weights=False, batch_size=128,
                     small_train_images=False):
        """
            Creates a model to remove / reduce noise from upscaled images.
        """
        from keras.layers.convolutional import Deconvolution2D

        assert height % 4 == 0, "Height of the image must be divisible by 4"
        assert width % 4 == 0, "Width of the image must be divisible by 4"

        if K.image_dim_ordering() == "th":
            shape = (channels, width, height)
        else:
            shape = (width, height, channels)

        init = Input(shape=shape)

        level1_1 = Convolution2D(self.n1, 3, 3, activation='relu', border_mode='same')(init)
        level2_1 = Convolution2D(self.n1, 3, 3, activation='relu', border_mode='same')(level1_1)

        level2_2 = Deconvolution2D(self.n1, 3, 3, activation='relu', output_shape=(None, channels, height, width), border_mode='same')(level2_1)
        level2 = merge([level2_1, level2_2], mode='sum')

        level1_2 = Deconvolution2D(self.n1, 3, 3, activation='relu', output_shape=(None, channels, height, width), border_mode='same')(level2)
        level1 = merge([level1_1, level1_2], mode='sum')

        decoded = Convolution2D(channels, 5, 5, activation='linear', border_mode='same')(level1)

        model = Model(init, decoded)
        model.compile(optimizer='adam', loss='mse', metrics=[PSNRLoss])
        if load_weights: model.load_weights("weights/Denoising AutoEncoder.h5")

        self.model = model
        return model
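
Note: Keras 1's Deconvolution2D requires the full output_shape up front (including the batch dimension), which is why the snippet threads it into every call. Keras 2's Conv2DTranspose infers the output shape, so the same skip-connected denoiser reduces to the following sketch (filter count and input size are illustrative):

from keras.layers import Input, Conv2D, Conv2DTranspose, add
from keras.models import Model

init = Input(shape=(32, 32, 3))
level1_1 = Conv2D(64, (3, 3), activation='relu', padding='same')(init)
level2_1 = Conv2D(64, (3, 3), activation='relu', padding='same')(level1_1)
level2_2 = Conv2DTranspose(64, (3, 3), activation='relu', padding='same')(level2_1)
level2 = add([level2_1, level2_2])  # was: merge(..., mode='sum')
level1_2 = Conv2DTranspose(64, (3, 3), activation='relu', padding='same')(level2)
level1 = add([level1_1, level1_2])
decoded = Conv2D(3, (5, 5), activation='linear', padding='same')(level1)
model = Model(init, decoded)
model.compile(optimizer='adam', loss='mse')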
Code Example #24
def inception_stem(input):
    """Create inception stem."""
    if K.image_dim_ordering() == "th":
        channel_axis = 1
    else:
        channel_axis = -1

    # Input shape is 299 x 299 x 3 (tf) or 3 x 299 x 299 (th)
    x = conv_block(input, 32, 3, 3, subsample=(2, 2), border_mode='valid')
    x = conv_block(x, 32, 3, 3, border_mode='valid')
    x = conv_block(x, 64, 3, 3)

    x1 = MaxPooling2D((3, 3), strides=(2, 2), border_mode='valid')(x)
    x2 = conv_block(x, 96, 3, 3, subsample=(2, 2), border_mode='valid')

    x = merge([x1, x2], mode='concat', concat_axis=channel_axis)

    x1 = conv_block(x, 64, 1, 1)
    x1 = conv_block(x1, 96, 3, 3, border_mode='valid')

    x2 = conv_block(x, 64, 1, 1)
    x2 = conv_block(x2, 64, 1, 7)
    x2 = conv_block(x2, 64, 7, 1)
    x2 = conv_block(x2, 96, 3, 3, border_mode='valid')

    x = merge([x1, x2], mode='concat', concat_axis=channel_axis)

    x1 = conv_block(x, 192, 3, 3, subsample=(2, 2), border_mode='valid')
    x2 = MaxPooling2D((3, 3), strides=(2, 2), border_mode='valid')(x)

    x = merge([x1, x2], mode='concat', concat_axis=channel_axis)
    return x
Code Example #25
def inception_resnet_v2_A(input, scale_residual=True):
    if K.image_dim_ordering() == "th":
        channel_axis = 1
    else:
        channel_axis = -1

    # Input is relu activation
    init = input

    ir1 = Convolution2D(32, 1, 1, activation='relu', border_mode='same')(input)

    ir2 = Convolution2D(32, 1, 1, activation='relu', border_mode='same')(input)
    ir2 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(ir2)

    ir3 = Convolution2D(32, 1, 1, activation='relu', border_mode='same')(input)
    ir3 = Convolution2D(48, 3, 3, activation='relu', border_mode='same')(ir3)
    ir3 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(ir3)

    ir_merge = merge([ir1, ir2, ir3], concat_axis=channel_axis, mode='concat')

    ir_conv = Convolution2D(384, 1, 1, activation='linear', border_mode='same')(ir_merge)
    if scale_residual: ir_conv = Lambda(lambda x: x * 0.1)(ir_conv)

    out = merge([init, ir_conv], mode='sum')
    out = BatchNormalization(axis=channel_axis)(out)
    out = Activation("relu")(out)
    return out
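
Note: the inception-ResNet join above is: concatenate the branches, project with a linear 1x1 convolution, optionally scale the residual (here by 0.1) via a Lambda, and sum with the already-activated input. A Keras 2 sketch of that join alone, with an illustrative input shape:

from keras.layers import Input, Conv2D, Lambda, add, concatenate
from keras.models import Model

inp = Input(shape=(35, 35, 384))  # assumed to be a relu-activated feature map
ir1 = Conv2D(32, (1, 1), activation='relu', padding='same')(inp)
ir2 = Conv2D(32, (3, 3), activation='relu', padding='same')(
    Conv2D(32, (1, 1), activation='relu', padding='same')(inp))
ir3 = Conv2D(64, (3, 3), activation='relu', padding='same')(
    Conv2D(48, (3, 3), activation='relu', padding='same')(
        Conv2D(32, (1, 1), activation='relu', padding='same')(inp)))
ir_merge = concatenate([ir1, ir2, ir3], axis=-1)
ir_conv = Conv2D(384, (1, 1), activation='linear', padding='same')(ir_merge)
scaled = Lambda(lambda x: x * 0.1)(ir_conv)  # scale the residual
out = add([inp, scaled])                     # was: merge([init, ir_conv], mode='sum')
model = Model(inp, out)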
Code Example #26
def inception_C(input):
    if K.image_dim_ordering() == "th":
        channel_axis = 1
    else:
        channel_axis = -1

    c1 = conv_block(input, 256, 1, 1)

    c2 = conv_block(input, 384, 1, 1)
    c2_1 = conv_block(c2, 256, 1, 3)
    c2_2 = conv_block(c2, 256, 3, 1)
    c2 = merge([c2_1, c2_2], mode='concat', concat_axis=channel_axis)

    c3 = conv_block(input, 384, 1, 1)
    c3 = conv_block(c3, 448, 3, 1)
    c3 = conv_block(c3, 512, 1, 3)
    c3_1 = conv_block(c3, 256, 1, 3)
    c3_2 = conv_block(c3, 256, 3, 1)
    c3 = merge([c3_1, c3_2], mode='concat', concat_axis=channel_axis)

    c4 = AveragePooling2D((3, 3), strides=(1, 1), border_mode='same')(input)
    c4 = conv_block(c4, 256, 1, 1)

    m = merge([c1, c2, c3, c4], mode='concat', concat_axis=channel_axis)
    return m
Code Example #27
def inception_resnet_stem(input):
    if K.image_dim_ordering() == "th":
        channel_axis = 1
    else:
        channel_axis = -1

    # Input shape is 299 x 299 x 3 (tf) or 3 x 299 x 299 (th)
    c = Convolution2D(32, 3, 3, activation='relu', subsample=(2, 2))(input)
    c = Convolution2D(32, 3, 3, activation='relu', )(c)
    c = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(c)

    c1 = MaxPooling2D((3, 3), strides=(2, 2))(c)
    c2 = Convolution2D(96, 3, 3, activation='relu', subsample=(2, 2))(c)

    m = merge([c1, c2], mode='concat', concat_axis=channel_axis)

    c1 = Convolution2D(64, 1, 1, activation='relu', border_mode='same')(m)
    c1 = Convolution2D(96, 3, 3, activation='relu', )(c1)

    c2 = Convolution2D(64, 1, 1, activation='relu', border_mode='same')(m)
    c2 = Convolution2D(64, 7, 1, activation='relu', border_mode='same')(c2)
    c2 = Convolution2D(64, 1, 7, activation='relu', border_mode='same')(c2)
    c2 = Convolution2D(96, 3, 3, activation='relu', border_mode='valid')(c2)

    m2 = merge([c1, c2], mode='concat', concat_axis=channel_axis)

    p1 = MaxPooling2D((3, 3), strides=(2, 2), )(m2)
    p2 = Convolution2D(192, 3, 3, activation='relu', subsample=(2, 2))(m2)

    m3 = merge([p1, p2], mode='concat', concat_axis=channel_axis)
    m3 = BatchNormalization(axis=channel_axis)(m3)
    m3 = Activation('relu')(m3)
    return m3
Code Example #28
def build_model(nb_classes, word_vocab_size, chars_vocab_size,
                word_count, word_length, batch_size):
    print('Build model...')
    CONSUME_LESS='gpu'
    char_input = Input(batch_shape=(batch_size,word_count, word_length,),
            dtype='int32', name='char_input')
    character_embedding = TimeDistributed(Embedding(chars_vocab_size, 15,
                                         input_length=word_length,
                                         name='char_embedding'),
                                          name='td_char_embedding')(char_input)
    forward_gru = TimeDistributed(GRU(16,name='char_gru_forward',
                                        consume_less=CONSUME_LESS),
                                   name='td_char_gru_forward')(character_embedding)
    backward_gru = TimeDistributed(GRU(16,name='char_gru_backward',
                                        consume_less=CONSUME_LESS,
                                        go_backwards=True),
                                   name='td_char_gru_backward')(character_embedding)
    char_embedding = merge([forward_gru,backward_gru],mode='concat')

    word_input = Input(batch_shape=(batch_size,word_count,),
            dtype='int32', name='word_input')
    word_embedding = Embedding(word_vocab_size, 32,
                                         input_length=word_count,
                                         name='word_embedding')(word_input)

    embedding = merge([char_embedding,word_embedding],mode='concat')
    word_gru = GRU(32, name='word_lstm', consume_less=CONSUME_LESS)(embedding)
    dense = Dense(nb_classes, activation='sigmoid', name='dense')(word_gru)
    output = Activation('softmax', name='output')(dense)
    model = Model(input=[char_input,word_input], output=output)
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    return model
Code Example #29
File: DeepCross.py | Project: Ewen2015/Kaggle
def fit(inp_layer, inp_embed, X, y, *params): #X_val,y_val
    print('fitting...')
    #inp_layer, inp_embed = feature_generate(X, cate_columns, cont_columns)
    input = merge(inp_embed, mode = 'concat')
    print('\tinput shape: ', input.shape)
    
    # deep layer
    for i in range(4):
        if i == 0:
            deep = Dense(272, activation='relu')(Flatten()(input))
        else:
            deep = Dense(272, activation='relu')(deep)

    # cross layer
    cross = CrossLayer(output_dim = input.shape[2].value, num_layer = 8, name = "cross_layer")(input)

    #concat both layers
    output = merge([deep, cross], mode = 'concat')
    output = Dense(1, activation = 'sigmoid')(output)
    model = Model(inp_layer, output) 
    
    print(model.summary())
    # plot_model(model, to_file = '/Users/ewenwang/Documents/practice_data/conversion_rate/model.png', show_shapes = True)
    model.compile(optimizer = 'Adam', loss = 'binary_crossentropy', metrics = ["accuracy"])
    if len(params) == 2:
        X_val = params[0]
        y_val = params[1]    
        model.fit([X[c] for c in X.columns], y, batch_size = 1024, epochs = 5, validation_data = ([X_val[c] for c in X_val.columns], y_val))
    else:
        model.fit([X[c] for c in X.columns], y, batch_size = 1024, epochs = 1)
    return model
Code Example #30
File: test_core.py | Project: dotzlab/keras
def test_merge():
    from keras.layers import Input, merge
    from keras.models import Model
    from keras import backend as K
    import numpy as np

    # test modes: 'sum', 'mul', 'concat', 'ave', 'cos', 'dot'.
    input_shapes = [(3, 2), (3, 2)]
    inputs = [np.random.random(shape) for shape in input_shapes]

    # test functional API
    for mode in ['sum', 'mul', 'concat', 'ave']:
        print(mode)
        input_a = Input(shape=input_shapes[0][1:])
        input_b = Input(shape=input_shapes[1][1:])
        merged = merge([input_a, input_b], mode=mode)
        model = Model([input_a, input_b], merged)
        model.compile('rmsprop', 'mse')

        expected_output_shape = model.get_output_shape_for(input_shapes)
        actual_output_shape = model.predict(inputs).shape
        assert expected_output_shape == actual_output_shape

        config = model.get_config()
        model = Model.from_config(config)
        model.compile('rmsprop', 'mse')

    # test lambda with output_shape lambda
    input_a = Input(shape=input_shapes[0][1:])
    input_b = Input(shape=input_shapes[1][1:])
    merged = merge([input_a, input_b],
                   mode=lambda tup: K.concatenate([tup[0], tup[1]]),
                   output_shape=lambda tup: tup[0][:-1] + (tup[0][-1] + tup[1][-1],))
    model = Model([input_a, input_b], merged)
    model.compile('rmsprop', 'mse')
    expected_output_shape = model.get_output_shape_for(input_shapes)
    actual_output_shape = model.predict(inputs).shape
    assert expected_output_shape == actual_output_shape

    config = model.get_config()
    model = Model.from_config(config)
    model.compile('rmsprop', 'mse')

    # test function with output_shape function
    def fn_mode(tup):
        x, y = tup
        return K.concatenate([x, y])

    def fn_output_shape(tup):
        s1, s2 = tup
        return s1[:-1] + (s1[-1] + s2[-1],)

    input_a = Input(shape=input_shapes[0][1:])
    input_b = Input(shape=input_shapes[1][1:])
    merged = merge([input_a, input_b],
                   mode=fn_mode,
                   output_shape=fn_output_shape)
    model = Model([input_a, input_b], merged)
    model.compile('rmsprop', 'mse')
    expected_output_shape = model.get_output_shape_for(input_shapes)
    actual_output_shape = model.predict(inputs).shape
    assert expected_output_shape == actual_output_shape

    config = model.get_config()
    model = Model.from_config(config)
    model.compile('rmsprop', 'mse')
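
Note: when mode is a custom callable, merge cannot infer the result shape, hence the explicit output_shape in both variants above. The Keras 2 analogue is a Lambda layer applied to a list of inputs, which takes the same kind of output_shape argument when the backend cannot infer it:

import numpy as np
from keras import backend as K
from keras.layers import Input, Lambda
from keras.models import Model

input_a = Input(shape=(2,))
input_b = Input(shape=(2,))
merged = Lambda(lambda t: K.concatenate([t[0], t[1]]),
                output_shape=lambda s: s[0][:-1] + (s[0][-1] + s[1][-1],))(
    [input_a, input_b])
model = Model([input_a, input_b], merged)
model.compile('rmsprop', 'mse')
out = model.predict([np.random.random((3, 2)), np.random.random((3, 2))])
assert out.shape == (3, 4)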
Code Example #31
File: 3D_UNet_raw_gpu1.py | Project: kirk86/kaggle
def unet_model():

    inputs = Input(shape=(1, max_slices, img_size, img_size))
    conv1 = Convolution3D(width,
                          3,
                          3,
                          3,
                          activation='relu',
                          border_mode='same')(inputs)
    conv1 = BatchNormalization(axis=1)(conv1)
    conv1 = Convolution3D(width * 2,
                          3,
                          3,
                          3,
                          activation='relu',
                          border_mode='same')(conv1)
    conv1 = BatchNormalization(axis=1)(conv1)
    pool1 = MaxPooling3D(pool_size=(2, 2, 2),
                         strides=(2, 2, 2),
                         border_mode='same')(conv1)

    conv2 = Convolution3D(width * 2,
                          3,
                          3,
                          3,
                          activation='relu',
                          border_mode='same')(pool1)
    conv2 = BatchNormalization(axis=1)(conv2)
    conv2 = Convolution3D(width * 4,
                          3,
                          3,
                          3,
                          activation='relu',
                          border_mode='same')(conv2)
    conv2 = BatchNormalization(axis=1)(conv2)
    pool2 = MaxPooling3D(pool_size=(2, 2, 2),
                         strides=(2, 2, 2),
                         border_mode='same')(conv2)

    conv3 = Convolution3D(width * 4,
                          3,
                          3,
                          3,
                          activation='relu',
                          border_mode='same')(pool2)
    conv3 = BatchNormalization(axis=1)(conv3)
    conv3 = Convolution3D(width * 8,
                          3,
                          3,
                          3,
                          activation='relu',
                          border_mode='same')(conv3)
    conv3 = BatchNormalization(axis=1)(conv3)
    pool3 = MaxPooling3D(pool_size=(2, 2, 2),
                         strides=(2, 2, 2),
                         border_mode='same')(conv3)

    conv4 = Convolution3D(width * 8,
                          3,
                          3,
                          3,
                          activation='relu',
                          border_mode='same')(pool3)
    conv4 = BatchNormalization(axis=1)(conv4)
    conv4 = Convolution3D(width * 8,
                          3,
                          3,
                          3,
                          activation='relu',
                          border_mode='same')(conv4)
    conv4 = BatchNormalization(axis=1)(conv4)
    conv4 = Convolution3D(width * 16,
                          3,
                          3,
                          3,
                          activation='relu',
                          border_mode='same')(conv4)
    conv4 = BatchNormalization(axis=1)(conv4)

    up5 = merge([UpSampling3D(size=(2, 2, 2))(conv4), conv3],
                mode='concat',
                concat_axis=1)
    conv5 = SpatialDropout3D(0.2)(up5)
    conv5 = Convolution3D(width * 8,
                          3,
                          3,
                          3,
                          activation='relu',
                          border_mode='same')(conv5)
    #conv5 = BatchNormalization()(conv5)
    conv5 = Convolution3D(width * 8,
                          3,
                          3,
                          3,
                          activation='relu',
                          border_mode='same')(conv5)
    #conv5 = BatchNormalization()(conv5)

    up6 = merge([UpSampling3D(size=(2, 2, 2))(conv5), conv2],
                mode='concat',
                concat_axis=1)
    conv6 = SpatialDropout3D(0.2)(up6)
    conv6 = Convolution3D(width * 4,
                          3,
                          3,
                          3,
                          activation='relu',
                          border_mode='same')(conv6)
    #conv6 = BatchNormalization()(conv6)
    conv6 = Convolution3D(width * 4,
                          3,
                          3,
                          3,
                          activation='relu',
                          border_mode='same')(conv6)
    #conv6 = BatchNormalization()(conv6)

    up7 = merge([UpSampling3D(size=(2, 2, 2))(conv6), conv1],
                mode='concat',
                concat_axis=1)
    conv7 = SpatialDropout3D(0.2)(up7)
    conv7 = Convolution3D(width * 2,
                          3,
                          3,
                          3,
                          activation='relu',
                          border_mode='same')(conv7)
    #conv7 = BatchNormalization()(conv7)
    conv7 = Convolution3D(width * 2,
                          3,
                          3,
                          3,
                          activation='relu',
                          border_mode='same')(conv7)
    #conv7 = BatchNormalization()(conv7)

    conv8 = Convolution3D(1, 1, 1, 1, activation='sigmoid')(conv7)

    model = Model(input=inputs, output=conv8)
    model.compile(optimizer=Adam(lr=1e-5),
                  loss=dice_coef_loss,
                  metrics=[dice_coef])

    return model
Code Example #32
def build_model(input_shape):

    xin = Input(input_shape)

    #shift the below down by one
    x1 = conv_block(xin, 8, activation='relu')
    x1_ident = AveragePooling3D()(xin)
    x1_merged = merge([x1, x1_ident], mode='concat', concat_axis=1)

    x2_1 = conv_block(x1_merged, 24, activation='relu')  #outputs 37 ch
    x2_ident = AveragePooling3D()(x1_ident)
    x2_merged = merge([x2_1, x2_ident], mode='concat', concat_axis=1)

    #by branching we reduce the #params
    x3_ident = AveragePooling3D()(x2_ident)
    x3_malig = conv_block(x2_merged, 48,
                          activation='relu')  #outputs 25 + 16 ch = 41
    x3_malig_merged = merge([x3_malig, x3_ident], mode='concat', concat_axis=1)

    x4_ident = AveragePooling3D()(x3_ident)
    x4_malig = conv_block(x3_malig_merged, 64,
                          activation='relu')  #outputs 25 + 16 ch = 41
    x4_merged = merge([x4_malig, x4_ident], mode='concat', concat_axis=1)

    x5_malig = conv_block(x4_merged, 64)  #outputs 25 + 16 ch = 41
    xpool_malig = BatchNormalization(momentum=0.995)(
        GlobalMaxPooling3D()(x5_malig))
    xout_malig = Dense(1, name='o_mal',
                       activation='softplus')(xpool_malig)  #relu output

    x5_diam = conv_block(x4_merged, 64)  #outputs 25 + 16 ch = 41
    xpool_diam = BatchNormalization(momentum=0.995)(
        GlobalMaxPooling3D()(x5_diam))
    xout_diam = Dense(1, name='o_diam',
                      activation='softplus')(xpool_diam)  #relu output

    x5_lob = conv_block(x4_merged, 64)  #outputs 25 + 16 ch = 41
    xpool_lob = BatchNormalization(momentum=0.995)(
        GlobalMaxPooling3D()(x5_lob))
    xout_lob = Dense(1, name='o_lob',
                     activation='softplus')(xpool_lob)  #relu output

    x5_spic = conv_block(x4_merged, 64)  #outputs 25 + 16 ch = 41
    xpool_spic = BatchNormalization(momentum=0.995)(
        GlobalMaxPooling3D()(x5_spic))
    xout_spic = Dense(1, name='o_spic',
                      activation='softplus')(xpool_spic)  #relu output

    model = Model(input=xin,
                  output=[xout_diam, xout_lob, xout_spic, xout_malig])

    if input_shape[1] == 32:
        lr_start = .01
    elif input_shape[1] == 64:
        lr_start = .003
    elif input_shape[1] == 128:
        lr_start = .002
    # elif input_shape[1] == 96:
    # lr_start = 5e-4

    opt = Nadam(lr_start, clipvalue=1.0)
    print('compiling model')

    model.compile(optimizer=opt,
                  loss='mse',
                  loss_weights={
                      'o_diam': 0.06,
                      'o_lob': 0.5,
                      'o_spic': 0.5,
                      'o_mal': 1.0
                  })
    return model
Code Example #33
def main():
    # load data
    print("loading data...")
    ts = time.time()
    datapath = os.path.join(Paramater.DATAPATH, "2016", month)
    if is_mmn:
        fname = os.path.join(
            datapath, 'CACHE', 'TaxiBJ_C{}_P{}_T{}_{}_mmn_speed.h5'.format(
                len_closeness, len_period, len_trend,
                "External" if hasExternal else "noExternal"))
    else:
        fname = os.path.join(
            datapath, 'CACHE', 'TaxiBJ_C{}_P{}_T{}_{}_speed.h5'.format(
                len_closeness, len_period, len_trend,
                "External" if hasExternal else "noExternal"))
    pkl = fname + '.preprocessing_speed.pkl'
    fn = "48_48_20_LinearInterpolationFixed"
    if os.path.exists(fname) and CACHEDATA:
        X_train, Y_train, X_test, Y_test, mmn, external_dim, \
        timestamp_train, timestamp_test, noConditionRegions, x_num, y_num, z_num = read_cache(fname, is_mmn,
                                                                                              pkl)
        print("load %s successfully" % fname)
    else:
        datapaths = [os.path.join(datapath, fn)]
        noConditionRegionsPath = os.path.join(datapath,
                                              "48_48_20_noSpeedRegion_0.05")
        X_train, Y_train, X_test, Y_test, mmn, external_dim, timestamp_train, timestamp_test, noConditionRegions, \
        x_num, y_num, z_num = Data.loadDataFromRaw(
                paths=datapaths, noSpeedRegionPath=noConditionRegionsPath, nb_flow=nb_flow, len_closeness=len_closeness,
                len_period=len_period, len_trend=len_trend
                , len_test=len_test, maxMinNormalization=is_mmn, preprocess_name=pkl,
                meta_data=hasExternal,
                meteorol_data=hasExternal,
                holiday_data=hasExternal, isComplete=False)

        if CACHEDATA:
            cache(fname, X_train, Y_train, X_test, Y_test, external_dim,
                  timestamp_train, timestamp_test, noConditionRegions, is_mmn,
                  x_num, y_num, nb_flow)
    z_num = nb_flow
    # print("\n days (test): ", [v[:8] for v in timestamp_test[0::72]])
    print("\nelapsed time (loading data): %.3f seconds\n" % (time.time() - ts))

    print('=' * 10)
    print("compiling model_train...")
    print(
        "**the first time it runs, compilation can take a few minutes if you use [Theano] as the backend**"
    )

    ts = time.time()

    X_train, Y_train = Data.getSequenceXY(X_train, Y_train, step)
    Y_train_final = Y_train[:, -1]
    X_test, Y_test = Data.getSequenceXY(X_test, Y_test, step)
    Y_test_final = Y_test[:, -1]
    X_train.append(Y_train)
    X_test.append(Y_test)

    timestamp_train = timestamp_train[step - 1:]
    timestamp_test = timestamp_test[step - 1:]

    if use_diff_test:
        X_test_old = X_test
        Y_test_old = Y_test
        import pandas as pd
        df_diff = pd.read_csv("./data/2016/all/" + fn + "_diff.csv",
                              index_col=0)
        # timestamps whose diff exceeds 200 (335 of them) are used as the test set
        test_time = df_diff[df_diff["diff"] > 200]["time"].values
        timestamp_train_dict = dict(
            zip(timestamp_train, range(len(timestamp_train))))
        timestamp_test_dict = dict(
            zip(timestamp_test, range(len(timestamp_test))))
        new_X_test = []
        new_Y_test = []
        if isinstance(X_train, list):
            for _ in range(len(X_train)):
                new_X_test.append([])
        for _test_time in test_time:
            _test_time = str(_test_time)
            if (_test_time in timestamp_train_dict):
                index = timestamp_train_dict[_test_time]
                if isinstance(X_train, list):
                    for i in range(len(X_train)):
                        new_X_test[i].append(X_train[i][index])
                else:
                    new_X_test.append(X_train[index])
                new_Y_test.append(Y_train[index])

            if (_test_time in timestamp_test_dict):
                index = timestamp_test_dict[_test_time]
                if isinstance(X_test_old, list):
                    for i in range(len(X_test_old)):
                        new_X_test[i].append(X_test_old[i][index])
                else:
                    new_X_test.append(X_test_old[index])
                new_Y_test.append(Y_test_old[index])

                # if (_test_time not in timestamp_train_dict and _test_time not in timestamp_test_dict):
                #     print(_test_time)

        if isinstance(X_train, list):  # fix: check the model-input structure; new_X_test itself is always a list
            for i in range(len(new_X_test)):
                new_X_test[i] = np.stack(new_X_test[i], axis=0)
        else:
            new_X_test = np.stack(new_X_test, axis=0)
        new_Y_test = np.stack(new_Y_test, axis=0)

        # if isinstance(new_X_test, list):
        #     for i in range(len(new_X_test)):
        #         print(new_X_test[i].shape)
        # else:
        #     print(new_X_test.shape)
        # print(new_Y_test.shape)
        X_test = new_X_test
        Y_test = new_Y_test
        Y_test_final = Y_test[:, -1]

    # print "X_test len:", len(X_test)
    # for x in X_test:
    #     print x.shape
    # print Y_test.shape
    # print z_num, x_num, y_num
    print "start build model_train"

    outputs = []
    inputs = []

    resUnit_share_layers = []
    resUnit_share_layers2 = []
    resUnit_share_layers3 = []
    shared_conv1 = Convolution2D(filters=64,
                                 kernel_size=(3, 3),
                                 padding="same")
    shared_conv2 = Convolution2D(nb_filter=nb_flow,
                                 nb_row=3,
                                 nb_col=3,
                                 border_mode="same")

    shared_conv3 = Convolution2D(filters=64,
                                 kernel_size=(3, 3),
                                 padding="same")
    shared_conv4 = Convolution2D(nb_filter=nb_flow,
                                 nb_row=3,
                                 nb_col=3,
                                 border_mode="same")
    shared_conv5 = Convolution2D(filters=64,
                                 kernel_size=(3, 3),
                                 padding="same")
    shared_conv6 = Convolution2D(nb_filter=nb_flow,
                                 nb_row=3,
                                 nb_col=3,
                                 border_mode="same")

    shared_convLSTM_period = ConvLSTM2D(nb_filter=32,
                                        nb_row=3,
                                        nb_col=3,
                                        border_mode="same")
    shared_conv_period = Convolution2D(nb_filter=nb_flow,
                                       nb_row=3,
                                       nb_col=3,
                                       border_mode="same")

    shared_convLSTM_trend = ConvLSTM2D(nb_filter=32,
                                       nb_row=3,
                                       nb_col=3,
                                       border_mode="same")
    shared_conv_trend = Convolution2D(nb_filter=nb_flow,
                                      nb_row=3,
                                      nb_col=3,
                                      border_mode="same")

    shared_ilayers = []

    shared_embeding = Dense(output_dim=10)
    shared_embeding2 = Dense(output_dim=nb_flow * x_num * y_num)

    assert l < step
    for _ in range(step):
        main_outputs = []
        if len_closeness > 0:
            input = Input(shape=(nb_flow * len_closeness, x_num, y_num))
            inputs.append(input)
            # Conv1
            conv1 = shared_conv1(input)
            # [nb_residual_unit] Residual Units
            resUnit_share_index = [0]
            residual_output = ResUnits(_residual_unit,
                                       nb_filter=64,
                                       repetations=nb_residual_unit,
                                       share=True,
                                       shareIndex=resUnit_share_index,
                                       shares=resUnit_share_layers)(conv1)
            # Conv2
            activation = Activation('relu')(residual_output)
            conv2 = shared_conv2(activation)
            main_outputs.append(conv2)

            # input = Input(shape=(nb_flow * len_closeness, x_num, y_num))
            # inputs.append(input)
            # # conv1 = Convolution2D(nb_filter=64, nb_row=3, nb_col=3, border_mode="same")(input)
            # # act1 = Activation("relu")(conv1)
            # reshape = Reshape((len_closeness, nb_flow, x_num, y_num))(input)
            # convLSTM = ConvLSTM2D(nb_filter=32, nb_row=3, nb_col=3, border_mode="same")(reshape)
            # act2 = Activation("relu")(convLSTM)
            # conv2 = Convolution2D(nb_filter=nb_flow, nb_row=3, nb_col=3, border_mode="same")(act2)
            # main_outputs.append(conv2)

        if len_period > 0:
            input = Input(shape=(nb_flow * len_period, x_num, y_num))
            inputs.append(input)
            # Conv1
            conv1 = shared_conv3(input)
            # [nb_residual_unit] Residual Units
            resUnit_share_index = [0]
            residual_output = ResUnits(_residual_unit,
                                       nb_filter=64,
                                       repetations=nb_residual_unit,
                                       share=True,
                                       shareIndex=resUnit_share_index,
                                       shares=resUnit_share_layers2)(conv1)
            # Conv2
            activation = Activation('relu')(residual_output)
            conv2 = shared_conv4(activation)
            main_outputs.append(conv2)
            # input = Input(shape=(nb_flow * len_period, x_num, y_num))
            # inputs.append(input)
            # # conv1 = Convolution2D(nb_filter=64, nb_row=3, nb_col=3, border_mode="same")(input)
            # # act1 = Activation("relu")(conv1)
            # input = Reshape((len_period, nb_flow, x_num, y_num))(input)
            # convLSTM = shared_convLSTM_period(input)
            # act2 = Activation("relu")(convLSTM)
            # conv2 = shared_conv_period(act2)
            # main_outputs.append(conv2)

        if len_trend > 0:
            input = Input(shape=(nb_flow * len_trend, x_num, y_num))
            inputs.append(input)
            # Conv1
            conv1 = shared_conv5(input)
            # [nb_residual_unit] Residual Units
            resUnit_share_index = [0]
            residual_output = ResUnits(_residual_unit,
                                       nb_filter=64,
                                       repetations=nb_residual_unit,
                                       share=True,
                                       shareIndex=resUnit_share_index,
                                       shares=resUnit_share_layers3)(conv1)
            # Conv2
            activation = Activation('relu')(residual_output)
            conv2 = shared_conv6(activation)
            main_outputs.append(conv2)
            # input = Input(shape=(nb_flow * len_trend, x_num, y_num))
            # inputs.append(input)
            # # conv1 = Convolution2D(nb_filter=64, nb_row=3, nb_col=3, border_mode="same")(input)
            # # act1 = Activation("relu")(conv1)
            # reshape = Reshape((len_trend, nb_flow, x_num, y_num))(input)
            # convLSTM = shared_convLSTM_trend(reshape)
            # act2 = Activation("relu")(convLSTM)
            # conv2 = shared_conv_trend(act2)
            # main_outputs.append(conv2)

        if len(main_outputs) == 1:
            main_output = main_outputs[0]
        else:
            new_outputs = []
            for index, output in enumerate(main_outputs):
                if (len(shared_ilayers) <= index):
                    shared_ilayers.append(iLayer())

                new_outputs.append(shared_ilayers[index](output))
            main_output = merge(new_outputs, mode='sum')

        if external_dim is not None and external_dim > 0:
            # external input
            external_input = Input(shape=(external_dim, ))
            inputs.append(external_input)
            embedding = shared_embeding(external_input)
            embedding = Activation('relu')(embedding)
            h1 = shared_embeding2(embedding)
            activation = Activation('relu')(h1)
            external_output = Reshape((nb_flow, x_num, y_num))(activation)
            main_output = merge([main_output, external_output], mode='sum')

        main_output = Activation('tanh')(main_output)
        outputs.append(main_output)

    main_output = merge(outputs, mode="concat", concat_axis=1)
    predict_sequence = Reshape((step, z_num, x_num, y_num))(main_output)

    input_targets = Input(shape=(step, z_num, x_num, y_num),
                          name="input_targets")
    inputs.append(input_targets)
    main_output = eRNN(error_hidden_dim, (z_num, x_num, y_num), l,
                       False)([predict_sequence, input_targets])

    model_train = Model(inputs=inputs, outputs=[predict_sequence, main_output])
    adam = Adam(lr=lr)
    model_train.compile(loss=['mse', 'mse'],
                        loss_weights=[0.2, 1],
                        optimizer=adam,
                        metrics=[metrics.rmse])
    # model_train.compile(loss=lambda y_true,y_preiod: K.mean(K.square(y_preiod - y_true), axis=-1), optimizer=adam, metrics=[metrics.rmse])
    # model_predict = Model(input=inputs, output=main_output)
    # model_predict.compile(optimizer=adam,loss="mse",metrics=metrics.rmse)
    model_train.summary()
    print "finish build model_train"

    hyperparams_name = 'testMyModel4(ernn_{}_h{}_l{}_step{})_speed.c{}.p{}.t{}.resunit{}.lr{}.{}.{}'.format(
        ernn_weight, error_hidden_dim, l, step, len_closeness, len_period,
        len_trend, nb_residual_unit, lr,
        "External" if hasExternal else "noExternal",
        "MMN" if is_mmn else "noMMN")

    fname_param = os.path.join(path_model,
                               '{}.best.h5'.format(hyperparams_name))
    early_stopping = EarlyStopping(monitor='val_e_rnn_1_rmse',
                                   patience=4,
                                   mode='min')
    model_checkpoint = ModelCheckpoint(fname_param,
                                       monitor='val_e_rnn_1_rmse',
                                       verbose=0,
                                       save_best_only=True,
                                       mode='min',
                                       save_weights_only=True)

    print("\nelapsed time (compiling model_train): %.3f seconds\n" %
          (time.time() - ts))

    print('=' * 10)
    print("training model_train...")
    ts = time.time()

    history = model_train.fit(X_train, [Y_train, Y_train_final],
                              epochs=nb_epoch,
                              batch_size=batch_size,
                              validation_split=0.1,
                              callbacks=[early_stopping, model_checkpoint],
                              verbose=1)

    model_train.save_weights(os.path.join(path_model,
                                          '{}.h5'.format(hyperparams_name)),
                             overwrite=True)
    pickle.dump((history.history),
                open(
                    os.path.join(path_result,
                                 '{}.history.pkl'.format(hyperparams_name)),
                    'wb'))
    print("\nelapsed time (training): %.3f seconds\n" % (time.time() - ts))

    print('=' * 10)
    print(
        'evaluating using the model_train that has the best loss on the valid set'
    )
    ts = time.time()
    model_train.load_weights(fname_param)
    score = model_train.evaluate(X_train, [Y_train, Y_train_final],
                                 batch_size=Y_train.shape[0] // 48,
                                 verbose=0)

    if is_mmn:
        print('Train score: %.6f rmse (norm): %.6f rmse (real): %.6f' %
              (score[0], score[1], score[1] * (mmn._max - mmn._min) / 2.))
    else:
        print('Train score: %.6f rmse (real): %.6f' % (score[0], score[1]))

    score = model_train.evaluate(X_test, [Y_test, Y_test_final],
                                 batch_size=Y_test.shape[0] // 12,
                                 verbose=0)

    if is_mmn:
        print('Test score: %.6f rmse (norm): %.6f rmse (real): %.6f' %
              (score[0], score[1], score[1] * (mmn._max - mmn._min) / 2.))
    else:
        print('Test score: %.6f rmse (real): %.6f' % (score[0], score[1]))

    if not is_mmn:
        predict = model_train.predict(X_test)[1]
    else:
        predict = mmn.inverse_transform(model_train.predict(X_test)[1])
        Y_test_final = mmn.inverse_transform(Y_test_final)

    # predict = predict[:, -1]
    # Y_test = Y_test[:, -1]

    # print("predict", predict)
    # print("test", Y_test_final)
    rmse = round(Metric.RMSE(predict, Y_test_final, noConditionRegions), 5)
    save_result(
        predict, Y_test_final, timestamp_test,
        "./result/{}_predict_rmse{}".format(hyperparams_name, str(rmse)))
    print("RMSE:", rmse)
    # print("accuracy", Metric.accuracy(predict, Y_test, noConditionRegions))

    print("\nelapsed time (eval): %.3f seconds\n" % (time.time() - ts))
    exit(1)
Code Example #34
    # name each branch's layers directly (the original assigned name strings to
    # conv_name[i]/pool_name[i]/flat_name[i] and then immediately overwrote them)
    for n_gram in filter_sizes:
        conv_name[i] = Convolution1D(nb_filter=nb_filter,
                                     filter_length=n_gram,
                                     border_mode='valid',
                                     activation='relu',
                                     subsample_length=1,
                                     input_dim=embeddings_dim,
                                     input_length=max_sent_len,
                                     name='conv_' + str(n_gram))(Drop1)
        pool_name[i] = MaxPooling1D(pool_length=max_sent_len - n_gram + 1,
                                    name='maxpool_' + str(n_gram))(conv_name[i])
        flat_name[i] = Flatten(name='flat_' + str(n_gram))(pool_name[i])
        i += 1
    merged = merge([flat_name[0], flat_name[1], flat_name[2]], mode='concat')
    dropout_final = Dropout(dropout_prob[1])(merged)
    Dense_final = Dense(1,
                        input_dim=nb_filter * len(filter_sizes))(dropout_final)
    Out = Dense(1, activation='sigmoid')(Dense_final)
    model = Model(inputs=main_input, outputs=Out)

    # Print model summary
    # ==================================================
    print(model.summary())

    #Visualize the model in a graph
    #1st method
    #from IPython.display import SVG
    #from keras.utils.vis_utils import model_to_dot
    #from keras.utils import vis_utils
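The fragment above is the standard multi-filter-width text-CNN pattern: parallel Conv1D branches, one per n-gram size, each max-pooled over time, then concatenated. A self-contained minimal sketch of the same idea in the legacy Keras 1.x API used throughout these examples (all sizes here are placeholder assumptions):

from keras.layers import (Input, Embedding, Convolution1D, MaxPooling1D,
                          Flatten, Dropout, Dense, merge)
from keras.models import Model

max_sent_len, vocab_size, emb_dim, nb_filter = 50, 10000, 100, 64
main_input = Input(shape=(max_sent_len, ), dtype='int32')
emb = Embedding(vocab_size, emb_dim, input_length=max_sent_len)(main_input)
branches = []
for n_gram in (3, 4, 5):  # one branch per filter width
    c = Convolution1D(nb_filter=nb_filter, filter_length=n_gram,
                      border_mode='valid', activation='relu')(emb)
    p = MaxPooling1D(pool_length=max_sent_len - n_gram + 1)(c)  # pool over time
    branches.append(Flatten()(p))
merged = merge(branches, mode='concat')
out = Dense(1, activation='sigmoid')(Dropout(0.5)(merged))
model = Model(input=main_input, output=out)
model.compile(optimizer='adam', loss='binary_crossentropy')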
Code Example #35
def unet(X,
         Y,
         train_idxs,
         test_idxs,
         nb_epoch=20,
         optimizer='adagrad',
         batch_size=32):
    # Architecture from: https://github.com/jocicmarko/ultrasound-nerve-segmentation
    # Original paper: http://lmb.informatik.uni-freiburg.de/people/ronneber/u-net/
    import numpy as np
    from keras.models import Model
    from keras.layers import Input, merge, Convolution2D, MaxPooling2D, UpSampling2D
    #from keras.optimizers import Adam
    np.random.seed(2016)  # For reproducibility:
    #from sklearn.metrics import (precision_score, recall_score, f1_score, accuracy_score)

    # Process data:
    X_train, X_test, Y_train, Y_test, N_samples, channels, img_rows, img_cols = process_XY(
        X, Y, train_idxs, test_idxs, mask=True)

    #inputs = Input((1, X.shape[0], img_cols))
    inputs = Input((channels, img_rows, img_cols))
    conv1 = Convolution2D(32, 3, 3, activation='relu',
                          border_mode='same')(inputs)
    conv1 = Convolution2D(32, 3, 3, activation='relu',
                          border_mode='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Convolution2D(64, 3, 3, activation='relu',
                          border_mode='same')(pool1)
    conv2 = Convolution2D(64, 3, 3, activation='relu',
                          border_mode='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Convolution2D(128, 3, 3, activation='relu',
                          border_mode='same')(pool2)
    conv3 = Convolution2D(128, 3, 3, activation='relu',
                          border_mode='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Convolution2D(256, 3, 3, activation='relu',
                          border_mode='same')(pool3)
    conv4 = Convolution2D(256, 3, 3, activation='relu',
                          border_mode='same')(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = Convolution2D(512, 3, 3, activation='relu',
                          border_mode='same')(pool4)
    conv5 = Convolution2D(512, 3, 3, activation='relu',
                          border_mode='same')(conv5)

    up6 = merge([UpSampling2D(size=(2, 2))(conv5), conv4],
                mode='concat',
                concat_axis=1)
    conv6 = Convolution2D(256, 3, 3, activation='relu',
                          border_mode='same')(up6)
    conv6 = Convolution2D(256, 3, 3, activation='relu',
                          border_mode='same')(conv6)

    up7 = merge([UpSampling2D(size=(2, 2))(conv6), conv3],
                mode='concat',
                concat_axis=1)
    conv7 = Convolution2D(128, 3, 3, activation='relu',
                          border_mode='same')(up7)
    conv7 = Convolution2D(128, 3, 3, activation='relu',
                          border_mode='same')(conv7)

    up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2],
                mode='concat',
                concat_axis=1)
    conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up8)
    conv8 = Convolution2D(64, 3, 3, activation='relu',
                          border_mode='same')(conv8)

    up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1],
                mode='concat',
                concat_axis=1)
    conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up9)
    conv9 = Convolution2D(32, 3, 3, activation='relu',
                          border_mode='same')(conv9)

    conv10 = Convolution2D(1, 1, 1, activation='sigmoid')(conv9)

    model = Model(input=inputs, output=conv10)

    model.compile(optimizer=optimizer,
                  loss=dice_coef_loss,
                  metrics=[dice_coef])  # Adam(lr=1e-5)

    model.fit(X_train,
              Y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              verbose=1,
              validation_data=(X_test, Y_test),
              shuffle=True)

    import time
    time.sleep(1)
    if len(X_test) > 0:
        score = []
        Y_pred = model.predict(X_test)
        dc = dice_coef(Y_test, Y_pred)  # note: returns a backend tensor, not a Python float
        score = dc
        print('Dice coefficient: ' + str(dc))
    #        y_pred = model.predict(X_test)
    #        y_pred = y_pred[:,0]
    #        accuracy = accuracy_score(Y_test, y_pred)
    #        recall = recall_score(Y_test, y_pred)
    #        precision = precision_score(Y_test, y_pred)
    #        f1 = f1_score(Y_test, y_pred)
    #        print('Accuracy: {}'.format(accuracy))
    #        print('Recall: {}'.format(recall))
    #        print('Precision: {}'.format(precision))
    #        print('F1: {}'.format(f1))
    #        score = np.array([accuracy, recall, precision, f1])
    else:
        score = []
    return model, score
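unet() relies on dice_coef and dice_coef_loss, which are not defined in this excerpt. A typical implementation, in the spirit of the ultrasound-nerve-segmentation repository referenced above (the smooth constant is an assumption):

from keras import backend as K

def dice_coef(y_true, y_pred, smooth=1.0):
    # 2*|A∩B| / (|A|+|B|), smoothed to avoid division by zero
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

def dice_coef_loss(y_true, y_pred):
    return -dice_coef(y_true, y_pred)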
Code Example #36
File: DeepLearn.py  Project: xxyy1/--DataCastle
# normalize test data
X_test_uid = np.array(X_test_uid)
X_test_uid = X_test_uid.reshape(X_test_uid.shape[0], 1)

X_test_iid = np.array(X_test_iid)
X_test_iid = X_test_iid.reshape(X_test_iid.shape[0], 1)

# define model
input_1 = Input(shape=(1, ), dtype='int32')
input_2 = Input(shape=(1, ), dtype='int32')
x1 = Embedding(output_dim=128, input_dim=223970, input_length=1)(input_1)
x2 = Embedding(output_dim=128, input_dim=14726, input_length=1)(input_2)
x1 = Flatten()(x1)
x2 = Flatten()(x2)
x = merge([x1, x2], mode='concat')
x = Dropout(0.2)(x)
x = Dense(512, activation='relu')(x)
x = Dropout(0.2)(x)
x = Dense(64, activation='relu')(x)
x = Dropout(0.2)(x)
out = Dense(1, activation='sigmoid')(x)
model = Model(input=[input_1, input_2], output=out)
model.compile(optimizer='rmsprop',
              loss='mean_squared_error',
              metrics=[])
# train model
model.fit([X_train_uid, X_train_iid], Y_train_score,
          nb_epoch=10, batch_size=1024*6)

# predict
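The excerpt ends at the prediction step; a plausible continuation under the same variable names (the batch size mirroring the one used for training is an assumption):

Y_pred = model.predict([X_test_uid, X_test_iid], batch_size=1024 * 6)
print(Y_pred[:10])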
Code Example #37
def smart_merge(vectors, **kwargs):
    return vectors[0] if len(vectors) == 1 else merge(vectors, **kwargs)
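smart_merge exists because the legacy merge() requires at least two tensors; a single tensor is passed through untouched. A usage sketch (inp, a, and b are illustrative):

from keras.layers import Input, Dense, merge

inp = Input(shape=(4, ))
a = Dense(8)(inp)
b = Dense(8)(inp)
just_a = smart_merge([a])                 # one tensor: returned unchanged
summed = smart_merge([a, b], mode='sum')  # several tensors: merged elementwise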
Code Example #38
def create_model(X_vocab_len, X_max_len, y_vocab_len, y_max_len,
                 n_phonetic_features, y1, n1, y2, n2, y3, n3, y4, n4, y5, n5,
                 y6, n6, hidden_size, num_layers):
    def smart_merge(vectors, **kwargs):
        return vectors[0] if len(vectors) == 1 else merge(vectors, **kwargs)

    current_word = Input(shape=(X_max_len, ), dtype='float32',
                         name='input1')  # for encoder (shared)
    decoder_input = Input(shape=(X_max_len, ), dtype='float32',
                          name='input3')  # for decoder -- attention
    right_word1 = Input(shape=(X_max_len, ), dtype='float32', name='input4')
    right_word2 = Input(shape=(X_max_len, ), dtype='float32', name='input5')
    right_word3 = Input(shape=(X_max_len, ), dtype='float32', name='input6')
    right_word4 = Input(shape=(X_max_len, ), dtype='float32', name='input7')
    left_word1 = Input(shape=(X_max_len, ), dtype='float32', name='input8')
    left_word2 = Input(shape=(X_max_len, ), dtype='float32', name='input9')
    left_word3 = Input(shape=(X_max_len, ), dtype='float32', name='input10')
    left_word4 = Input(shape=(X_max_len, ), dtype='float32', name='input11')
    phonetic_input = Input(shape=(n_phonetic_features, ),
                           dtype='float32',
                           name='input12')

    emb_layer1 = Embedding(X_vocab_len,
                           EMBEDDING_DIM,
                           input_length=X_max_len,
                           mask_zero=False,
                           name='Embedding')

    list_of_inputs = [
        current_word, right_word1, right_word2, right_word3, right_word4,
        left_word1, left_word2, left_word3, left_word4
    ]

    current_word_embedding, right_word_embedding1, right_word_embedding2,right_word_embedding3, right_word_embedding4, \
     left_word_embedding1, left_word_embedding2, left_word_embedding3, left_word_embedding4 = [emb_layer1(i) for i in list_of_inputs]

    print("Type:: ", type(current_word_embedding))
    list_of_embeddings1 = [current_word_embedding, right_word_embedding1, right_word_embedding2,right_word_embedding3, right_word_embedding4, \
     left_word_embedding1, left_word_embedding2, left_word_embedding3, left_word_embedding4]

    list_of_embeddings = [
        Dropout(0.50, name='drop1_' + str(j))(i)
        for i, j in zip(list_of_embeddings1, range(len(list_of_embeddings1)))
    ]
    list_of_embeddings = [
        GaussianNoise(0.05, name='noise1_' + str(j))(i)
        for i, j in zip(list_of_embeddings, range(len(list_of_embeddings)))
    ]

    conv4_curr, conv4_right1, conv4_right2, conv4_right3, conv4_right4, conv4_left1, conv4_left2, conv4_left3, conv4_left4 =\
      [Conv1D(filters=no_filters,
       kernel_size=4, padding='valid',activation='relu',
       strides=1, name='conv4_'+str(j))(i) for i,j in zip(list_of_embeddings, range(len(list_of_embeddings)))]

    conv4s = [
        conv4_curr, conv4_right1, conv4_right2, conv4_right3, conv4_right4,
        conv4_left1, conv4_left2, conv4_left3, conv4_left4
    ]
    maxPool4 = [
        MaxPooling1D(name='max4_' + str(j))(i)
        for i, j in zip(conv4s, range(len(conv4s)))
    ]
    avgPool4 = [
        AveragePooling1D(name='avg4_' + str(j))(i)
        for i, j in zip(conv4s, range(len(conv4s)))
    ]

    pool4_curr, pool4_right1, pool4_right2, pool4_right3, pool4_right4, pool4_left1, pool4_left2, pool4_left3, pool4_left4 = \
     [merge([i,j], name='merge_conv4_'+str(k)) for i,j,k in zip(maxPool4, avgPool4, range(len(maxPool4)))]

    conv5_curr, conv5_right1, conv5_right2, conv5_right3, conv5_right4, conv5_left1, conv5_left2, conv5_left3, conv5_left4 = \
      [Conv1D(filters=no_filters,
       kernel_size=5,
       padding='valid',
       activation='relu',
       strides=1, name='conv5_'+str(j))(i) for i,j in zip(list_of_embeddings, range(len(list_of_embeddings)))]

    conv5s = [
        conv5_curr, conv5_right1, conv5_right2, conv5_right3, conv5_right4,
        conv5_left1, conv5_left2, conv5_left3, conv5_left4
    ]
    maxPool5 = [
        MaxPooling1D(name='max5_' + str(j))(i)
        for i, j in zip(conv5s, range(len(conv5s)))
    ]
    avgPool5 = [
        AveragePooling1D(name='avg5_' + str(j))(i)
        for i, j in zip(conv5s, range(len(conv5s)))
    ]

    pool5_curr, pool5_right1, pool5_right2, pool5_right3, pool5_right4, pool5_left1, pool5_left2, pool5_left3, pool5_left4 = \
     [merge([i,j], name='merge_conv5_'+str(k)) for i,j,k in zip(maxPool5, avgPool5, range(len(maxPool5)))]


    maxPools = [pool4_curr, pool4_right1, pool4_right2, pool4_right3, pool4_right4, \
     pool4_left1, pool4_left2, pool4_left3, pool4_left4, \
     pool5_curr, pool5_right1, pool5_right2, pool5_right3, pool5_right4, \
     pool5_left1, pool5_left2, pool5_left3, pool5_left4]

    concat = merge(maxPools, mode='concat', name='main_merge')

    x = Dropout(0.15, name='drop_single1')(concat)
    x = Bidirectional(RNN(rnn_output_size), name='bidirec1')(x)

    total_features = [x, phonetic_input]
    concat2 = merge(total_features, mode='concat', name='phonetic_merging')

    x = Dense(HIDDEN_DIM,
              activation='relu',
              kernel_initializer='he_normal',
              kernel_constraint=maxnorm(3),
              bias_constraint=maxnorm(3),
              name='dense1')(concat2)
    x = Dropout(0.15, name='drop_single2')(x)
    x = Dense(HIDDEN_DIM,
              kernel_initializer='he_normal',
              activation='tanh',
              kernel_constraint=maxnorm(3),
              bias_constraint=maxnorm(3),
              name='dense2')(x)
    x = Dropout(0.15, name='drop_single3')(x)

    out1 = Dense(n1,
                 kernel_initializer='he_normal',
                 activation='softmax',
                 name='output1')(x)
    out2 = Dense(n2,
                 kernel_initializer='he_normal',
                 activation='softmax',
                 name='output2')(x)
    out3 = Dense(n3,
                 kernel_initializer='he_normal',
                 activation='softmax',
                 name='output3')(x)
    out4 = Dense(n4,
                 kernel_initializer='he_normal',
                 activation='softmax',
                 name='output4')(x)
    out5 = Dense(n5,
                 kernel_initializer='he_normal',
                 activation='softmax',
                 name='output5')(x)
    out6 = Dense(n6,
                 kernel_initializer='he_normal',
                 activation='softmax',
                 name='output6')(x)

    # Luong et al. 2015 attention model
    emb_layer = Embedding(X_vocab_len,
                          EMBEDDING_DIM,
                          input_length=X_max_len,
                          mask_zero=True,
                          name='Embedding_for_seq2seq')

    current_word_embedding, right_word_embedding1, right_word_embedding2,right_word_embedding3, right_word_embedding4, \
     left_word_embedding1, left_word_embedding2, left_word_embedding3, left_word_embedding4 = [emb_layer(i) for i in list_of_inputs]

    # current_word_embedding = smart_merge([ current_word_embedding, right_word_embedding1,  left_word_embedding1])

    encoder, state = GRU(rnn_output_size,
                         return_sequences=True,
                         unroll=True,
                         return_state=True,
                         name='encoder')(current_word_embedding)
    encoder_last = encoder[:, -1, :]

    decoder = emb_layer(decoder_input)
    decoder = GRU(rnn_output_size,
                  return_sequences=True,
                  unroll=True,
                  name='decoder')(decoder, initial_state=[encoder_last])

    attention = dot([decoder, encoder], axes=[2, 2], name='dot')
    attention = Activation('softmax', name='attention')(attention)

    context = dot([attention, encoder], axes=[2, 1], name='dot2')
    decoder_combined_context = concatenate([context, decoder],
                                           name='concatenate')

    outputs = TimeDistributed(Dense(64, activation='tanh'),
                              name='td1')(decoder_combined_context)
    outputs = TimeDistributed(Dense(X_vocab_len, activation='softmax'),
                              name='td2')(outputs)

    all_inputs = [current_word, decoder_input, right_word1, right_word2, right_word3, right_word4, left_word1, left_word2, left_word3,\
         left_word4, phonetic_input]
    all_outputs = [outputs, out1, out2, out3, out4, out5, out6]

    model = Model(input=all_inputs, output=all_outputs)
    opt = Adam()

    return model
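Note that create_model builds opt = Adam() but returns the model uncompiled, so opt is unused in this excerpt; presumably the caller compiles it. A plausible completion, placed just before the return (applying one loss across the seq2seq output and the six softmax heads is an assumption, not from the source):

    model.compile(optimizer=opt,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])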
Code Example #39
def building_residual_block(input_shape,
                            n_feature_maps,
                            kernel_sizes=None,
                            n_skip=2,
                            is_subsample=False,
                            subsample=None):
    '''
    [1] Building block of layers for residual learning.
        Code based on https://github.com/ndronen/modeling/blob/master/modeling/residual.py,
        but it modifies the (perhaps) incorrect relu(f)+x ordering, and it is adapted for conv layers.
    [2] MaxPooling is used instead of strided convolution to make it easier
        to set size(output of short-cut) == size(output of conv-layers).
        If you want to remove MaxPooling:
           i) change border_mode of the shortcut Convolution2D from 'same' to 'valid';
           ii) uncomment ZeroPadding2D in the conv layers.
               (The following Conv2D is then no longer the first layer of this container,
                so you can remove the input_shape in line 101, the line with the comment #'OPTION'.)
    [3] It can be used both when the block subsamples and when it does not.
    [4] In the short-cut connection, a 1x1 convolution is used to increase #channels.
        This happens when is_expand_channels == True.
    input_shape  = (num_channel, height, width)
    n_feature_maps: number of feature maps. In ResidualNet it increases whenever the image is downsampled.
    kernel_sizes : list or tuple, e.g. (3, 3) or [3, 3]
    n_skip       : number of layers to skip
    is_subsample : if True, the layers subsample by *subsample* to reduce the size
    subsample    : tuple, e.g. (2, 2) or (1, 2); used only if is_subsample == True
    '''
    # ***** VERBOSE_PART *****
    print('   - New residual block with')
    print('      input shape:', input_shape)
    print('      kernel size:', kernel_sizes)
    # is_expand_channels == True when num_channels increases.
    #    E.g. the very first residual block (e.g. 1->64, 3->128, 128->256, ...)
    is_expand_channels = not (input_shape[0] == n_feature_maps)
    if is_expand_channels:
        print('      - Input channels: %d ---> num feature maps on out: %d' %
              (input_shape[0], n_feature_maps))
    if is_subsample:
        print('      - with subsample:', subsample)
    kernel_row, kernel_col = kernel_sizes
    # set input
    x = Input(shape=(input_shape))
    # ***** SHORTCUT PATH *****
    if is_subsample:  # subsample (+ channel expansion if needed)
        shortcut_y = Convolution2D(n_feature_maps,
                                   kernel_row,
                                   kernel_col,
                                   subsample=subsample,
                                   W_regularizer=l2(0.0001),
                                   border_mode='valid')(x)
    else:  # channel expansion only (e.g. the very first layer of the whole networks)
        if is_expand_channels:
            shortcut_y = Convolution2D(n_feature_maps,
                                       1,
                                       1,
                                       W_regularizer=l2(0.0001),
                                       border_mode='same')(x)
        else:
            # if there is no subsample and no channel expansion, there is nothing to add on the shortcut.
            shortcut_y = x
    # ***** CONVOLUTION_PATH *****
    conv_y = x
    for i in range(n_skip):
        conv_y = BatchNormalization(axis=1)(conv_y)
        conv_y = Activation('relu')(conv_y)
        if i == 0 and is_subsample:  # [Subsample at layer 0 if needed]
            conv_y = Convolution2D(n_feature_maps,
                                   kernel_row,
                                   kernel_col,
                                   subsample=subsample,
                                   W_regularizer=l2(0.0001),
                                   border_mode='valid')(conv_y)
        else:
            conv_y = Convolution2D(n_feature_maps,
                                   kernel_row,
                                   kernel_col,
                                   W_regularizer=l2(0.0001),
                                   border_mode='same')(conv_y)
    # output
    y = merge([shortcut_y, conv_y], mode='sum')
    block = Model(input=x, output=y)
    print('        -- model was built.')
    return block
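A usage sketch for building_residual_block, with channels-first shapes as the input_shape[0] channel check above implies (all sizes illustrative):

# Channel-expanding block: (32, 16, 16) -> (64, 16, 16), spatial size unchanged.
block = building_residual_block(input_shape=(32, 16, 16),
                                n_feature_maps=64,
                                kernel_sizes=(3, 3),
                                n_skip=2)
# Subsampling block: both the shortcut and the conv path stride by (2, 2).
block_down = building_residual_block(input_shape=(64, 16, 16),
                                     n_feature_maps=64,
                                     kernel_sizes=(3, 3),
                                     n_skip=2,
                                     is_subsample=True,
                                     subsample=(2, 2))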
Code Example #40
File: unet.py  Project: escientists/dstl
def get_unet():
    inputs = Input((8, ISZ, ISZ))
    conv1 = Convolution2D(32, 3, 3, activation='relu',
                          border_mode='same')(inputs)
    conv1 = Convolution2D(32, 3, 3, activation='relu',
                          border_mode='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Convolution2D(64, 3, 3, activation='relu',
                          border_mode='same')(pool1)
    conv2 = Convolution2D(64, 3, 3, activation='relu',
                          border_mode='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Convolution2D(128, 3, 3, activation='relu',
                          border_mode='same')(pool2)
    conv3 = Convolution2D(128, 3, 3, activation='relu',
                          border_mode='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Convolution2D(256, 3, 3, activation='relu',
                          border_mode='same')(pool3)
    conv4 = Convolution2D(256, 3, 3, activation='relu',
                          border_mode='same')(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = Convolution2D(512, 3, 3, activation='relu',
                          border_mode='same')(pool4)
    conv5 = Convolution2D(512, 3, 3, activation='relu',
                          border_mode='same')(conv5)

    up6 = merge([UpSampling2D(size=(2, 2))(conv5), conv4],
                mode='concat',
                concat_axis=1)
    conv6 = Convolution2D(256, 3, 3, activation='relu',
                          border_mode='same')(up6)
    conv6 = Convolution2D(256, 3, 3, activation='relu',
                          border_mode='same')(conv6)

    up7 = merge([UpSampling2D(size=(2, 2))(conv6), conv3],
                mode='concat',
                concat_axis=1)
    conv7 = Convolution2D(128, 3, 3, activation='relu',
                          border_mode='same')(up7)
    conv7 = Convolution2D(128, 3, 3, activation='relu',
                          border_mode='same')(conv7)

    up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2],
                mode='concat',
                concat_axis=1)
    conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up8)
    conv8 = Convolution2D(64, 3, 3, activation='relu',
                          border_mode='same')(conv8)

    up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1],
                mode='concat',
                concat_axis=1)
    conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up9)
    conv9 = Convolution2D(32, 3, 3, activation='relu',
                          border_mode='same')(conv9)

    conv10 = Convolution2D(N_Cls, 1, 1, activation='sigmoid')(conv9)

    model = Model(input=inputs, output=conv10)
    model.compile(optimizer=Adam(),
                  loss='binary_crossentropy',
                  metrics=[jaccard_coef, jaccard_coef_int, 'accuracy'])
    return model
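As with the Dice example earlier, jaccard_coef and jaccard_coef_int are not defined in this excerpt. A common form for this kind of multi-band segmentation (the smooth constant and the binarizing round are assumptions):

from keras import backend as K

smooth = 1e-12

def jaccard_coef(y_true, y_pred):
    # intersection-over-union on the soft predictions
    intersection = K.sum(y_true * y_pred, axis=[0, -1, -2])
    union = K.sum(y_true + y_pred, axis=[0, -1, -2]) - intersection
    return K.mean((intersection + smooth) / (union + smooth))

def jaccard_coef_int(y_true, y_pred):
    # same, but with predictions rounded to {0, 1}
    y_pred_pos = K.round(K.clip(y_pred, 0, 1))
    intersection = K.sum(y_true * y_pred_pos, axis=[0, -1, -2])
    union = K.sum(y_true + y_pred_pos, axis=[0, -1, -2]) - intersection
    return K.mean((intersection + smooth) / (union + smooth))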
Code Example #41
                          dropout_U=0.2,
                          return_sequences=True,
                          W_regularizer=l2(0.01))(x5)
            else:
                x5 = LSTM(300,
                          dropout_W=0.2,
                          dropout_U=0.2,
                          return_sequences=True)(x5)

        attention5 = TimeDistributed(Dense(1, activation='tanh'))(x5)
        attention5 = Flatten()(attention5)
        attention5 = Activation('softmax')(attention5)
        attention5 = RepeatVector(600)(attention5)
        attention5 = Permute([2, 1])(attention5)

        merge5 = merge([x5, attention5], mode='mul')
        merge5 = Lambda(lambda xin: K.sum(xin, axis=1))(merge5)
        merge5 = Dense(300, activation='softmax')(merge5)

        model5 = Model(input=model5_ip, output=merge5)
        print model5.summary()
    else:
        model5 = Sequential()
        model5.add(
            Embedding(len(word_index) + 1, 300, input_length=40, dropout=0.2))
        if opts.cnn == 1:
            model5.add(
                Conv1D(64, 5, padding='valid', activation='relu', strides=1))
            model5.add(MaxPooling1D(pool_size=4))
        if opts.bilstm == 1:
            if opts.regularize == 1:
Code Example #42
def base_conv(inp):
    branch = Convolution2D(32,
                           3,
                           3,
                           border_mode='same',
                           activation='relu',
                           init='glorot_uniform')(inp)
    branch = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(branch)
    return branch


for i in range(x_train.shape[0]):
    x = base_conv(inputs[i])
    convbranches.append(x)
merged_model = merge(convbranches,
                     mode='sum')  # mode='sum' ignores concat_axis; all branches must share one shape
merged_model = Convolution2D(128,
                             3,
                             3,
                             border_mode='same',
                             activation='relu',
                             init='glorot_uniform')(merged_model)
merged_model = Convolution2D(128,
                             3,
                             3,
                             border_mode='same',
                             activation='relu',
                             init='glorot_uniform')(merged_model)
merged_model = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(merged_model)
#merged_model = Convolution2D(256, 3, 3, border_mode = 'same', activation = 'relu', init = 'normal')(merged_model)
Code Example #43
def get_unet():
    inputs = Input((1, img_rows, img_cols))
    conv1 = Convolution2D(32, 3, 3, activation='relu',
                          border_mode='same')(inputs)
    conv1 = Convolution2D(32, 3, 3, activation='relu',
                          border_mode='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Convolution2D(64, 3, 3, activation='relu',
                          border_mode='same')(pool1)
    conv2 = Convolution2D(64, 3, 3, activation='relu',
                          border_mode='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Convolution2D(128, 3, 3, activation='relu',
                          border_mode='same')(pool2)
    conv3 = Convolution2D(128, 3, 3, activation='relu',
                          border_mode='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Convolution2D(256, 3, 3, activation='relu',
                          border_mode='same')(pool3)
    conv4 = Convolution2D(256, 3, 3, activation='relu',
                          border_mode='same')(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = Convolution2D(512, 3, 3, activation='relu',
                          border_mode='same')(pool4)
    conv5 = Convolution2D(512, 3, 3, activation='relu',
                          border_mode='same')(conv5)

    up6 = merge([UpSampling2D(size=(2, 2))(conv5), conv4],
                mode='concat',
                concat_axis=1)
    conv6 = Convolution2D(256, 3, 3, activation='relu',
                          border_mode='same')(up6)
    conv6 = Convolution2D(256, 3, 3, activation='relu',
                          border_mode='same')(conv6)

    up7 = merge([UpSampling2D(size=(2, 2))(conv6), conv3],
                mode='concat',
                concat_axis=1)
    conv7 = Convolution2D(128, 3, 3, activation='relu',
                          border_mode='same')(up7)
    conv7 = Convolution2D(128, 3, 3, activation='relu',
                          border_mode='same')(conv7)

    up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2],
                mode='concat',
                concat_axis=1)
    conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up8)
    conv8 = Convolution2D(64, 3, 3, activation='relu',
                          border_mode='same')(conv8)

    up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1],
                mode='concat',
                concat_axis=1)
    conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up9)
    conv9 = Convolution2D(32, 3, 3, activation='relu',
                          border_mode='same')(conv9)

    conv10 = Convolution2D(1, 1, 1, activation='sigmoid')(conv9)

    model = Model(input=inputs, output=conv10)

    model.compile(optimizer=Adam(lr=1.0e-5),
                  loss=dice_coef_loss,
                  metrics=[dice_coef])

    return model
Code Example #44
os.environ['KERAS_BACKEND'] = 'mxnet'
# os.environ['KERAS_BACKEND']='tensorflow'

from keras.layers import Input, Lambda, merge
from keras.models import Model
import numpy as np

i1 = np.array([[2, 3], [2, 3], [2, 3], [2, 3], [2, 3]])
i2 = np.array([3, 3, 3, 3, 3])

input1_shape = (2, )
input2_shape = (1, )
model_input1 = Input(shape=input1_shape)
model_input2 = Input(shape=input2_shape)
z = model_input1

print 'keras shape = ', z._keras_shape
print 'shape = ', z.shape

z = Lambda(lambda x: x[:, 1:2])(model_input1)

print 'keras shape = ', z._keras_shape
print 'shape = ', z.shape

output = merge([z, model_input2], mode='mul')

model = Model([model_input1, model_input2], output)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')

print model.predict([i1, i2])
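The expected output can be worked out by hand: the Lambda keeps only the second column of model_input1 (the value 3 in every row of i1), and mode='mul' multiplies it elementwise with model_input2 (all 3s), so every row of the prediction is 3 * 3 = 9. (Depending on the Keras version, i2 may need an explicit reshape to (5, 1) to match input2_shape.)

# Expected result, worked by hand (3 * 3 = 9 per row):
# >>> model.predict([i1, i2])
# array([[ 9.],
#        [ 9.],
#        [ 9.],
#        [ 9.],
#        [ 9.]], dtype=float32)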
Code Example #45
def calibrate(target, source, sourceIndex, predLabel, path):

    mmdNetLayerSizes = [25, 25]
    l2_penalty = 1e-2
    init = lambda shape, name: initializers.normal(
        shape, scale=.1e-4, name=name)
    space_dim = target.X.shape[1]

    calibInput = Input(shape=(space_dim, ))
    block1_bn1 = BatchNormalization()(calibInput)
    block1_a1 = Activation('relu')(block1_bn1)
    block1_w1 = Dense(mmdNetLayerSizes[0],
                      activation='linear',
                      kernel_regularizer=l2(l2_penalty),
                      init=init)(block1_a1)
    block1_bn2 = BatchNormalization()(block1_w1)
    block1_a2 = Activation('relu')(block1_bn2)
    block1_w2 = Dense(space_dim,
                      activation='linear',
                      kernel_regularizer=l2(l2_penalty),
                      init=init)(block1_a2)
    block1_output = merge([block1_w2, calibInput], mode='sum')
    block2_bn1 = BatchNormalization()(block1_output)
    block2_a1 = Activation('relu')(block2_bn1)
    block2_w1 = Dense(mmdNetLayerSizes[1],
                      activation='linear',
                      kernel_regularizer=l2(l2_penalty),
                      init=init)(block2_a1)
    block2_bn2 = BatchNormalization()(block2_w1)
    block2_a2 = Activation('relu')(block2_bn2)
    block2_w2 = Dense(space_dim,
                      activation='linear',
                      kernel_regularizer=l2(l2_penalty),
                      init=init)(block2_a2)
    block2_output = merge([block2_w2, block1_output], mode='sum')
    block3_bn1 = BatchNormalization()(block2_output)
    block3_a1 = Activation('relu')(block3_bn1)
    block3_w1 = Dense(mmdNetLayerSizes[1],
                      activation='linear',
                      kernel_regularizer=l2(l2_penalty),
                      init=init)(block3_a1)
    block3_bn2 = BatchNormalization()(block3_w1)
    block3_a2 = Activation('relu')(block3_bn2)
    block3_w2 = Dense(space_dim,
                      activation='linear',
                      kernel_regularizer=l2(l2_penalty),
                      init=init)(block3_a2)
    block3_output = merge([block3_w2, block2_output], mode='sum')

    calibMMDNet = Model(inputs=calibInput, outputs=block3_output)

    n = target.X.shape[0]
    p = np.random.permutation(n)
    toTake = p[range(int(.2 * n))]
    targetXMMD = target.X[toTake]
    targetYMMD = target.y[toTake]

    targetXMMD = targetXMMD[targetYMMD != 0]
    targetYMMD = targetYMMD[targetYMMD != 0]

    targetYMMD = np.reshape(targetYMMD, (-1, 1))

    n = source.X.shape[0]
    p = np.random.permutation(n)
    toTake = p[range(int(.2 * n))]
    sourceXMMD = source.X[toTake]
    sourceYMMD = predLabel[toTake]

    sourceXMMD = sourceXMMD[sourceYMMD != 0]
    sourceYMMD = sourceYMMD[sourceYMMD != 0]

    sourceYMMD = np.reshape(sourceYMMD, (-1, 1))

    lrate = LearningRateScheduler(step_decay)
    optimizer = opt.RMSprop(lr=0.0)
    calibMMDNet.compile(
        optimizer=optimizer,
        loss=lambda y_true, y_pred: cf.MMD(
            block3_output, targetXMMD, MMDTargetValidation_split=0.1).
        KerasCost(y_true, y_pred))

    sourceLabels = np.zeros(sourceXMMD.shape[0])

    calibMMDNet.fit(sourceXMMD,
                    sourceLabels,
                    epochs=500,
                    batch_size=1000,
                    validation_split=0.1,
                    verbose=0,
                    callbacks=[
                        lrate,
                        mn.monitorMMD(sourceXMMD, sourceYMMD, targetXMMD,
                                      targetYMMD, calibMMDNet.predict),
                        cb.EarlyStopping(monitor='val_loss',
                                         patience=20,
                                         mode='auto')
                    ])
    plt.close('all')
    calibMMDNet.save_weights(
        os.path.join(io.DeepLearningRoot(),
                     path + '/ResNet' + str(sourceIndex) + '.h5'))
    calibrateSource = Sample(calibMMDNet.predict(source.X), source.y)
    calibMMDNet = None
    return calibrateSource
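calibrate() schedules the learning rate with LearningRateScheduler(step_decay), but step_decay is defined elsewhere in the project. A conventional step-decay shape (all constants here are illustrative assumptions, not the original's values):

import math

def step_decay(epoch):
    initial_lr = 0.001
    drop = 0.5          # halve the learning rate ...
    epochs_drop = 50.0  # ... every 50 epochs
    return initial_lr * math.pow(drop, math.floor((1 + epoch) / epochs_drop))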
Code Example #46
File: google_models.py  Project: mahfujau/GAN
def generator_google_mnistM(noise_dim, img_source_dim, img_dest_dim,
                            deterministic, pureGAN, wd, suffix=None):
    """DCGAN generator based on Upsampling and Conv2D

    Args:
        noise_dim: Dimension of the noise input
        img_dim: dimension of the image output
        bn_mode: keras batchnorm mode
        model_name: model name (default: {"generator_upsampling"})
        dset: dataset (default: {"mnist"})

    Returns:
        keras model
    """
    s = img_source_dim[1]
#    shp = np.expand_dims(img_dim[1:], 1)  # to make shp = (None, 1, 28, 28), but it is not working
    start_dim = int(s / 4)
    if K.image_dim_ordering() == "th":
        input_channels = img_source_dim[0]
        output_channels = img_dest_dim[0]
        reshape_shape = (input_channels, s, s)
        shp = reshape_shape
    else:
        input_channels = img_source_dim[-1]
        output_channels = img_dest_dim[-1]
        reshape_shape = (s, s, input_channels)
        shp = reshape_shape
    gen_noise_input = Input(shape=noise_dim, name="generator_input")
    gen_image_input = Input(shape=shp, name="generator_image_input")

    # Noise input and reshaping
    x = Dense(5 * s * s, input_dim=noise_dim, W_regularizer=l2(wd))(gen_noise_input)
    x = Reshape((5, s, s))(x)
    x = Activation("relu")(x)

    if deterministic:  # here I link or don't link the noise vector to the whole network
        g = gen_image_input
    elif pureGAN:
        g = x
    else:
        # because of concat_axis=1 -- will it work with TensorFlow NHWC too?
        g = merge([gen_image_input, x], mode='concat', concat_axis=1)

    x1 = Conv2D(64, (3, 3), border_mode='same', kernel_initializer="he_normal",
                W_regularizer=l2(wd))(g)  # convolved by a 3x3 filter to get 64x55x35
    x1 = Activation('relu')(x1)

    for i in range(4):
        x = Conv2D(64, (3, 3), border_mode='same', weight_norm=False,
                   kernel_initializer="he_normal", W_regularizer=l2(wd))(x1)
        x = BatchNormGAN(axis=1)(x)
        x = Activation('relu')(x)
        x = Conv2D(64, (3, 3), border_mode='same', weight_norm=False,
                   kernel_initializer="he_normal", W_regularizer=l2(wd))(x)
        x = BatchNormGAN(axis=1)(x)
        x1 = merge([x, x1], mode='sum')
        x1 = Activation('relu')(x1)

    # Last Conv to get the output image
    x1 = Conv2D(output_channels, (1, 1), name="gen_conv2d_final", border_mode='same',
                kernel_initializer="he_normal", W_regularizer=l2(wd))(x1)
    x1 = Activation('tanh')(x1)
    if suffix is None:
        generator_model = Model(input=[gen_noise_input, gen_image_input],
                                output=[x1], name="generator_google1")
    else:
        generator_model = Model(input=[gen_noise_input, gen_image_input],
                                output=[x1], name="generator_google2")
    visualize_model(generator_model)
    return generator_model
Code Example #47
File: word2vec_keras.py  Project: flownclouds/d2lzh
    word_context = np.array(word_context, dtype='int32')
    print(couples[:10], labels[:10])

    input_target = Input((1, ))
    input_context = Input((1, ))

    embedding = Embedding(vocab_size,
                          vector_dim,
                          input_length=1,
                          name='embedding')
    target = embedding(input_target)
    target = Reshape((vector_dim, 1))(target)
    context = embedding(input_context)
    context = Reshape((vector_dim, 1))(context)

    similarity = merge([target, context], mode='cos', dot_axes=0)
    # now perform the dot product operation to get a similarity measure
    dot_product = merge([target, context], mode='dot', dot_axes=1)
    dot_product = Reshape((1, ))(dot_product)
    # add the sigmoid output layer
    output = Dense(1, activation='sigmoid')(dot_product)
    # create the primary training model
    model = Model(input=[input_target, input_context], output=output)
    model.compile(loss='binary_crossentropy', optimizer='rmsprop')

    # create a secondary validation model to run our similarity checks during training
    validation_model = Model(input=[input_target, input_context],
                             output=similarity)

    sim_cb = SimilarityCallback(idx2word=index2word)
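The couples/labels pairs consumed above are usually produced with keras.preprocessing.sequence.skipgrams, which emits (target, context) index pairs plus negatively sampled non-pairs. A minimal sketch (the toy sequence is illustrative):

import numpy as np
from keras.preprocessing.sequence import skipgrams

toy_sequence = [1, 4, 2, 7, 4, 2]  # word indices of a toy corpus
couples, labels = skipgrams(toy_sequence, vocab_size,
                            window_size=3, negative_samples=1.0)
word_target, word_context = zip(*couples)
word_target = np.array(word_target, dtype='int32')
word_context = np.array(word_context, dtype='int32')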
Code Example #48
def googlenet_model(img_rows,
                    img_cols,
                    channel=1,
                    num_classes=None,
                    model_path="../imagenet_models/"):
    """
    GoogLeNet a.k.a. Inception v1 for Keras

    Model Schema is based on
    https://gist.github.com/joelouismarino/a2ede9ab3928f999575423b9887abd14

    ImageNet Pretrained Weights
    https://drive.google.com/open?id=0B319laiAPjU3RE1maU9MMlh2dnc

    Blog Post:
    http://joelouismarino.github.io/blog_posts/blog_googlenet_keras.html

    @param img_rows: Rows in input.
    @param img_cols: Columns in input.
    @param channel: 1 for grayscale, 3 for color.
    @param num_classes: Number of class labels for our classification task.
    @param model_path: Path containing the ImageNet model.

    @return: Model object.
    """

    input = Input(shape=(channel, img_rows, img_cols))
    conv1_7x7_s2 = Convolution2D(64,
                                 7,
                                 7,
                                 subsample=(2, 2),
                                 border_mode='same',
                                 activation='relu',
                                 name='conv1/7x7_s2',
                                 W_regularizer=l2(0.0002))(input)
    conv1_zero_pad = ZeroPadding2D(padding=(1, 1))(conv1_7x7_s2)
    pool1_helper = PoolHelper()(conv1_zero_pad)
    pool1_3x3_s2 = MaxPooling2D(pool_size=(3, 3),
                                strides=(2, 2),
                                border_mode='valid',
                                name='pool1/3x3_s2')(pool1_helper)
    pool1_norm1 = LRN(name='pool1/norm1')(pool1_3x3_s2)
    conv2_3x3_reduce = Convolution2D(64,
                                     1,
                                     1,
                                     border_mode='same',
                                     activation='relu',
                                     name='conv2/3x3_reduce',
                                     W_regularizer=l2(0.0002))(pool1_norm1)
    conv2_3x3 = Convolution2D(192,
                              3,
                              3,
                              border_mode='same',
                              activation='relu',
                              name='conv2/3x3',
                              W_regularizer=l2(0.0002))(conv2_3x3_reduce)
    conv2_norm2 = LRN(name='conv2/norm2')(conv2_3x3)
    conv2_zero_pad = ZeroPadding2D(padding=(1, 1))(conv2_norm2)
    pool2_helper = PoolHelper()(conv2_zero_pad)
    pool2_3x3_s2 = MaxPooling2D(pool_size=(3, 3),
                                strides=(2, 2),
                                border_mode='valid',
                                name='pool2/3x3_s2')(pool2_helper)

    inception_3a_1x1 = Convolution2D(64,
                                     1,
                                     1,
                                     border_mode='same',
                                     activation='relu',
                                     name='inception_3a/1x1',
                                     W_regularizer=l2(0.0002))(pool2_3x3_s2)
    inception_3a_3x3_reduce = Convolution2D(
        96,
        1,
        1,
        border_mode='same',
        activation='relu',
        name='inception_3a/3x3_reduce',
        W_regularizer=l2(0.0002))(pool2_3x3_s2)
    inception_3a_3x3 = Convolution2D(
        128,
        3,
        3,
        border_mode='same',
        activation='relu',
        name='inception_3a/3x3',
        W_regularizer=l2(0.0002))(inception_3a_3x3_reduce)
    inception_3a_5x5_reduce = Convolution2D(
        16,
        1,
        1,
        border_mode='same',
        activation='relu',
        name='inception_3a/5x5_reduce',
        W_regularizer=l2(0.0002))(pool2_3x3_s2)
    inception_3a_5x5 = Convolution2D(
        32,
        5,
        5,
        border_mode='same',
        activation='relu',
        name='inception_3a/5x5',
        W_regularizer=l2(0.0002))(inception_3a_5x5_reduce)
    inception_3a_pool = MaxPooling2D(pool_size=(3, 3),
                                     strides=(1, 1),
                                     border_mode='same',
                                     name='inception_3a/pool')(pool2_3x3_s2)
    inception_3a_pool_proj = Convolution2D(
        32,
        1,
        1,
        border_mode='same',
        activation='relu',
        name='inception_3a/pool_proj',
        W_regularizer=l2(0.0002))(inception_3a_pool)
    inception_3a_output = merge([
        inception_3a_1x1, inception_3a_3x3, inception_3a_5x5,
        inception_3a_pool_proj
    ],
                                mode='concat',
                                concat_axis=1,
                                name='inception_3a/output')

    inception_3b_1x1 = Convolution2D(
        128,
        1,
        1,
        border_mode='same',
        activation='relu',
        name='inception_3b/1x1',
        W_regularizer=l2(0.0002))(inception_3a_output)
    inception_3b_3x3_reduce = Convolution2D(
        128,
        1,
        1,
        border_mode='same',
        activation='relu',
        name='inception_3b/3x3_reduce',
        W_regularizer=l2(0.0002))(inception_3a_output)
    inception_3b_3x3 = Convolution2D(
        192,
        3,
        3,
        border_mode='same',
        activation='relu',
        name='inception_3b/3x3',
        W_regularizer=l2(0.0002))(inception_3b_3x3_reduce)
    inception_3b_5x5_reduce = Convolution2D(
        32,
        1,
        1,
        border_mode='same',
        activation='relu',
        name='inception_3b/5x5_reduce',
        W_regularizer=l2(0.0002))(inception_3a_output)
    inception_3b_5x5 = Convolution2D(
        96,
        5,
        5,
        border_mode='same',
        activation='relu',
        name='inception_3b/5x5',
        W_regularizer=l2(0.0002))(inception_3b_5x5_reduce)
    inception_3b_pool = MaxPooling2D(
        pool_size=(3, 3),
        strides=(1, 1),
        border_mode='same',
        name='inception_3b/pool')(inception_3a_output)
    inception_3b_pool_proj = Convolution2D(
        64,
        1,
        1,
        border_mode='same',
        activation='relu',
        name='inception_3b/pool_proj',
        W_regularizer=l2(0.0002))(inception_3b_pool)
    inception_3b_output = merge([
        inception_3b_1x1, inception_3b_3x3, inception_3b_5x5,
        inception_3b_pool_proj
    ],
                                mode='concat',
                                concat_axis=1,
                                name='inception_3b/output')

    inception_3b_output_zero_pad = ZeroPadding2D(
        padding=(1, 1))(inception_3b_output)
    pool3_helper = PoolHelper()(inception_3b_output_zero_pad)
    pool3_3x3_s2 = MaxPooling2D(pool_size=(3, 3),
                                strides=(2, 2),
                                border_mode='valid',
                                name='pool3/3x3_s2')(pool3_helper)

    inception_4a_1x1 = Convolution2D(192,
                                     1,
                                     1,
                                     border_mode='same',
                                     activation='relu',
                                     name='inception_4a/1x1',
                                     W_regularizer=l2(0.0002))(pool3_3x3_s2)
    inception_4a_3x3_reduce = Convolution2D(
        96,
        1,
        1,
        border_mode='same',
        activation='relu',
        name='inception_4a/3x3_reduce',
        W_regularizer=l2(0.0002))(pool3_3x3_s2)
    inception_4a_3x3 = Convolution2D(
        208,
        3,
        3,
        border_mode='same',
        activation='relu',
        name='inception_4a/3x3',
        W_regularizer=l2(0.0002))(inception_4a_3x3_reduce)
    inception_4a_5x5_reduce = Convolution2D(
        16,
        1,
        1,
        border_mode='same',
        activation='relu',
        name='inception_4a/5x5_reduce',
        W_regularizer=l2(0.0002))(pool3_3x3_s2)
    inception_4a_5x5 = Convolution2D(
        48,
        5,
        5,
        border_mode='same',
        activation='relu',
        name='inception_4a/5x5',
        W_regularizer=l2(0.0002))(inception_4a_5x5_reduce)
    inception_4a_pool = MaxPooling2D(pool_size=(3, 3),
                                     strides=(1, 1),
                                     border_mode='same',
                                     name='inception_4a/pool')(pool3_3x3_s2)
    inception_4a_pool_proj = Convolution2D(
        64,
        1,
        1,
        border_mode='same',
        activation='relu',
        name='inception_4a/pool_proj',
        W_regularizer=l2(0.0002))(inception_4a_pool)
    inception_4a_output = merge([
        inception_4a_1x1, inception_4a_3x3, inception_4a_5x5,
        inception_4a_pool_proj
    ],
                                mode='concat',
                                concat_axis=1,
                                name='inception_4a/output')

    loss1_ave_pool = AveragePooling2D(
        pool_size=(5, 5), strides=(3, 3),
        name='loss1/ave_pool')(inception_4a_output)
    loss1_conv = Convolution2D(128,
                               1,
                               1,
                               border_mode='same',
                               activation='relu',
                               name='loss1/conv',
                               W_regularizer=l2(0.0002))(loss1_ave_pool)
    loss1_flat = Flatten()(loss1_conv)
    loss1_fc = Dense(1024,
                     activation='relu',
                     name='loss1/fc',
                     W_regularizer=l2(0.0002))(loss1_flat)
    loss1_drop_fc = Dropout(0.7)(loss1_fc)
    loss1_classifier = Dense(1000,
                             name='loss1/classifier',
                             W_regularizer=l2(0.0002))(loss1_drop_fc)
    loss1_classifier_act = Activation('softmax')(loss1_classifier)

    inception_4b_1x1 = Convolution2D(
        160,
        1,
        1,
        border_mode='same',
        activation='relu',
        name='inception_4b/1x1',
        W_regularizer=l2(0.0002))(inception_4a_output)
    inception_4b_3x3_reduce = Convolution2D(
        112,
        1,
        1,
        border_mode='same',
        activation='relu',
        name='inception_4b/3x3_reduce',
        W_regularizer=l2(0.0002))(inception_4a_output)
    inception_4b_3x3 = Convolution2D(
        224,
        3,
        3,
        border_mode='same',
        activation='relu',
        name='inception_4b/3x3',
        W_regularizer=l2(0.0002))(inception_4b_3x3_reduce)
    inception_4b_5x5_reduce = Convolution2D(
        24,
        1,
        1,
        border_mode='same',
        activation='relu',
        name='inception_4b/5x5_reduce',
        W_regularizer=l2(0.0002))(inception_4a_output)
    inception_4b_5x5 = Convolution2D(
        64,
        5,
        5,
        border_mode='same',
        activation='relu',
        name='inception_4b/5x5',
        W_regularizer=l2(0.0002))(inception_4b_5x5_reduce)
    inception_4b_pool = MaxPooling2D(
        pool_size=(3, 3),
        strides=(1, 1),
        border_mode='same',
        name='inception_4b/pool')(inception_4a_output)
    inception_4b_pool_proj = Convolution2D(
        64,
        1,
        1,
        border_mode='same',
        activation='relu',
        name='inception_4b/pool_proj',
        W_regularizer=l2(0.0002))(inception_4b_pool)
    inception_4b_output = merge([
        inception_4b_1x1, inception_4b_3x3, inception_4b_5x5,
        inception_4b_pool_proj
    ],
                                mode='concat',
                                concat_axis=1,
                                name='inception_4b/output')

    inception_4c_1x1 = Convolution2D(
        128,
        1,
        1,
        border_mode='same',
        activation='relu',
        name='inception_4c/1x1',
        W_regularizer=l2(0.0002))(inception_4b_output)
    inception_4c_3x3_reduce = Convolution2D(
        128,
        1,
        1,
        border_mode='same',
        activation='relu',
        name='inception_4c/3x3_reduce',
        W_regularizer=l2(0.0002))(inception_4b_output)
    inception_4c_3x3 = Convolution2D(
        256,
        3,
        3,
        border_mode='same',
        activation='relu',
        name='inception_4c/3x3',
        W_regularizer=l2(0.0002))(inception_4c_3x3_reduce)
    inception_4c_5x5_reduce = Convolution2D(
        24,
        1,
        1,
        border_mode='same',
        activation='relu',
        name='inception_4c/5x5_reduce',
        W_regularizer=l2(0.0002))(inception_4b_output)
    inception_4c_5x5 = Convolution2D(
        64,
        5,
        5,
        border_mode='same',
        activation='relu',
        name='inception_4c/5x5',
        W_regularizer=l2(0.0002))(inception_4c_5x5_reduce)
    inception_4c_pool = MaxPooling2D(
        pool_size=(3, 3),
        strides=(1, 1),
        border_mode='same',
        name='inception_4c/pool')(inception_4b_output)
    inception_4c_pool_proj = Convolution2D(
        64,
        1,
        1,
        border_mode='same',
        activation='relu',
        name='inception_4c/pool_proj',
        W_regularizer=l2(0.0002))(inception_4c_pool)
    inception_4c_output = merge([
        inception_4c_1x1, inception_4c_3x3, inception_4c_5x5,
        inception_4c_pool_proj
    ],
                                mode='concat',
                                concat_axis=1,
                                name='inception_4c/output')

    inception_4d_1x1 = Convolution2D(
        112,
        1,
        1,
        border_mode='same',
        activation='relu',
        name='inception_4d/1x1',
        W_regularizer=l2(0.0002))(inception_4c_output)
    inception_4d_3x3_reduce = Convolution2D(
        144,
        1,
        1,
        border_mode='same',
        activation='relu',
        name='inception_4d/3x3_reduce',
        W_regularizer=l2(0.0002))(inception_4c_output)
    inception_4d_3x3 = Convolution2D(
        288,
        3,
        3,
        border_mode='same',
        activation='relu',
        name='inception_4d/3x3',
        W_regularizer=l2(0.0002))(inception_4d_3x3_reduce)
    inception_4d_5x5_reduce = Convolution2D(
        32,
        1,
        1,
        border_mode='same',
        activation='relu',
        name='inception_4d/5x5_reduce',
        W_regularizer=l2(0.0002))(inception_4c_output)
    inception_4d_5x5 = Convolution2D(
        64,
        5,
        5,
        border_mode='same',
        activation='relu',
        name='inception_4d/5x5',
        W_regularizer=l2(0.0002))(inception_4d_5x5_reduce)
    inception_4d_pool = MaxPooling2D(
        pool_size=(3, 3),
        strides=(1, 1),
        border_mode='same',
        name='inception_4d/pool')(inception_4c_output)
    inception_4d_pool_proj = Convolution2D(
        64,
        1,
        1,
        border_mode='same',
        activation='relu',
        name='inception_4d/pool_proj',
        W_regularizer=l2(0.0002))(inception_4d_pool)
    inception_4d_output = merge([
        inception_4d_1x1, inception_4d_3x3, inception_4d_5x5,
        inception_4d_pool_proj
    ],
                                mode='concat',
                                concat_axis=1,
                                name='inception_4d/output')

    loss2_ave_pool = AveragePooling2D(
        pool_size=(5, 5), strides=(3, 3),
        name='loss2/ave_pool')(inception_4d_output)
    loss2_conv = Convolution2D(128,
                               1,
                               1,
                               border_mode='same',
                               activation='relu',
                               name='loss2/conv',
                               W_regularizer=l2(0.0002))(loss2_ave_pool)
    loss2_flat = Flatten()(loss2_conv)
    loss2_fc = Dense(1024,
                     activation='relu',
                     name='loss2/fc',
                     W_regularizer=l2(0.0002))(loss2_flat)
    loss2_drop_fc = Dropout(0.7)(loss2_fc)
    loss2_classifier = Dense(1000,
                             name='loss2/classifier',
                             W_regularizer=l2(0.0002))(loss2_drop_fc)
    loss2_classifier_act = Activation('softmax')(loss2_classifier)

    inception_4e_1x1 = Convolution2D(
        256,
        1,
        1,
        border_mode='same',
        activation='relu',
        name='inception_4e/1x1',
        W_regularizer=l2(0.0002))(inception_4d_output)
    inception_4e_3x3_reduce = Convolution2D(
        160,
        1,
        1,
        border_mode='same',
        activation='relu',
        name='inception_4e/3x3_reduce',
        W_regularizer=l2(0.0002))(inception_4d_output)
    inception_4e_3x3 = Convolution2D(
        320,
        3,
        3,
        border_mode='same',
        activation='relu',
        name='inception_4e/3x3',
        W_regularizer=l2(0.0002))(inception_4e_3x3_reduce)
    inception_4e_5x5_reduce = Convolution2D(
        32,
        1,
        1,
        border_mode='same',
        activation='relu',
        name='inception_4e/5x5_reduce',
        W_regularizer=l2(0.0002))(inception_4d_output)
    inception_4e_5x5 = Convolution2D(
        128,
        5,
        5,
        border_mode='same',
        activation='relu',
        name='inception_4e/5x5',
        W_regularizer=l2(0.0002))(inception_4e_5x5_reduce)
    inception_4e_pool = MaxPooling2D(
        pool_size=(3, 3),
        strides=(1, 1),
        border_mode='same',
        name='inception_4e/pool')(inception_4d_output)
    inception_4e_pool_proj = Convolution2D(
        128,
        1,
        1,
        border_mode='same',
        activation='relu',
        name='inception_4e/pool_proj',
        W_regularizer=l2(0.0002))(inception_4e_pool)
    inception_4e_output = merge([
        inception_4e_1x1, inception_4e_3x3, inception_4e_5x5,
        inception_4e_pool_proj
    ],
                                mode='concat',
                                concat_axis=1,
                                name='inception_4e/output')

    inception_4e_output_zero_pad = ZeroPadding2D(
        padding=(1, 1))(inception_4e_output)
    pool4_helper = PoolHelper()(inception_4e_output_zero_pad)
    pool4_3x3_s2 = MaxPooling2D(pool_size=(3, 3),
                                strides=(2, 2),
                                border_mode='valid',
                                name='pool4/3x3_s2')(pool4_helper)

    inception_5a_1x1 = Convolution2D(256,
                                     1,
                                     1,
                                     border_mode='same',
                                     activation='relu',
                                     name='inception_5a/1x1',
                                     W_regularizer=l2(0.0002))(pool4_3x3_s2)
    inception_5a_3x3_reduce = Convolution2D(
        160,
        1,
        1,
        border_mode='same',
        activation='relu',
        name='inception_5a/3x3_reduce',
        W_regularizer=l2(0.0002))(pool4_3x3_s2)
    inception_5a_3x3 = Convolution2D(
        320,
        3,
        3,
        border_mode='same',
        activation='relu',
        name='inception_5a/3x3',
        W_regularizer=l2(0.0002))(inception_5a_3x3_reduce)
    inception_5a_5x5_reduce = Convolution2D(
        32,
        1,
        1,
        border_mode='same',
        activation='relu',
        name='inception_5a/5x5_reduce',
        W_regularizer=l2(0.0002))(pool4_3x3_s2)
    inception_5a_5x5 = Convolution2D(
        128,
        5,
        5,
        border_mode='same',
        activation='relu',
        name='inception_5a/5x5',
        W_regularizer=l2(0.0002))(inception_5a_5x5_reduce)
    inception_5a_pool = MaxPooling2D(pool_size=(3, 3),
                                     strides=(1, 1),
                                     border_mode='same',
                                     name='inception_5a/pool')(pool4_3x3_s2)
    inception_5a_pool_proj = Convolution2D(
        128,
        1,
        1,
        border_mode='same',
        activation='relu',
        name='inception_5a/pool_proj',
        W_regularizer=l2(0.0002))(inception_5a_pool)
    inception_5a_output = merge([
        inception_5a_1x1, inception_5a_3x3, inception_5a_5x5,
        inception_5a_pool_proj
    ],
                                mode='concat',
                                concat_axis=1,
                                name='inception_5a/output')

    inception_5b_1x1 = Convolution2D(
        384,
        1,
        1,
        border_mode='same',
        activation='relu',
        name='inception_5b/1x1',
        W_regularizer=l2(0.0002))(inception_5a_output)
    inception_5b_3x3_reduce = Convolution2D(
        192,
        1,
        1,
        border_mode='same',
        activation='relu',
        name='inception_5b/3x3_reduce',
        W_regularizer=l2(0.0002))(inception_5a_output)
    inception_5b_3x3 = Convolution2D(
        384,
        3,
        3,
        border_mode='same',
        activation='relu',
        name='inception_5b/3x3',
        W_regularizer=l2(0.0002))(inception_5b_3x3_reduce)
    inception_5b_5x5_reduce = Convolution2D(
        48,
        1,
        1,
        border_mode='same',
        activation='relu',
        name='inception_5b/5x5_reduce',
        W_regularizer=l2(0.0002))(inception_5a_output)
    inception_5b_5x5 = Convolution2D(
        128,
        5,
        5,
        border_mode='same',
        activation='relu',
        name='inception_5b/5x5',
        W_regularizer=l2(0.0002))(inception_5b_5x5_reduce)
    inception_5b_pool = MaxPooling2D(
        pool_size=(3, 3),
        strides=(1, 1),
        border_mode='same',
        name='inception_5b/pool')(inception_5a_output)
    inception_5b_pool_proj = Convolution2D(
        128,
        1,
        1,
        border_mode='same',
        activation='relu',
        name='inception_5b/pool_proj',
        W_regularizer=l2(0.0002))(inception_5b_pool)
    inception_5b_output = merge([
        inception_5b_1x1, inception_5b_3x3, inception_5b_5x5,
        inception_5b_pool_proj
    ],
                                mode='concat',
                                concat_axis=1,
                                name='inception_5b/output')

    pool5_7x7_s1 = AveragePooling2D(pool_size=(7, 7),
                                    strides=(1, 1),
                                    name='pool5/7x7_s1')(inception_5b_output)
    loss3_flat = Flatten()(pool5_7x7_s1)
    pool5_drop_7x7_s1 = Dropout(0.4)(loss3_flat)
    loss3_classifier = Dense(1000,
                             name='loss3/classifier',
                             W_regularizer=l2(0.0002))(pool5_drop_7x7_s1)
    loss3_classifier_act = Activation('softmax', name='prob')(loss3_classifier)

    # Create model
    model = Model(input=input,
                  output=[
                      loss1_classifier_act, loss2_classifier_act,
                      loss3_classifier_act
                  ])

    # Load ImageNet pre-trained data
    model.load_weights(model_path + 'googlenet_weights.h5')

    # Truncate and replace the softmax layer for transfer learning.
    # We cannot use model.layers.pop() since the model is not Sequential().
    # This works because the pre-trained weights live in the layer objects,
    # not in the Model wrapper, so reusing an upstream tensor preserves them.
    loss1_classifier_statefarm = Dense(num_classes,
                                       name='loss1/classifier',
                                       W_regularizer=l2(0.0002))(loss1_drop_fc)
    loss1_classifier_act_statefarm = Activation('softmax')(
        loss1_classifier_statefarm)

    # Create another model with our customized softmax
    model = Model(input=input, output=[loss1_classifier_act_statefarm])

    # Learning rate is changed to 0.001
    sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
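# Editor's note: the head swap above works because the functional API keeps
# pre-trained weights inside the layer objects, so any intermediate tensor can
# grow a new branch. A minimal, self-contained sketch of the same pattern
# (names and shapes here are illustrative, not taken from the model above):
from keras.layers import Input, Dense, Activation
from keras.models import Model

inp = Input(shape=(100,))
feat = Dense(64, activation='relu', name='features')(inp)
old_logits = Dense(1000, name='old_classifier')(feat)
pretrained = Model(input=inp, output=Activation('softmax')(old_logits))
# pretrained.load_weights('pretrained.h5')  # weights now live in the layers

# Attach a fresh head to the shared feature tensor; the feature layers are
# reused rather than rebuilt, so they keep the weights loaded above.
new_logits = Dense(10, name='new_classifier')(feat)
finetune = Model(input=inp, output=Activation('softmax')(new_logits))
finetune.compile(optimizer='sgd', loss='categorical_crossentropy')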
Code Example #49
0
def main(layers1=[200],
         layers2=[200],
         leaky_alpha=0.10,
         ENV_NAME='EnvPong',
         show=False,
         wall_reward=-0.1,
         touch_reward=0.3,
         n_steps=80000,
         n_alternances=10,
         L_R=0.0001,
         only_test=False,
         opp_aware=[1, 1],
         myopie=[0.00, 0.00],
         ball_speed=1.0,
         weights1_name='',
         weights2_name=''):

    conf_name = "{}_layers1={}__layers2={}__leaky={}__lr={}__opp={}__myopia={}__speed={}".format(
        ENV_NAME, layers1, layers2, leaky_alpha, L_R, opp_aware, myopie,
        ball_speed)
    #gym.undo_logger_setup()
    # Get the environment and extract the number of actions.

    if ENV_NAME == 'Env2D':
        env = Game2D(2.)
    elif ENV_NAME == 'Env2DSoloSpin':
        env = Game2DSolo(2., spinRacket=True)
    elif ENV_NAME == 'Env3DSolo':
        env = Game3DSolo(2., 9.8, 0.5, 7., 3.)
    elif ENV_NAME == 'EnvPong':
        env = Pong(PongPlayer(None, opp_aware=(opp_aware[0] == 1)),
                   PongPlayer(None, opp_aware=(opp_aware[1] == 1)))
    np.random.seed(123)
    #env.seed(123)
    assert len(env.action_space.shape) == 1
    nb_actions = env.action_space.shape[0]

    # Next, we build a very simple model.
    actor = Sequential()
    actor.add(Flatten(input_shape=(1, ) + env.observation_space_1.shape))
    #actor.add(keras.layers.normalization.BatchNormalization())
    for size in layers1:
        actor.add(
            Dense(size,
                  kernel_initializer=RandomUniform(minval=-0.005,
                                                   maxval=0.005,
                                                   seed=None)))
        #actor.add(keras.layers.core.Dropout(0.2))
        actor.add(LeakyReLU(leaky_alpha))
    #actor.add(keras.layers.normalization.BatchNormalization())
    actor.add(
        Dense(nb_actions,
              kernel_initializer=RandomUniform(minval=-0.005,
                                               maxval=0.005,
                                               seed=None),
              bias_regularizer=regularizers.l2(0.01)))
    #actor.add(keras.layers.core.Dropout(0.2))
    actor.add(Activation('linear'))
    print(actor.summary())

    action_input = Input(shape=(nb_actions, ), name='action_input')
    observation_input = Input(shape=(1, ) + env.observation_space_1.shape,
                              name='observation_input')
    flattened_observation = Flatten()(observation_input)
    x = merge([action_input, flattened_observation], mode='concat')
    #x = keras.layers.normalization.BatchNormalization()(x)
    for size in layers1:
        x = Dense(size)(x)
        #x = keras.layers.core.Dropout(0.2)(x)
        x = LeakyReLU(alpha=leaky_alpha)(x)
    #x = keras.layers.normalization.BatchNormalization()(x)
    x = Dense(1)(x)
    x = Activation('linear')(x)
    critic = Model(input=[action_input, observation_input], output=x)
    print(critic.summary())

    actor2 = Sequential()
    actor2.add(Flatten(input_shape=(1, ) + env.observation_space_2.shape))
    #actor2.add(keras.layers.normalization.BatchNormalization())
    for size in layers2:
        actor2.add(
            Dense(size,
                  kernel_initializer=RandomUniform(minval=-0.005,
                                                   maxval=0.005,
                                                   seed=None)))
        #actor2.add(keras.layers.core.Dropout(0.2))
        actor2.add(LeakyReLU(alpha=leaky_alpha))
    actor2.add(
        Dense(nb_actions,
              kernel_initializer=RandomUniform(minval=-0.005,
                                               maxval=0.005,
                                               seed=None),
              bias_regularizer=regularizers.l2(0.01)))
    #actor2.add(keras.layers.core.Dropout(0.2))
    actor2.add(Activation('linear'))
    print(actor2.summary())

    action_input2 = Input(shape=(nb_actions, ), name='action_input')
    observation_input2 = Input(shape=(1, ) + env.observation_space_2.shape,
                               name='observation_input')
    flattened_observation2 = Flatten()(observation_input2)
    x2 = merge([action_input2, flattened_observation2], mode='concat')
    #x2 = keras.layers.normalization.BatchNormalization()(x2)
    for size in layers2:
        x2 = Dense(size)(x2)
        #x2 = keras.layers.core.Dropout(0.2)(x2)
        x2 = LeakyReLU(alpha=leaky_alpha)(x2)
    x2 = Dense(1)(x2)
    x2 = Activation('linear')(x2)
    critic2 = Model(input=[action_input2, observation_input2], output=x2)
    print(critic2.summary())

    # Finally, we configure and compile our agents. You can use any built-in
    # Keras optimizer and even the Keras metrics!
    memory1 = SequentialMemory(limit=50000, window_length=1)
    if opp_aware[0] != opp_aware[1]:
        memory2 = SequentialMemory(limit=50000, window_length=1)
    else:
        memory2 = memory1
    random_process1 = OrnsteinUhlenbeckProcess(size=nb_actions,
                                               theta=.1,
                                               mu=0.,
                                               sigma=.15,
                                               sigma_min=0.,
                                               n_steps_annealing=n_steps /
                                               4)  # anneal sigma so exploration decays over training
    random_process2 = OrnsteinUhlenbeckProcess(size=nb_actions,
                                               theta=.1,
                                               mu=0.,
                                               sigma=.15,
                                               sigma_min=0.,
                                               n_steps_annealing=4 * n_steps)
    agent1 = DDPGAgent(nb_actions=nb_actions,
                       actor=actor,
                       critic=critic,
                       critic_action_input=action_input,
                       memory=memory1,
                       nb_steps_warmup_critic=5000,
                       nb_steps_warmup_actor=5000,
                       random_process=random_process1,
                       gamma=.99,
                       target_model_update=1e-3,
                       batch_size=100)
    agent2 = DDPGAgent(nb_actions=nb_actions,
                       actor=actor2,
                       critic=critic2,
                       critic_action_input=action_input2,
                       memory=memory2,
                       nb_steps_warmup_critic=5000,
                       nb_steps_warmup_actor=5000,
                       random_process=random_process2,
                       gamma=.99,
                       target_model_update=1e-3,
                       batch_size=100)

    #agent.compile(Adam(lr=L_R, clipnorm=1., clipvalue=0.5), metrics=['mae'])
    agent1.compile(Adam(lr=L_R, clipnorm=1.), metrics=['mae'])
    agent2.compile(Adam(lr=L_R, clipnorm=1.), metrics=['mae'])

    player1 = PongPlayer(agent1,
                         myopie=myopie[0],
                         opp_aware=(opp_aware[0] == 1))
    player2 = PongPlayer(agent2,
                         myopie=myopie[1],
                         opp_aware=(opp_aware[1] == 1))

    # Grid -4
    # Add -1 when lost
    # CEM method

    directory_log = "logs/ddpg/{}".format(conf_name)
    directory_weights = "weights/ddpg/{}".format(conf_name)

    if not os.path.exists(directory_log):
        os.makedirs(directory_log)
    if not os.path.exists(directory_weights):
        os.makedirs(directory_weights)

    if only_test:
        '''if weights1_name =='':
            weights1_name = "{}/player1_final".format(directory_weights)
        if weights2_name == '':
            weights2_name = "{}/player2_final".format(directory_weights)
        #if os.path.isfile(weights1_name) and os.path.isfile(weights2_name):
        agent1.load_weights(weights1_name)
        agent2.load_weights(weights2_name)'''

        # Each agent loads its own final checkpoint (saved at the end of training).
        agent1.load_weights("{}/player1_{}".format(directory_weights, "final"))
        agent2.load_weights("{}/player2_{}".format(directory_weights, "final"))

        env = makeEnv(player1, player2, ENV_NAME, ball_speed=ball_speed)
        for i in range(10):
            playPong(env)
        confrontPlayers(env)
        plotStrategy(env)

    else:

        for i in range(n_alternances):

            print "Alternance n {} \n".format(i)

            def learning_rate_schedule(epoch):
                return L_R

            if ENV_NAME == 'Env2D':
                env = Game2D(agent2,
                             wall_reward=wall_reward,
                             touch_reward=touch_reward)
            elif ENV_NAME == 'EnvPong':
                env = Pong(player1,
                           player2,
                           wall_reward=wall_reward,
                           touch_reward=touch_reward,
                           ball_speed=ball_speed)
            agent1.fit(env,
                       nb_steps=n_steps,
                       visualize=False,
                       verbose=1,
                       until_score=True,
                       score_to_reach=0.5,
                       last_episodes=500,
                       nb_max_episode_steps=None,
                       callbacks=[
                           FileLogger("{}/player1_{}.h5f".format(
                               directory_log, i)),
                           keras.callbacks.LearningRateScheduler(
                               learning_rate_schedule)
                       ])
            agent1.test(env,
                        nb_episodes=100,
                        visualize=False,
                        nb_max_episode_steps=500,
                        verbose=1)
            agent1.save_weights("{}/player1_{}".format(directory_weights, i),
                                overwrite=True)
            agent1.memory = SequentialMemory(limit=500000, window_length=1)
            wall_reward = wall_reward * 0.8
            touch_reward = touch_reward * 0.8
            agent2.load_weights("{}/player1_{}".format(directory_weights, i))

        print "Fin de {}".format(conf_name)
        env = Pong(player1,
                   player2,
                   wall_reward=wall_reward,
                   touch_reward=touch_reward,
                   ball_speed=ball_speed)

        #agent1.fit(env, nb_steps=150000, visualize=False, verbose=2, nb_max_episode_steps=None,callbacks=[FileLogger("logs/ddpg/{}_weights_steps_leaky_reg_bias_drop_lr{}.h5f".format(ENV_NAME,L_R), interval=100)])
        agent1.save_weights("{}/player1_final".format(directory_weights),
                            overwrite=True)
        agent2.save_weights("{}/player2_final".format(directory_weights),
                            overwrite=True)

        agent1.test(env,
                    nb_episodes=15,
                    visualize=False,
                    nb_max_episode_steps=500,
                    verbose=2)

    if show:

        if ENV_NAME == 'Env2D':
            for i in range(10):
                play2D(player1=agent1, player2=agent1)
        elif ENV_NAME == 'EnvPong':
            for i in range(10):
                playPong(left=agent1, right=agent2)
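# Editor's note: the alternation above trains agent1, checkpoints it, and
# copies the checkpoint into agent2 so the opponent keeps pace. The core of
# that scheme, sketched in isolation (the path format is hypothetical):
def alternate_self_play(learner, opponent, env, n_rounds, n_steps):
    for i in range(n_rounds):
        learner.fit(env, nb_steps=n_steps, visualize=False, verbose=1)
        path = "weights/selfplay_round_{}".format(i)
        learner.save_weights(path, overwrite=True)
        opponent.load_weights(path)  # the opponent mirrors the fresh learner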
Code Example #50
0
def conv_block(input_tensor,
               kernel_size,
               filters,
               stage,
               block,
               strides=(2, 2)):
    '''conv_block is the block that has a conv layer at the shortcut.
    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of the middle conv layer on the main path
        filters: list of integers, the nb_filters of the 3 conv layers on the main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
    Note that from stage 3 onwards, the first conv layer on the main path uses
    subsample=(2, 2), and the shortcut must use subsample=(2, 2) as well.
    '''
    eps = 1.1e-5
    nb_filter1, nb_filter2, nb_filter3 = filters
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    scale_name_base = 'scale' + str(stage) + block + '_branch'

    x = Convolution2D(nb_filter1,
                      1,
                      1,
                      subsample=strides,
                      name=conv_name_base + '2a',
                      bias=False)(input_tensor)
    x = BatchNormalization(epsilon=eps, axis=bn_axis,
                           name=bn_name_base + '2a')(x)
    x = Scale(axis=bn_axis, name=scale_name_base + '2a')(x)
    x = Activation('relu', name=conv_name_base + '2a_relu')(x)

    x = ZeroPadding2D((1, 1), name=conv_name_base + '2b_zeropadding')(x)
    x = Convolution2D(nb_filter2,
                      kernel_size,
                      kernel_size,
                      name=conv_name_base + '2b',
                      bias=False)(x)
    x = BatchNormalization(epsilon=eps, axis=bn_axis,
                           name=bn_name_base + '2b')(x)
    x = Scale(axis=bn_axis, name=scale_name_base + '2b')(x)
    x = Activation('relu', name=conv_name_base + '2b_relu')(x)

    x = Convolution2D(nb_filter3, 1, 1, name=conv_name_base + '2c',
                      bias=False)(x)
    x = BatchNormalization(epsilon=eps, axis=bn_axis,
                           name=bn_name_base + '2c')(x)
    x = Scale(axis=bn_axis, name=scale_name_base + '2c')(x)

    shortcut = Convolution2D(nb_filter3,
                             1,
                             1,
                             subsample=strides,
                             name=conv_name_base + '1',
                             bias=False)(input_tensor)
    shortcut = BatchNormalization(epsilon=eps,
                                  axis=bn_axis,
                                  name=bn_name_base + '1')(shortcut)
    shortcut = Scale(axis=bn_axis, name=scale_name_base + '1')(shortcut)

    x = merge([x, shortcut], mode='sum', name='res' + str(stage) + block)
    x = Activation('relu', name='res' + str(stage) + block + '_relu')(x)
    return x
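# Editor's note: this ResNet port pairs conv_block with an identity_block
# whose shortcut has no convolution. For reference, a sketch consistent with
# the code above (Scale and bn_axis are assumed defined as in that example):
def identity_block(input_tensor, kernel_size, filters, stage, block):
    eps = 1.1e-5
    nb_filter1, nb_filter2, nb_filter3 = filters
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    scale_name_base = 'scale' + str(stage) + block + '_branch'

    x = Convolution2D(nb_filter1, 1, 1, name=conv_name_base + '2a',
                      bias=False)(input_tensor)
    x = BatchNormalization(epsilon=eps, axis=bn_axis,
                           name=bn_name_base + '2a')(x)
    x = Scale(axis=bn_axis, name=scale_name_base + '2a')(x)
    x = Activation('relu', name=conv_name_base + '2a_relu')(x)

    x = ZeroPadding2D((1, 1), name=conv_name_base + '2b_zeropadding')(x)
    x = Convolution2D(nb_filter2, kernel_size, kernel_size,
                      name=conv_name_base + '2b', bias=False)(x)
    x = BatchNormalization(epsilon=eps, axis=bn_axis,
                           name=bn_name_base + '2b')(x)
    x = Scale(axis=bn_axis, name=scale_name_base + '2b')(x)
    x = Activation('relu', name=conv_name_base + '2b_relu')(x)

    x = Convolution2D(nb_filter3, 1, 1, name=conv_name_base + '2c',
                      bias=False)(x)
    x = BatchNormalization(epsilon=eps, axis=bn_axis,
                           name=bn_name_base + '2c')(x)
    x = Scale(axis=bn_axis, name=scale_name_base + '2c')(x)

    # No projection on the shortcut: input shape must already match nb_filter3.
    x = merge([x, input_tensor], mode='sum', name='res' + str(stage) + block)
    x = Activation('relu', name='res' + str(stage) + block + '_relu')(x)
    return x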
Code Example #51
0
    def vaeBuild(self):
        in_dim = self.in_dim
        expr_in = Input(shape=(self.in_dim, ))
        h0 = NoiseLayer(0.8)(expr_in)
        #h0 = Dropout(0.2)(expr_in)
        ## Encoder layers
        h1 = Dense(units=512, name='encoder_1')(h0)
        #h1_relu = Activation('relu')(h1)
        #h1_drop = Dropout(0.3)(h1)

        h2 = Dense(units=128, name='encoder_2')(h1)
        h2_relu = Activation('relu')(h2)
        #h2_relu = Dropout(0.3)(h2_relu)

        h3 = Dense(units=32, name='encoder_3')(h2_relu)
        h3_relu = Activation('relu')(h3)
        #h3_relu = Dropout(0.3)(h3_relu)

        z_mean = Dense(units=2, name='z_mean')(h3_relu)
        z_log_var = Dense(units=2, name='z_log_var')(h3_relu)
        #z_log_var = Lambda( lambda x:K.log(x) )(z_log_var)

        drop_ratio = Dense(units=1, name='drop_ratio',
                           activation='sigmoid')(h3_relu)
        drop_ratio = RepeatVector(self.in_dim)(drop_ratio)
        drop_ratio = Reshape(target_shape=(self.in_dim, ))(drop_ratio)

        ## sampling new samples
        z = Lambda(sampling, output_shape=(2, ))([z_mean, z_log_var])

        ## Decoder layers
        #decoder_h1 = Dense( units=32,name='decoder_1' )(z)
        #decoder_h1_relu = Activation('relu')(decoder_h1)
        decoder_h2 = Dense(units=128, name='decoder_2')(z)
        decoder_h2_relu = Activation('relu')(decoder_h2)
        decoder_h2_relu = Dropout(0.3)(decoder_h2_relu)

        decoder_h3 = Dense(units=512, name='decoder_3')(decoder_h2_relu)
        decoder_h3_relu = Activation('relu')(decoder_h3)
        decoder_h3_relu = Dropout(0.3)(decoder_h3_relu)

        expr_x_tanh = Dense(units=self.in_dim,
                            activation='tanh')(decoder_h3_relu)
        #expr_x = Lambda( lambda x:x*0.6 )(expr_x)
        expr_x = Activation('relu')(expr_x_tanh)

        expr_x_drop = Lambda(lambda x: -x**2)(expr_x)
        expr_x_drop_log = merge(
            [drop_ratio, expr_x_drop],
            mode='mul')  # log p_drop = log(exp(-lambda * x^2)) = -lambda * x^2
        expr_x_drop_p = Lambda(lambda x: K.exp(x))(expr_x_drop_log)
        expr_x_nondrop_p = Lambda(lambda x: 1 - x)(expr_x_drop_p)
        expr_x_nondrop_log = Lambda(lambda x: K.log(x + 1e-20))(
            expr_x_nondrop_p)

        expr_x_drop_log = Reshape(target_shape=(self.in_dim,
                                                1))(expr_x_drop_log)
        expr_x_nondrop_log = Reshape(target_shape=(self.in_dim,
                                                   1))(expr_x_nondrop_log)

        logits = merge([expr_x_drop_log, expr_x_nondrop_log],
                       mode='concat',
                       concat_axis=-1)
        samples = Lambda(gumbel_softmax, output_shape=(
            self.in_dim,
            2,
        ))(logits)

        samples = Lambda(lambda x: x[:, :, 1])(samples)
        samples = Reshape(target_shape=(self.in_dim, ))(samples)

        #print(samples.shape)
        out = merge([expr_x, samples], mode='mul')

        class VariationalLayer(Layer):
            def __init__(self, **kwargs):
                self.is_placeholder = True
                super(VariationalLayer, self).__init__(**kwargs)

            def vae_loss(self, x, x_decoded_mean):
                xent_loss = in_dim * metrics.binary_crossentropy(
                    x, x_decoded_mean)
                kl_loss = -0.5 * K.sum(
                    1 + z_log_var - K.square(z_mean) - K.exp(z_log_var),
                    axis=-1)
                return K.mean(xent_loss + kl_loss)

            def call(self, inputs):
                x = inputs[0]
                x_decoded_mean = inputs[1]
                loss = self.vae_loss(x, x_decoded_mean)
                self.add_loss(loss, inputs=inputs)
                # We won't actually use the output.
                return x

        y = VariationalLayer()([expr_in, out])
        vae = Model(inputs=expr_in, outputs=y)

        opt = RMSprop(lr=0.0001)
        vae.compile(optimizer=opt, loss=None)

        ae = Model(inputs=expr_in,
                   outputs=[h1, h2, h3, z_mean, decoder_h2, decoder_h3])
        aux = Model(inputs=expr_in,
                    outputs=[expr_x_tanh, expr_x, samples, out, drop_ratio])

        self.vae = vae
        self.ae = ae
        self.aux = aux
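# Editor's note: the sampling() helper used for the latent draw above is not
# shown in this excerpt. A typical reparameterization-trick definition,
# consistent with the 2-dimensional latent space (a sketch, not the original):
from keras import backend as K

def sampling(args):
    z_mean, z_log_var = args
    # z = mu + sigma * eps with eps ~ N(0, I); writing it this way keeps the
    # draw differentiable with respect to z_mean and z_log_var.
    epsilon = K.random_normal(shape=K.shape(z_mean))
    return z_mean + K.exp(z_log_var / 2) * epsilon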
Code Example #52
0
    def get_unet(self):
        inputs = Input((self.img_rows, self.img_cols, 1))

        conv1 = Conv2D(64,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(inputs)
        conv1 = Conv2D(64,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv1)
        pool1 = MaxPooling2D(pool_size=(2, 2))(
            conv1)  # pool1 = MaxPooling2D()(b) takes tensor b as input; the later blocks follow the same pattern

        conv2 = Conv2D(128,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(pool1)
        conv2 = Conv2D(128,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv2)
        pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

        conv3 = Conv2D(256,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(pool2)
        conv3 = Conv2D(256,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv3)
        pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

        conv4 = Conv2D(512,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(pool3)
        conv4 = Conv2D(512,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv4)
        drop4 = Dropout(0.5)(conv4)
        pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)

        conv5 = Conv2D(1024,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(pool4)
        conv5 = Conv2D(1024,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv5)
        drop5 = Dropout(0.5)(conv5)

        up6 = Conv2D(512,
                     2,
                     activation='relu',
                     padding='same',
                     kernel_initializer='he_normal')(
                         UpSampling2D(size=(2, 2))(drop5))
        merge6 = merge([drop4, up6], mode='concat', concat_axis=3)
        conv6 = Conv2D(512,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(merge6)
        conv6 = Conv2D(512,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv6)

        up7 = Conv2D(256,
                     2,
                     activation='relu',
                     padding='same',
                     kernel_initializer='he_normal')(
                         UpSampling2D(size=(2, 2))(conv6))
        merge7 = merge([conv3, up7], mode='concat', concat_axis=3)
        conv7 = Conv2D(256,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(merge7)
        conv7 = Conv2D(256,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv7)

        up8 = Conv2D(128,
                     2,
                     activation='relu',
                     padding='same',
                     kernel_initializer='he_normal')(
                         UpSampling2D(size=(2, 2))(conv7))
        merge8 = merge([conv2, up8], mode='concat', concat_axis=3)
        conv8 = Conv2D(128,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(merge8)
        conv8 = Conv2D(128,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv8)

        up9 = Conv2D(64,
                     2,
                     activation='relu',
                     padding='same',
                     kernel_initializer='he_normal')(
                         UpSampling2D(size=(2, 2))(conv8))
        merge9 = merge([conv1, up9], mode='concat', concat_axis=3)
        conv9 = Conv2D(64,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(merge9)
        conv9 = Conv2D(64,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv9)
        conv9 = Conv2D(2,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv9)
        conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)

        model = Model(input=inputs, output=conv10)

        model.compile(optimizer=Adam(lr=1e-4),
                      loss='binary_crossentropy',
                      metrics=['accuracy'])
        print('model compiled')
        return model
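        # Porting note: merge(..., mode='concat', concat_axis=3) is Keras 1 API.
        # In Keras 2 each skip connection above would use concatenate(), e.g.:
        #   from keras.layers import concatenate
        #   merge6 = concatenate([drop4, up6], axis=3)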
Code Example #53
0
def AlexNet(weights_path=None, heatmap=False):
    if heatmap:
        inputs = Input(shape=(3, None, None))
    else:
        inputs = Input(shape=(3, 227, 227))

    conv_1 = Convolution2D(96, (11, 11),
                           strides=(4, 4),
                           activation='relu',
                           name='conv_1')(inputs)

    conv_2 = MaxPooling2D((3, 3), strides=(2, 2))(conv_1)
    conv_2 = crosschannelnormalization(name='convpool_1')(conv_2)
    conv_2 = ZeroPadding2D((2, 2))(conv_2)
    conv_2 = merge([
        Convolution2D(
            128, (5, 5), activation='relu', name='conv_2_' + str(i + 1))(
                splittensor(ratio_split=2, id_split=i)(conv_2))
        for i in range(2)
    ],
                   mode='concat',
                   concat_axis=1,
                   name='conv_2')

    conv_3 = MaxPooling2D((3, 3), strides=(2, 2))(conv_2)
    conv_3 = crosschannelnormalization()(conv_3)
    conv_3 = ZeroPadding2D((1, 1))(conv_3)
    conv_3 = Convolution2D(384, (3, 3), activation='relu',
                           name='conv_3')(conv_3)

    conv_4 = ZeroPadding2D((1, 1))(conv_3)
    conv_4 = merge([
        Convolution2D(
            192, (3, 3), activation='relu', name='conv_4_' + str(i + 1))(
                splittensor(ratio_split=2, id_split=i)(conv_4))
        for i in range(2)
    ],
                   mode='concat',
                   concat_axis=1,
                   name='conv_4')

    conv_5 = ZeroPadding2D((1, 1))(conv_4)
    conv_5 = merge([
        Convolution2D(
            128, (3, 3), activation='relu', name='conv_5_' + str(i + 1))(
                splittensor(ratio_split=2, id_split=i)(conv_5))
        for i in range(2)
    ],
                   mode='concat',
                   concat_axis=1,
                   name='conv_5')

    dense_1 = MaxPooling2D((3, 3), strides=(2, 2), name='convpool_5')(conv_5)

    if heatmap:
        dense_1 = Convolution2D(4096, (6, 6),
                                activation='relu',
                                name='dense_1')(dense_1)
        dense_2 = Convolution2D(4096, (1, 1),
                                activation='relu',
                                name='dense_2')(dense_1)
        dense_3 = Convolution2D(1000, (1, 1), name='dense_3')(dense_2)
        prediction = Softmax4D(axis=1, name='softmax')(dense_3)
    else:
        dense_1 = Flatten(name='flatten')(dense_1)
        dense_1 = Dense(4096, activation='relu', name='dense_1')(dense_1)
        dense_2 = Dropout(0.5)(dense_1)
        dense_2 = Dense(4096, activation='relu', name='dense_2')(dense_2)
        dense_3 = Dropout(0.5)(dense_2)
        dense_3 = Dense(1000, name='dense_3')(dense_3)
        prediction = Activation('softmax', name='softmax')(dense_3)

    model = Model(input=inputs, output=prediction)

    if weights_path:
        model.load_weights(weights_path)

    return model
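# Editor's note: AlexNet above depends on a splittensor() helper to emulate
# the original two-GPU grouped convolutions. A minimal sketch of such a
# channel-group slice, assuming channels-first tensors with a static shape:
from keras import backend as K
from keras.layers import Lambda

def splittensor(ratio_split, id_split):
    def slice_channels(x):
        n = K.int_shape(x)[1] // ratio_split  # channels per group
        return x[:, id_split * n:(id_split + 1) * n, :, :]
    def split_shape(input_shape):
        return (input_shape[0], input_shape[1] // ratio_split) + input_shape[2:]
    return Lambda(slice_channels, output_shape=split_shape)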
Code Example #54
0
lstm_input_r13 = tf.concat(1, [lstm_input_r12, tf.expand_dims(atten_output_r15, 1)])
lstm_input_r14 = tf.concat(1, [lstm_input_r13, tf.expand_dims(atten_output_r16, 1)])
lstm_input_r15 = tf.concat(1, [lstm_input_r14, tf.expand_dims(atten_output_r17, 1)])
lstm_input_r16 = tf.concat(1, [lstm_input_r15, tf.expand_dims(atten_output_r18, 1)])

var_c0_c = LSTM(output_dim=256, activation='relu', return_sequences=False,
                W_regularizer=l2(0.01), inner_activation='sigmoid')(lstm_input_c4)
var_r0_r = LSTM(output_dim=256, activation='relu', return_sequences=False,
                W_regularizer=l2(0.01), inner_activation='sigmoid')(lstm_input_r16)

var_r1 = Dense(100, bias=False, activation='tanh')(var_r0_r)
var_c1 = Dense(100, bias=False, activation='tanh')(var_c0_c)

# dropout layer
var_c = Dropout(0.5)(var_c1)
var_r = Dropout(0.5)(var_r1)

var = merge([var_c, var_r], mode='concat')

predictions = Dense(2, bias=False, activation='softmax')(var)



labels = tf.placeholder(tf.float32, shape=(None,2))


loss = tf.reduce_mean(binary_crossentropy(labels, predictions))

train_w_step = tf.train.AdagradOptimizer(learning_rate=0.1).minimize(loss)

acc_value = accuracy(labels, predictions)
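# This snippet mixes Keras layers with a raw TensorFlow training loop, so the
# session has to be driven by hand. A hedged sketch of the missing driver code
# (the `inputs` placeholder and the `batches` iterator are assumptions, not
# shown in the excerpt above):
import tensorflow as tf
from keras import backend as K

sess = tf.Session()
K.set_session(sess)  # make the Keras layers above run in this session
sess.run(tf.global_variables_initializer())
for batch_inputs, batch_labels in batches:  # hypothetical mini-batch iterator
    _, batch_loss = sess.run(
        [train_w_step, loss],
        feed_dict={inputs: batch_inputs,  # `inputs`: the graph's input placeholder
                   labels: batch_labels,
                   K.learning_phase(): 1})  # 1 = training mode (enables Dropout)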

Code Example #55
0
def get_2k_twopath_twoconv(img_w, img_h):
    input_img = Input(shape=(img_w, img_h, 1))

    normal = BatchNormalization(input_shape=(img_w, img_h, 1))(input_img)

    conv1 = Convolution2D(184,
                          4,
                          4,
                          init='glorot_normal',
                          input_shape=(img_w, img_h, 1),
                          border_mode='valid',
                          activation='relu')(normal)
    max1 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2),
                        border_mode='same')(conv1)
    drop1 = Dropout(0.25)(max1)

    conv2 = Convolution2D(60,
                          4,
                          4,
                          init='glorot_normal',
                          activation='relu',
                          border_mode='valid')(drop1)
    max2 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2),
                        border_mode='same')(conv2)
    drop2 = Dropout(0.25)(max2)

    conv1_bigk = Convolution2D(184,
                               8,
                               8,
                               init='glorot_normal',
                               input_shape=(img_w, img_h, 1),
                               border_mode='valid',
                               activation='relu')(normal)
    max1_bigk = MaxPooling2D(pool_size=(2, 2),
                             strides=(2, 2),
                             border_mode='same')(conv1_bigk)
    drop1_bigk = Dropout(0.25)(max1_bigk)

    merge_all = merge([drop2, drop1_bigk], mode='concat', concat_axis=-1)

    conv3 = Convolution2D(40,
                          4,
                          4,
                          init='glorot_normal',
                          activation='relu',
                          border_mode='same')(merge_all)
    max3 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2),
                        border_mode='same')(conv3)
    drop3 = Dropout(0.25)(max3)

    flat = Flatten()(drop3)

    # model.add(BatchNormalization())

    dense1 = Dense(500, activation='relu')(flat)
    drop_final = Dropout(0.5)(dense1)

    choice = Dense(3, init='glorot_normal', activation='softmax')(drop_final)
    # model.add(Activation('softmax'))

    model = Model(input=input_img, output=choice)

    for i in range(len(model.layers)):
        print(model.layers[i].input_shape, model.layers[i].output_shape)

    plot(model, to_file='model_twopath.png')

    return model
Code Example #56
0
actor.add(Flatten(input_shape=(1, ) + env.observation_space.shape))
actor.add(Dense(32))
actor.add(Activation('relu'))
actor.add(Dense(32))
actor.add(Activation('relu'))
actor.add(Dense(32))
actor.add(Activation('relu'))
actor.add(Dense(nb_actions))
actor.add(Activation('sigmoid'))
print(actor.summary())

action_input = Input(shape=(nb_actions, ), name='action_input')
observation_input = Input(shape=(1, ) + env.observation_space.shape,
                          name='observation_input')
flattened_observation = Flatten()(observation_input)
x = merge([action_input, flattened_observation], mode='concat')
x = Dense(64)(x)
x = Activation('relu')(x)
x = Dense(64)(x)
x = Activation('relu')(x)
x = Dense(64)(x)
x = Activation('relu')(x)
x = Dense(1)(x)
x = Activation('linear')(x)
critic = Model(input=[action_input, observation_input], output=x)
print(critic.summary())

# Finally, we configure and compile our agent. You can use any built-in Keras
# optimizer and even the Keras metrics!
memory = SequentialMemory(limit=100000, window_length=1)
random_process = OrnsteinUhlenbeckProcess(theta=float(args.theta),
Code Example #57
0
File: dna.py Project: vreuter/deepcpg
    def _res_unit(self,
                  inputs,
                  nb_filter,
                  size=3,
                  stride=1,
                  atrous=1,
                  stage=1,
                  block=1):

        name = '%02d-%02d/' % (stage, block)
        id_name = '%sid_' % (name)
        res_name = '%sres_' % (name)

        # Residual branch

        # 1x1 down-sample conv
        x = kl.BatchNormalization(name=res_name + 'bn1')(inputs)
        x = kl.Activation('relu', name=res_name + 'act1')(x)
        w_reg = kr.WeightRegularizer(l1=self.l1_decay, l2=self.l2_decay)
        x = kl.Conv1D(nb_filter[0],
                      1,
                      name=res_name + 'conv1',
                      subsample_length=stride,
                      init=self.init,
                      W_regularizer=w_reg)(x)

        # LxL conv
        x = kl.BatchNormalization(name=res_name + 'bn2')(x)
        x = kl.Activation('relu', name=res_name + 'act2')(x)
        w_reg = kr.WeightRegularizer(l1=self.l1_decay, l2=self.l2_decay)
        x = kl.AtrousConv1D(nb_filter[1],
                            size,
                            atrous_rate=atrous,
                            name=res_name + 'conv2',
                            border_mode='same',
                            init=self.init,
                            W_regularizer=w_reg)(x)

        # 1x1 up-sample conv
        x = kl.BatchNormalization(name=res_name + 'bn3')(x)
        x = kl.Activation('relu', name=res_name + 'act3')(x)
        w_reg = kr.WeightRegularizer(l1=self.l1_decay, l2=self.l2_decay)
        x = kl.Conv1D(nb_filter[2],
                      1,
                      name=res_name + 'conv3',
                      init=self.init,
                      W_regularizer=w_reg)(x)

        # Identity branch
        if nb_filter[-1] != inputs._keras_shape[-1] or stride > 1:
            w_reg = kr.WeightRegularizer(l1=self.l1_decay, l2=self.l2_decay)
            identity = kl.Conv1D(nb_filter[2],
                                 1,
                                 name=id_name + 'conv1',
                                 subsample_length=stride,
                                 init=self.init,
                                 W_regularizer=w_reg)(inputs)
        else:
            identity = inputs

        x = kl.merge([identity, x], name=name + 'merge', mode='sum')

        return x
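        # Porting note: AtrousConv1D and kr.WeightRegularizer are Keras 1 APIs.
        # In Keras 2 the dilated convolution above would read roughly:
        #   x = kl.Conv1D(nb_filter[1], size, dilation_rate=atrous,
        #                 padding='same', kernel_initializer=self.init,
        #                 kernel_regularizer=kr.l1_l2(l1=self.l1_decay,
        #                                             l2=self.l2_decay),
        #                 name=res_name + 'conv2')(x)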
Code Example #58
0
def _shortcut(input, residual):
    return merge([input, residual], mode='sum')
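# Porting note: in Keras 2 the deprecated merge(..., mode='sum') call becomes
# the functional add() helper; an equivalent shortcut (sketch) would be:
from keras.layers import add

def _shortcut_v2(input, residual):
    return add([input, residual])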
Code Example #59
0
rnn_kwargs = dict(output_dim=SENT_HIDDEN_SIZE, dropout_W=DP, dropout_U=DP)

premise = Input(shape=(MAX_LEN, ), dtype='int32')
hypothesis = Input(shape=(MAX_LEN, ), dtype='int32')

prem = embed(premise)
hypo = embed(hypothesis)

rnn_prem = RNN(return_sequences=False, **rnn_kwargs)
rnn_hypo = RNN(return_sequences=False, **rnn_kwargs)
prem = rnn_prem(prem)
prem = Dropout(DP)(prem)
hypo = rnn_hypo(hypo)
hypo = Dropout(DP)(hypo)

joint = merge([prem, hypo], mode='concat')
joint = Dense(output_dim=50, activation='tanh', W_regularizer=l2(0.01))(joint)
pred = Dense(len(LABELS), activation='softmax', W_regularizer=l2(0.01))(joint)

model = Model(input=[premise, hypothesis], output=pred)
model.compile(optimizer=OPTIMIZER,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

model.summary()

print('Training')
_, tmpfn = tempfile.mkstemp()
# Save the best model during validation and bail out of training early if we're not improving
callbacks = [
    EarlyStopping(patience=PATIENCE),
Code Example #60
0
def Xception(include_top=True, weights='imagenet',
             input_tensor=None):
    '''Instantiate the Xception architecture,
    optionally loading weights pre-trained
    on ImageNet. This model is available for TensorFlow only,
    and can only be used with inputs following the TensorFlow
    dimension ordering `(width, height, channels)`.
    You should set `image_dim_ordering="tf"` in your Keras config
    located at ~/.keras/keras.json.

    Note that the default input image size for this model is 299x299.

    # Arguments
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: one of `None` (random initialization)
            or "imagenet" (pre-training on ImageNet).
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.

    # Returns
        A Keras model instance.
    '''
    if weights not in {'imagenet', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `imagenet` '
                         '(pre-training on ImageNet).')
    if K.backend() != 'tensorflow':
        raise Exception('The Xception model is only available with '
                        'the TensorFlow backend.')
    if K.image_dim_ordering() != 'tf':
        warnings.warn('The Xception model is only available for the '
                      'input dimension ordering "tf" '
                      '(width, height, channels). '
                      'However your settings specify the default '
                      'dimension ordering "th" (channels, width, height). '
                      'You should set `image_dim_ordering="tf"` in your Keras '
                      'config located at ~/.keras/keras.json. '
                      'The model being returned right now will expect inputs '
                      'to follow the "tf" dimension ordering.')
        K.set_image_dim_ordering('tf')
        old_dim_ordering = 'th'
    else:
        old_dim_ordering = None

    # Determine proper input shape
    if include_top:
        input_shape = (299, 299, 3)
    else:
        input_shape = (None, None, 3)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

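    # Entry flow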
    x = Conv2D(32, 3, 3, subsample=(2, 2), bias=False, name='block1_conv1')(img_input)
    x = BatchNormalization(name='block1_conv1_bn')(x)
    x = Activation('relu', name='block1_conv1_act')(x)
    x = Conv2D(64, 3, 3, bias=False, name='block1_conv2')(x)
    x = BatchNormalization(name='block1_conv2_bn')(x)
    x = Activation('relu', name='block1_conv2_act')(x)

    residual = Conv2D(128, 1, 1, subsample=(2, 2),
                      border_mode='same', bias=False)(x)
    residual = BatchNormalization()(residual)

    x = SeparableConv2D(128, 3, 3, border_mode='same', bias=False, name='block2_sepconv1')(x)
    x = BatchNormalization(name='block2_sepconv1_bn')(x)
    x = Activation('relu', name='block2_sepconv2_act')(x)
    x = SeparableConv2D(128, 3, 3, border_mode='same', bias=False, name='block2_sepconv2')(x)
    x = BatchNormalization(name='block2_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3), strides=(2, 2), border_mode='same', name='block2_pool')(x)
    x = merge([x, residual], mode='sum')

    residual = Conv2D(256, 1, 1, subsample=(2, 2),
                      border_mode='same', bias=False)(x)
    residual = BatchNormalization()(residual)

    x = Activation('relu', name='block3_sepconv1_act')(x)
    x = SeparableConv2D(256, 3, 3, border_mode='same', bias=False, name='block3_sepconv1')(x)
    x = BatchNormalization(name='block3_sepconv1_bn')(x)
    x = Activation('relu', name='block3_sepconv2_act')(x)
    x = SeparableConv2D(256, 3, 3, border_mode='same', bias=False, name='block3_sepconv2')(x)
    x = BatchNormalization(name='block3_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3), strides=(2, 2), border_mode='same', name='block3_pool')(x)
    x = merge([x, residual], mode='sum')

    residual = Conv2D(728, 1, 1, subsample=(2, 2),
                      border_mode='same', bias=False)(x)
    residual = BatchNormalization()(residual)

    x = Activation('relu', name='block4_sepconv1_act')(x)
    x = SeparableConv2D(728, 3, 3, border_mode='same', bias=False, name='block4_sepconv1')(x)
    x = BatchNormalization(name='block4_sepconv1_bn')(x)
    x = Activation('relu', name='block4_sepconv2_act')(x)
    x = SeparableConv2D(728, 3, 3, border_mode='same', bias=False, name='block4_sepconv2')(x)
    x = BatchNormalization(name='block4_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3), strides=(2, 2), border_mode='same', name='block4_pool')(x)
    x = merge([x, residual], mode='sum')

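    # Middle flow: 8 identical residual blocks on 728-channel feature maps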
    for i in range(8):
        residual = x
        prefix = 'block' + str(i + 5)

        x = Activation('relu', name=prefix + '_sepconv1_act')(x)
        x = SeparableConv2D(728, 3, 3, border_mode='same', bias=False, name=prefix + '_sepconv1')(x)
        x = BatchNormalization(name=prefix + '_sepconv1_bn')(x)
        x = Activation('relu', name=prefix + '_sepconv2_act')(x)
        x = SeparableConv2D(728, 3, 3, border_mode='same', bias=False, name=prefix + '_sepconv2')(x)
        x = BatchNormalization(name=prefix + '_sepconv2_bn')(x)
        x = Activation('relu', name=prefix + '_sepconv3_act')(x)
        x = SeparableConv2D(728, 3, 3, border_mode='same', bias=False, name=prefix + '_sepconv3')(x)
        x = BatchNormalization(name=prefix + '_sepconv3_bn')(x)

        x = merge([x, residual], mode='sum')

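    # Exit flow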
    residual = Conv2D(1024, 1, 1, subsample=(2, 2),
                      border_mode='same', bias=False)(x)
    residual = BatchNormalization()(residual)

    x = Activation('relu', name='block13_sepconv1_act')(x)
    x = SeparableConv2D(728, 3, 3, border_mode='same', bias=False, name='block13_sepconv1')(x)
    x = BatchNormalization(name='block13_sepconv1_bn')(x)
    x = Activation('relu', name='block13_sepconv2_act')(x)
    x = SeparableConv2D(1024, 3, 3, border_mode='same', bias=False, name='block13_sepconv2')(x)
    x = BatchNormalization(name='block13_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3), strides=(2, 2), border_mode='same', name='block13_pool')(x)
    x = merge([x, residual], mode='sum')

    x = SeparableConv2D(1536, 3, 3, border_mode='same', bias=False, name='block14_sepconv1')(x)
    x = BatchNormalization(name='block14_sepconv1_bn')(x)
    x = Activation('relu', name='block14_sepconv1_act')(x)

    x = SeparableConv2D(2048, 3, 3, border_mode='same', bias=False, name='block14_sepconv2')(x)
    x = BatchNormalization(name='block14_sepconv2_bn')(x)
    x = Activation('relu', name='block14_sepconv2_act')(x)

    if include_top:
        x = GlobalAveragePooling2D(name='avg_pool')(x)
        x = Dense(1000, activation='softmax', name='predictions')(x)

    # Create model
    model = Model(img_input, x)

    # load weights
    if weights == 'imagenet':
        if include_top:
            weights_path = get_file('xception_weights_tf_dim_ordering_tf_kernels.h5',
                                    TF_WEIGHTS_PATH,
                                    cache_subdir='models')
        else:
            weights_path = get_file('xception_weights_tf_dim_ordering_tf_kernels_notop.h5',
                                    TF_WEIGHTS_PATH_NO_TOP,
                                    cache_subdir='models')
        model.load_weights(weights_path)

    if old_dim_ordering:
        K.set_image_dim_ordering(old_dim_ordering)
    return model
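
A minimal usage sketch for this Keras 1-era function; the random array stands in for a real 299x299 RGB image scaled to the [-1, 1] range Xception expects:

import numpy as np

model = Xception(include_top=True, weights='imagenet')

# Stand-in input: one 299x299 RGB image scaled to [-1, 1]
img = np.random.uniform(-1.0, 1.0, size=(1, 299, 299, 3)).astype('float32')
preds = model.predict(img)    # shape (1, 1000)
print(preds.argmax(axis=-1))  # predicted ImageNet class index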