示例#1
0
    def inference(self, mode='simple'):
        """Build and compile a FlowNet optical-flow model.

        Args:
            mode: 'simple' for the stacked-input FlowNetSimple variant,
                'correlation' for the two-stream FlowNetCorr variant.

        Returns:
            A compiled Keras Model taking [left_image, right_image].

        Raises:
            ValueError: if `mode` is unrecognized (the original code
                silently returned None in that case).
        """
        left_image = Input(shape=(self.model_in_height, self.model_in_width,
                                  self.model_in_depth),
                           name='left_input')
        right_image = Input(shape=(self.model_in_height, self.model_in_width,
                                   self.model_in_depth),
                            name='right_image')

        if mode == 'simple':
            # Stack both views along the channel axis and feed the single
            # volume through the "simple" network.
            concate_view = concatenate([left_image, right_image],
                                       axis=3,
                                       name='concate_view')
            prediction = self.FlowNetSimple(concate_view)
        elif mode == 'correlation':
            # Correlation variant consumes the two views separately.
            prediction = self.FlowNetCorr(left_image, right_image)
        else:
            raise ValueError("mode must be 'simple' or 'correlation', "
                             "got %r" % (mode,))

        # Model construction/compilation was duplicated per branch; shared now.
        FlowNet = Model(inputs=[left_image, right_image],
                        outputs=[prediction])
        opt = Adam(lr=self.learning_rate)
        FlowNet.compile(optimizer=opt, loss='mae')
        FlowNet.summary()

        return FlowNet
示例#2
0
def get_model(filters_count, conv_depth, learning_rate, input_shape=(512, 512, 9)):
    """Build and compile the 4-stream network.

    input_shape defaults to 512x512x9 (9 grayscale views stacked as channels).
    """
    filters = int(filters_count)

    # One input per angular direction.
    input_90d = Input(shape=input_shape, name='input_90d')
    input_0d = Input(shape=input_shape, name='input_0d')
    input_45d = Input(shape=input_shape, name='input_45d')
    input_m45d = Input(shape=input_shape, name='input_m45d')

    # Part 1: an independent multi-stream head per direction.
    stream_ver = layersP1_multistream(input_shape, filters)(input_90d)
    stream_hor = layersP1_multistream(input_shape, filters)(input_0d)
    stream_45d = layersP1_multistream(input_shape, filters)(input_45d)
    stream_m45d = layersP1_multistream(input_shape, filters)(input_m45d)

    # Fuse the four streams along the channel axis.
    merged = concatenate([stream_ver, stream_hor, stream_45d, stream_m45d],
                         name='merged')

    merged_shape = (input_shape[0], input_shape[1], filters * 4)

    # Part 2: conv-relu-bn-conv-relu trunk over the merged features.
    merged = layersP2_merged(input_shape=merged_shape,
                             filters_count=filters * 4,
                             conv_depth=conv_depth)(merged)

    # Part 3: output head.
    output = layersP3_output(input_shape=merged_shape,
                             filters_count=filters * 4)(merged)

    mymodel = Model(inputs=[input_90d, input_0d, input_45d, input_m45d],
                    outputs=[output])
    mymodel.compile(optimizer=RMSprop(lr=learning_rate), loss='mae')
    mymodel.summary()

    return mymodel
示例#3
0
def define_epinet(sz_input,sz_input2,view_n,conv_depth,filt_num,learning_rate):
    """Build and compile the EPINet model (four directional input stacks)."""
    n_views = len(view_n)
    filt = int(filt_num)

    # 4 inputs: Conv - Relu - Conv - BN - Relu
    input_stack_90d = Input(shape=(sz_input, sz_input2, n_views), name='input_stack_90d')
    input_stack_0d = Input(shape=(sz_input, sz_input2, n_views), name='input_stack_0d')
    input_stack_45d = Input(shape=(sz_input, sz_input2, n_views), name='input_stack_45d')
    input_stack_M45d = Input(shape=(sz_input, sz_input2, n_views), name='input_stack_M45d')

    # 4-stream layer: Conv - Relu - Conv - BN - Relu
    mid_90d = layer1_multistream(sz_input, sz_input2, n_views, filt)(input_stack_90d)
    mid_0d = layer1_multistream(sz_input, sz_input2, n_views, filt)(input_stack_0d)
    mid_45d = layer1_multistream(sz_input, sz_input2, n_views, filt)(input_stack_45d)
    mid_M45d = layer1_multistream(sz_input, sz_input2, n_views, filt)(input_stack_M45d)

    # Merge the four directional streams.
    mid_merged = concatenate([mid_90d, mid_0d, mid_45d, mid_M45d], name='mid_merged')

    # Merged trunk: Conv - Relu - Conv - BN - Relu (spatial size shrinks by 6).
    mid_merged_ = layer2_merged(sz_input - 6, sz_input2 - 6,
                                int(4 * filt_num), int(4 * filt_num),
                                conv_depth)(mid_merged)

    # Last conv layer: Conv - Relu - Conv (spatial size shrinks to -18 total).
    output = layer3_last(sz_input - 18, sz_input2 - 18,
                         int(4 * filt_num), int(4 * filt_num))(mid_merged_)

    model_512 = Model(inputs=[input_stack_90d, input_stack_0d,
                              input_stack_45d, input_stack_M45d],
                      outputs=[output])
    model_512.compile(optimizer=RMSprop(lr=learning_rate), loss='mae')
    model_512.summary()

    return model_512
示例#4
0
def build_models(seq_len=12, num_classes=4, load_weights=False):
    """Build the frozen ResNet50 extractor and the DST-Net sequence classifier.

    Returns:
        (resnet, dstnet) — feature extractor and the compiled
        Conv3D + 2-layer Bi-LSTM classifier.
    """
    # DST-Net backbone: ResNet50, all layers frozen.
    resnet = ResNet50(weights='imagenet', include_top=False)
    for layer in resnet.layers:
        layer.trainable = False
    # NOTE(review): this immediately overwrites the imagenet weights loaded
    # above — confirm whether weights=None was intended instead.
    resnet.load_weights('model/resnet.h5')

    # DST-Net head: per-frame 1x1 conv, Conv3D, then stacked Bi-LSTMs.
    inputs = Input(shape=(seq_len, 7, 7, 2048))
    x = TimeDistributed(Conv2D(128, 1, 1, activation='relu'))(inputs)
    x = Conv3D(64, 3, 1, 'SAME', activation='relu')(x)
    x = Reshape(target_shape=(seq_len, 7 * 7 * 64))(x)
    x = Bidirectional(LSTM(128, dropout=0.5, return_sequences=True))(x)
    x = Bidirectional(LSTM(128, dropout=0.5, return_sequences=False))(x)
    outputs = Dense(num_classes, activation='softmax')(x)

    dstnet = Model(inputs=inputs, outputs=outputs)
    dstnet.compile(loss='categorical_crossentropy',
                   optimizer=SGD(lr=0.001, momentum=0.9, nesterov=True))
    # Optionally restore previously trained head weights.
    if load_weights:
        dstnet.load_weights('model/dstnet.h5')
    return resnet, dstnet
示例#5
0
def LoadModel():
	"""Load the TDOA model from ./tdoa1.h5 into the global D, or build it fresh.

	Fixes: the original compared with ``D==None`` (use ``is None``) and let
	``load_model`` raise when the file was missing, so the fallback branch
	could never actually run. The load is now wrapped in try/except so a
	new model really is built when no saved model exists.
	"""
	global D
	D = None
	try:
		D = load_model('./tdoa1.h5')
	except (IOError, OSError):
		# No saved model on disk; fall through and build one from scratch.
		pass
	if D is None:
		i = Input(shape=(M, 4))
		print("1=====", i)
		a = Flatten()(i)
		# Tapering MLP; widths replicate the original hand-written stack:
		# 200,160,120, 80 x3, 60 x8, 40 x6, 20, 10.
		widths = [200, 160, 120] + [80] * 3 + [60] * 8 + [40] * 6 + [20, 10]
		for w in widths:
			a = Dense(w, activation='relu')(a)
		o = Dense(2, activation='tanh')(a)
		D = Model(inputs=i, outputs=o)
		D.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
示例#6
0
def CreateSimpleImageModel_512():
	"""Map a 3-vector to a single 512x512 4-channel image (linear output)."""
	dataIn = Input(shape=(3,))
	hidden = Dense(4 * 4, activation='tanh')(dataIn)
	flatImage = Dense(512 * 512 * 4, activation='linear')(hidden)
	modelOut = Reshape((1, 512, 512, 4))(flatImage)
	model = Model(inputs=[dataIn], outputs=[modelOut])
	model.compile(loss='mean_squared_error',
	              optimizer=Adam(lr=0.005, decay=0.0001),
	              metrics=['accuracy'])
	return model
示例#7
0
def CreateSimpleImageModel_512():
    """Build the tiny 3-input -> (1, 512, 512, 4) image decoder."""
    dataIn = Input(shape=(3, ))
    x = dataIn
    x = Dense(4 * 4, activation='tanh')(x)
    x = Dense(512 * 512 * 4, activation='linear')(x)
    x = Reshape((1, 512, 512, 4))(x)
    model = Model(inputs=[dataIn], outputs=[x])
    optimizer = Adam(lr=0.005, decay=0.0001)
    model.compile(loss='mean_squared_error', optimizer=optimizer,
                  metrics=['accuracy'])
    return model
示例#8
0
def ResNet34V2_model():
    """Build ResNet34-v2 with 7 parallel softmax heads (P1..P7).

    NOTE: `n_class` is a module-level global defined elsewhere in the file.
    """
    inpt = Input(shape=(224, 224, 3))
    x = ZeroPadding2D((3, 3))(inpt)
    # Stem: 7x7/2 conv then 3x3/2 max-pool -> (56, 56, 64).
    x = _BN_ReLU_Conv2d(x, nb_filter=64, kernel_size=(7, 7),
                        strides=(2, 2), padding='valid')
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same')(x)

    # Stage 1: three 64-filter residual blocks, no downsampling. (56,56,64)
    for _ in range(3):
        x = Conv_Block(x, nb_filter=64, kernel_size=(3, 3))

    # Stages 2-4: first block downsamples via a conv shortcut, the rest are
    # identity blocks. Yields (28,28,128), (14,14,256), (7,7,512).
    for nb_filter, n_blocks in ((128, 4), (256, 6), (512, 3)):
        x = Conv_Block(x, nb_filter=nb_filter, kernel_size=(3, 3),
                       strides=(2, 2), with_conv_shortcut=True)
        for _ in range(n_blocks - 1):
            x = Conv_Block(x, nb_filter=nb_filter, kernel_size=(3, 3))

    x = AveragePooling2D(pool_size=(7, 7))(x)
    x = Flatten()(x)
    # Seven independent classification heads sharing the pooled features.
    heads = [
        Dense(n_class, activation='softmax', name='P%d' % (i + 1))(x)
        for i in range(7)
    ]
    model = Model(inputs=inpt, outputs=heads)
    model.compile(loss='categorical_crossentropy', optimizer='sgd',
                  metrics=['accuracy'])
    return model
示例#9
0
def define_AttMLFNet(sz_input, sz_input2, view_n, learning_rate):
    """Build the attention-based multi-branch light-field disparity network."""
    # Four branches of inputs, one Input tensor per view.
    input_list = [Input(shape=(sz_input, sz_input2, 1))
                  for _ in range(len(view_n) * 4)]

    # Shared feature extractor applied to every view.
    feature_extraction_layer = feature_extraction(sz_input, sz_input2)
    feature_list = [feature_extraction_layer(inp) for inp in input_list]

    # Split features by branch: horizontal, vertical, 45deg, 135deg.
    feature_h_list = feature_list[:9]
    feature_v_list = feature_list[9:18]
    feature_45_list = feature_list[18:27]
    feature_135_list = feature_list[27:]

    # Per-branch cost volumes.
    cv_h = Lambda(_get_h_CostVolume_)(feature_h_list)
    cv_v = Lambda(_get_v_CostVolume_)(feature_v_list)
    cv_45 = Lambda(_get_45_CostVolume_)(feature_45_list)
    cv_135 = Lambda(_get_135_CostVolume_)(feature_135_list)

    # Intra-branch aggregation to 3D volumes plus channel attention.
    cv_h_3d, cv_h_ca = to_3d_h(cv_h)
    cv_v_3d, cv_v_ca = to_3d_v(cv_v)
    cv_45_3d, cv_45_ca = to_3d_45(cv_45)
    cv_135_3d, cv_135_ca = to_3d_135(cv_135)

    # Inter-branch attention over the fused (element-wise product) volume.
    cv, attention_4 = branch_attention(
        multiply([cv_h_3d, cv_v_3d, cv_45_3d, cv_135_3d]),
        cv_h_ca, cv_v_ca, cv_45_ca, cv_135_ca)

    # Cost-volume regression to a disparity map.
    cost = basic(cv)
    cost = Lambda(lambda x: K.permute_dimensions(K.squeeze(x, -1),
                                                 (0, 2, 3, 1)))(cost)
    pred = Activation('softmax')(cost)
    pred = Lambda(disparityregression)(pred)

    model = Model(inputs=input_list, outputs=[pred])

    model.summary()

    model.compile(optimizer=Adam(lr=learning_rate), loss='mae')

    return model
示例#10
0
 def train(self,
           x_train,
           x_test,
           y_train,
           y_test,
           embedding_matrix,
           num_classes,
           seq_length=200,
           emb_dim=100,
           train_emb=True,
           windows=(3, 4, 5, 6),
           dropouts=(0.2, 0.4),
           filter_sz=100,
           hid_dim=100,
           bch_siz=50,
           epoch=8):
     """Build, compile, and fit a multi-window CNN text classifier.

     Returns the trained Keras model.
     """
     # Set up and train the neural net.
     from tensorflow.contrib.keras.api.keras.models import Model
     from tensorflow.contrib.keras.api.keras.layers import Activation, Dense, Dropout, Embedding, Flatten, Input, Concatenate, Conv1D, MaxPool1D

     inp = Input(shape=(seq_length, ))
     # Pretrained embedding, optionally frozen via train_emb.
     emb = Embedding(input_dim=len(embedding_matrix[:, 1]),
                     output_dim=emb_dim,
                     input_length=seq_length,
                     weights=[embedding_matrix],
                     trainable=train_emb)(inp)
     emb = Dropout(dropouts[0])(emb)

     # One conv -> pool -> flatten branch per window size.
     branches = []
     for window in windows:
         branch = Conv1D(filters=filter_sz,
                         kernel_size=window,
                         padding='valid',
                         activation='relu',
                         strides=1)(emb)
         branch = MaxPool1D(pool_size=2)(branch)
         branches.append(Flatten()(branch))

     feat = Concatenate()(branches)
     feat = Dense(hid_dim, activation='relu')(feat)
     feat = Dropout(dropouts[1])(feat)
     feat = Activation('relu')(feat)
     preds = Dense(num_classes, activation='softmax')(feat)

     model = Model(inp, preds)
     model.compile(loss='categorical_crossentropy',
                   optimizer='nadam',
                   metrics=['accuracy'])
     model.fit(x_train,
               y_train,
               batch_size=bch_siz,
               epochs=epoch,
               verbose=2,
               validation_data=(x_test, y_test))
     return model
示例#11
0
    def inference(self):
        """Build and compile the EPINet model from the instance config."""
        n_views = len(self.view_n)
        filt = int(self.filt_num)
        in_shape = (self.img_height, self.img_width, n_views)

        # 4 inputs: Conv - Relu - Conv - BN - Relu
        input_stack_90d = Input(shape=in_shape, name='input_stack_90d')
        input_stack_0d = Input(shape=in_shape, name='input_stack_0d')
        input_stack_45d = Input(shape=in_shape, name='input_stack_45d')
        input_stack_M45d = Input(shape=in_shape, name='input_stack_M45d')

        # 4-stream layer: Conv - Relu - Conv - BN - Relu
        mid_90d = self.layer1_multistream(
            self.img_height, self.img_width, n_views, filt)(input_stack_90d)
        mid_0d = self.layer1_multistream(
            self.img_height, self.img_width, n_views, filt)(input_stack_0d)
        mid_45d = self.layer1_multistream(
            self.img_height, self.img_width, n_views, filt)(input_stack_45d)
        mid_M45d = self.layer1_multistream(
            self.img_height, self.img_width, n_views, filt)(input_stack_M45d)

        # Merge the four directional streams.
        mid_merged = concatenate([mid_90d, mid_0d, mid_45d, mid_M45d],
                                 name='mid_merged')

        # Merged trunk: Conv - Relu - Conv - BN - Relu.
        mid_merged_ = self.layer2_merged(self.img_height - 6,
                                         self.img_width - 6,
                                         int(4 * self.filt_num),
                                         int(4 * self.filt_num),
                                         self.conv_depth)(mid_merged)

        # Last conv layer: Conv - Relu - Conv.
        output = self.layer3_last(self.img_height - 18, self.img_width - 18,
                                  int(4 * self.filt_num),
                                  int(4 * self.filt_num))(mid_merged_)

        epinet = Model(inputs=[input_stack_90d, input_stack_0d,
                               input_stack_45d, input_stack_M45d],
                       outputs=[output])
        epinet.compile(optimizer=RMSprop(lr=self.learning_rate), loss='mae')
        epinet.summary()

        return epinet
def define_cepinet(sz_input,sz_input2,view_n,conv_depth,filt_num,learning_rate,for_vis = False):
    """Build and compile a 3-stream (90d / 0d / M45d) EPINet variant.

    When ``for_vis`` is True, the global ``feats`` is reset to a list that
    the ``layer1_multistream(..., do_vis=True)`` calls below presumably
    populate with feature-probe models (6 per stream, judging by the
    slices used later) — TODO confirm against layer1_multistream.

    NOTE(review): the function returns ``feat_names``, which is not
    defined here — presumably a module-level global set elsewhere;
    verify before relying on it.
    """
    global feats
    if for_vis:
        feats = []
    else:
        feats = None
    ''' 4-Input : Conv - Relu - Conv - BN - Relu ''' 
    input_stack_90d = Input(shape=(sz_input,sz_input2,len(view_n)), name='input_stack_90d')
    input_stack_0d= Input(shape=(sz_input,sz_input2,len(view_n)), name='input_stack_0d')
#    input_stack_45d= Input(shape=(sz_input,sz_input2,len(view_n)), name='input_stack_45d')
    input_stack_M45d= Input(shape=(sz_input,sz_input2,len(view_n)), name='input_stack_M45d')
    # Only three directional stacks are used (the 45d input is disabled above).
    num_stacks = 3
    with tf.variable_scope("4-Stream"):
        ''' 4-Stream layer : Conv - Relu - Conv - BN - Relu ''' 
        mid_90d = layer1_multistream(sz_input,sz_input2,len(view_n),int(filt_num),do_vis=True,name="90d")(input_stack_90d)
        mid_0d = layer1_multistream(sz_input,sz_input2,len(view_n),int(filt_num),do_vis=True,name="0d")(input_stack_0d)    
    #    mid_45d=layer1_multistream(sz_input,sz_input2,len(view_n),int(filt_num))(input_stack_45d)    
        mid_M45d = layer1_multistream(sz_input,sz_input2,len(view_n),int(filt_num),do_vis=True,name="M45d")(input_stack_M45d)   

    with tf.variable_scope("Merge"):
        ''' Merge layers ''' 
        mid_merged = concatenate([mid_90d,mid_0d,mid_M45d],  name='mid_merged')
        
        ''' Merged layer : Conv - Relu - Conv - BN - Relu '''
        mid_merged_=layer2_merged(sz_input-6,sz_input2-6,int(num_stacks*filt_num),int(num_stacks*filt_num),conv_depth)(mid_merged)

    with tf.variable_scope("Last"):
        ''' Last Conv layer : Conv - Relu - Conv '''
        output=layer3_last(sz_input-18,sz_input2-18,int(num_stacks*filt_num),int(num_stacks*filt_num))(mid_merged_)

    if for_vis:
        # Expose the per-stream feature probes (6 per stream) as extra outputs.
        feat_outs90d = [feat(input_stack_90d) for feat in feats[0:6]]
        feat_outs0d =  [feat(input_stack_0d) for feat in feats[6:12]]
        feat_outsM45d = [feat(input_stack_M45d) for feat in feats[12:18]]
        outputs = feat_outs90d + feat_outs0d + feat_outsM45d + [output]

    else: 
        outputs = [output]
    model_512 = Model(inputs = [input_stack_90d,input_stack_0d,
#                               input_stack_45d,
                               input_stack_M45d], outputs = outputs)
    opt = RMSprop(lr=learning_rate)
    model_512.compile(optimizer=opt, loss='mae')
    model_512.summary() 
    
    return model_512, feat_names
示例#13
0
def define_LFattNet(sz_input, sz_input2, view_n, learning_rate):
    """Build LFattNet: 81 view inputs -> attention cost volume -> disparity."""
    n_views_sq = len(view_n) * len(view_n)

    # 81 inputs, one per sub-aperture view.
    input_list = []
    for idx in range(n_views_sq):
        print('input ' + str(idx))
        input_list.append(Input(shape=(sz_input, sz_input2, 1)))

    # One shared feature extractor applied to all 81 views.
    feature_extraction_layer = feature_extraction(sz_input, sz_input2)
    feature_list = []
    for idx in range(n_views_sq):
        print('feature ' + str(idx))
        feature_list.append(feature_extraction_layer(input_list[idx]))

    # Cost volume, channel attention, then regression to disparity.
    cv = Lambda(_getCostVolume_)(feature_list)
    cv, attention = channel_attention(cv)
    cost = basic(cv)
    cost = Lambda(lambda x: K.permute_dimensions(K.squeeze(x, -1),
                                                 (0, 2, 3, 1)))(cost)
    pred = Activation('softmax')(cost)
    pred = Lambda(disparityregression)(pred)

    # when training use below
    # model = Model(inputs=input_list, outputs=[pred])

    # when evaluation use below
    model = Model(inputs=input_list, outputs=[pred, attention])

    model.summary()

    model.compile(optimizer=Adam(lr=learning_rate), loss='mae')

    return model
示例#14
0
File: dmnn.py  Project: tobytoy/MotionGAN
class _DMNN(object):
    """Action classifier over joint sequences, wrapping a compiled Keras model.

    NOTE: ``self.classifier`` is expected to be provided elsewhere
    (e.g. by a subclass); it is not defined in this class.
    """

    def __init__(self, config):
        self.name = config.model_type + '_' + config.model_version
        self.data_set = config.data_set
        self.batch_size = config.batch_size
        self.num_actions = config.num_actions
        # Sequence length: prefer pick_num, then crop_len, else unbounded.
        if config.pick_num > 0:
            self.seq_len = config.pick_num
        elif config.crop_len > 0:
            self.seq_len = config.crop_len
        else:
            self.seq_len = None
        self.njoints = config.njoints
        self.body_members = config.body_members
        self.dropout = config.dropout

        # Fixed-batch input of (joints, time, xyz) coordinates.
        real_seq = Input(
            batch_shape=(self.batch_size, self.njoints, self.seq_len, 3),
            name='real_seq', dtype='float32')
        pred_action = self.classifier(real_seq)

        self.model = Model(real_seq, pred_action, name=self.name)
        self.model.compile(optimizer=Adam(lr=config.learning_rate),
                           loss='sparse_categorical_crossentropy',
                           metrics=['accuracy'])

    def update_lr(self, lr):
        """Set the optimizer's learning rate in place."""
        K.set_value(self.model.optimizer.lr, lr)
示例#15
0
def define_epinet(sz_input, sz_input2, view_n, conv_depth, filt_num,
                  learning_rate):
    """Build and compile the 4-stream EPINet with feature-probe extra outputs.

    Relies on the module-level ``feats`` global: it is never initialized
    here, so the ``layer1_multistream(..., do_vis=True)`` calls below are
    presumably expected to append one probe model per stream to it —
    TODO confirm against layer1_multistream.

    NOTE(review): returns ``feat_names``, which is not defined in this
    function — presumably a module-level global set elsewhere; verify.
    """
    global feats
    ''' 4-Input : Conv - Relu - Conv - BN - Relu '''
    input_stack_90d = Input(shape=(sz_input, sz_input2, len(view_n)),
                            name='input_stack_90d')
    input_stack_0d = Input(shape=(sz_input, sz_input2, len(view_n)),
                           name='input_stack_0d')
    input_stack_45d = Input(shape=(sz_input, sz_input2, len(view_n)),
                            name='input_stack_45d')
    input_stack_M45d = Input(shape=(sz_input, sz_input2, len(view_n)),
                             name='input_stack_M45d')
    ''' 4-Stream layer : Conv - Relu - Conv - BN - Relu '''
    mid_90d = layer1_multistream(sz_input,
                                 sz_input2,
                                 len(view_n),
                                 int(filt_num),
                                 do_vis=True,
                                 name="90d")(input_stack_90d)
    mid_0d = layer1_multistream(sz_input,
                                sz_input2,
                                len(view_n),
                                int(filt_num),
                                do_vis=True,
                                name="0d")(input_stack_0d)
    mid_45d = layer1_multistream(sz_input,
                                 sz_input2,
                                 len(view_n),
                                 int(filt_num),
                                 do_vis=True,
                                 name="45d")(input_stack_45d)
    mid_M45d = layer1_multistream(sz_input,
                                  sz_input2,
                                  len(view_n),
                                  int(filt_num),
                                  do_vis=True,
                                  name="M45d")(input_stack_M45d)
    ''' Merge layers '''
    mid_merged = concatenate([mid_90d, mid_0d, mid_45d, mid_M45d],
                             name='mid_merged')
    ''' Merged layer : Conv - Relu - Conv - BN - Relu '''
    mid_merged_ = layer2_merged(sz_input - 6, sz_input2 - 6, int(4 * filt_num),
                                int(4 * filt_num), conv_depth)(mid_merged)
    ''' Last Conv layer : Conv - Relu - Conv '''
    output = layer3_last(sz_input - 18, sz_input2 - 18, int(4 * filt_num),
                         int(4 * filt_num))(mid_merged_)

    # Attach one feature probe per stream as extra model outputs.
    feat_outs90d = [feat(input_stack_90d) for feat in feats[0:1]]
    feat_outs0d = [feat(input_stack_0d) for feat in feats[1:2]]
    feat_outs45d = [feat(input_stack_45d) for feat in feats[2:3]]
    feat_outsM45d = [feat(input_stack_M45d) for feat in feats[3:4]]
    outputs = feat_outs90d + feat_outs0d + feat_outs45d + feat_outsM45d + [
        output
    ]

    model_512 = Model(inputs=[
        input_stack_90d, input_stack_0d, input_stack_45d, input_stack_M45d
    ],
                      outputs=outputs)
    opt = RMSprop(lr=learning_rate)
    model_512.compile(optimizer=opt, loss='mae')
    model_512.summary()

    return model_512, feat_names
示例#16
0
class DCGAN():
    """Deep Convolutional GAN for 28x28x1 (MNIST-style) images.

    Holds a generator, a discriminator, and a combined (stacked) model
    that trains the generator against a frozen discriminator.
    """

    def __init__(self):
        # Input shape
        self.img_rows = 28
        self.img_cols = 28
        self.channels = 1
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        self.latent_dim = 100

        optimizer = Adam(0.0002, 0.5)

        # Build and compile the discriminator
        self.discriminator = self.build_discriminator()

        # Build the generator
        self.generator = self.build_generator()

        # load weight
        self.load_weights_from_file()

        # Compile the discriminator
        self.discriminator.compile(loss='binary_crossentropy',
                                   optimizer=optimizer,
                                   metrics=['accuracy'])

        # The generator takes noise as input and generates imgs
        z = Input(shape=(self.latent_dim, ))
        img = self.generator(z)

        # For the combined model we will only train the generator
        self.discriminator.trainable = False

        # The discriminator takes generated images as input and determines validity
        valid = self.discriminator(img)

        # The combined model  (stacked generator and discriminator)
        # Trains the generator to fool the discriminator
        self.combined = Model(z, valid)
        self.combined.compile(loss='binary_crossentropy', optimizer=optimizer)

    def build_generator(self):
        """Return a Model mapping a latent vector to a 28x28x1 tanh image."""

        model = Sequential()

        # Project the latent vector and reshape to a 7x7x128 feature map,
        # then upsample twice (7 -> 14 -> 28).
        model.add(
            Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
        model.add(Reshape((7, 7, 128)))
        model.add(UpSampling2D())
        model.add(Conv2D(128, kernel_size=3, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(UpSampling2D())
        model.add(Conv2D(64, kernel_size=3, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(Conv2D(self.channels, kernel_size=3, padding="same"))
        model.add(Activation("tanh"))

        # model.summary()

        noise = Input(shape=(self.latent_dim, ))
        img = model(noise)

        return Model(noise, img)

    def build_discriminator(self):
        """Return a Model mapping an image to a single real/fake probability."""

        model = Sequential()

        # Strided convs downsample in place of pooling (DCGAN convention).
        model.add(
            Conv2D(32,
                   kernel_size=3,
                   strides=2,
                   input_shape=self.img_shape,
                   padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
        model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(256, kernel_size=3, strides=1, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(1, activation='sigmoid'))

        # model.summary()

        img = Input(shape=self.img_shape)
        validity = model(img)

        return Model(img, validity)

    def train(self, epochs, batch_size=128, save_interval=50):
        """Alternate discriminator/generator updates; save samples periodically."""

        # Load the dataset
        # (X_train, _), (_, _) = mnist.load_data()
        (X_train, y_train) = mnist.train.images, mnist.train.labels
        # Rescale -1 to 1
        X_train = X_train / 127.5 - 1.
        print(X_train.shape)
        X_train = np.reshape(X_train, (-1, 28, 28, 1))

        # Adversarial ground truths
        valid = np.ones((batch_size, 1))
        fake = np.zeros((batch_size, 1))

        for epoch in range(epochs):

            # ---------------------
            #  Train Discriminator
            # ---------------------

            # Select a random half of images
            idx = np.random.randint(0, X_train.shape[0], batch_size)
            imgs = X_train[idx]

            # Sample noise and generate a batch of new images
            noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
            gen_imgs = self.generator.predict(noise)

            # Train the discriminator (real classified as ones and generated as zeros)
            d_loss_real = self.discriminator.train_on_batch(imgs, valid)
            d_loss_fake = self.discriminator.train_on_batch(gen_imgs, fake)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

            # ---------------------
            #  Train Generator
            # ---------------------

            # Train the generator (wants discriminator to mistake images as real)
            g_loss = self.combined.train_on_batch(noise, valid)

            # Plot the progress
            print("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" %
                  (epoch, d_loss[0], 100 * d_loss[1], g_loss))

            # If at save interval => save generated image samples
            if epoch % save_interval == 0:
                self.save_imgs(epoch)
                self.save_model()

    def save_imgs(self, epoch):
        """Render a 5x5 grid of generated samples to images/mnist_<epoch>.png."""
        r, c = 5, 5
        noise = np.random.normal(0, 1, (r * c, self.latent_dim))
        gen_imgs = self.generator.predict(noise)

        # Rescale images 0 - 1
        gen_imgs = 0.5 * gen_imgs + 0.5

        fig, axs = plt.subplots(r, c)
        cnt = 0
        for i in range(r):
            for j in range(c):
                axs[i, j].imshow(gen_imgs[cnt, :, :, 0], cmap='gray')
                axs[i, j].axis('off')
                cnt += 1
        fig.savefig("images/mnist_%d.png" % epoch)
        plt.close()
        return

        # Save model

    def save_model(self):
        """Persist generator and discriminator weights under weight_path."""
        self.discriminator.save_weights(weight_path +
                                        'dcgan_discriminator_weight.h5')
        self.generator.save_weights(weight_path + 'dcgan_generator_weight.h5')
        print("save model")

        # load weights

    def load_weights_from_file(self):
        """Load previously saved weights from weight_path, if the files exist."""
        # discriminator weight
        if self.discriminator != None and os.path.isfile(
                weight_path + "dcgan_discriminator_weight.h5"):
            print("discriminator_weights exists")
            self.discriminator.load_weights(weight_path +
                                            "dcgan_discriminator_weight.h5")
        # generator weight
        if self.generator != None and os.path.isfile(
                weight_path + "dcgan_generator_weight.h5"):
            print("generator_weights exists")
            self.generator.load_weights(weight_path +
                                        "dcgan_generator_weight.h5")
示例#17
0
# Hierarchical RNN (HRNN) classifier: a TimeDistributed LSTM encodes each
# image row, a second LSTM encodes the sequence of row encodings.
# Assumes x_train/x_test, y_train/y_test, row_hidden, col_hidden,
# num_classes, batch_size, and epochs are defined earlier in the file.
row, col, pixel = x_train.shape[1:]

# 4D input.
x = Input(shape=(row, col, pixel))

# Encodes a row of pixels using TimeDistributed Wrapper.
encoded_rows = TimeDistributed(LSTM(row_hidden))(x)

# Encodes columns of encoded rows.
encoded_columns = LSTM(col_hidden)(encoded_rows)

# Final predictions and model.
prediction = Dense(num_classes, activation='softmax')(encoded_columns)
model = Model(x, prediction)
model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

# Training.
model.fit(x_train,
          y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))

# Evaluation.
scores = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
示例#18
0
def main():
    """Train a GAN that generates URL-like strings.

    Reads candidate lines from ``input_data``, one-hot encodes them over
    ``alphabet`` (index 0 is the "empty" padding symbol), trains a
    generator/discriminator pair, and saves the models plus the samples
    generated at the last save interval.
    """

    # Define functions
    build_generator = build_generator_dense
    build_discriminator = build_discriminator_dense

    # Build char <-> index dictionaries; index 0 is reserved for "empty"
    dictionary = {}
    reverse_dictionary = {}
    for i, c in enumerate(alphabet):
        dictionary[c] = i + 1
        reverse_dictionary[i + 1] = c

    # Build optimizer
    optimizer = Adam(learning_rate, 0.5)

    # Build and compile the discriminator
    print("*** BUILDING DISCRIMINATOR ***")
    discriminator = build_discriminator()
    discriminator.compile(loss='binary_crossentropy',
                          optimizer=optimizer,
                          metrics=['accuracy'])

    # Build and compile the generator
    print("*** BUILDING GENERATOR ***")
    generator = build_generator()
    generator.compile(loss='binary_crossentropy', optimizer=optimizer)

    # The generator takes noise as input and generated samples
    z = Input(shape=noise_shape)
    gen = generator(z)

    # For the combined model we will only train the generator
    discriminator.trainable = False

    # The valid takes generated samples as input and determines validity
    valid = discriminator(gen)

    # The combined model (stacked generator and discriminator) takes
    # noise as input => generates samples => determines validity
    combined = Model(z, valid)
    combined.compile(loss='binary_crossentropy', optimizer=optimizer)

    # Load the dataset. `with` guarantees the file handle is closed
    # (the original leaked it).
    data = []
    with open(input_data, "r") as source:
        for line in source.read().splitlines():
            this_sample = np.zeros(url_shape)

            line = line.lower()
            if len(set(line) - set(alphabet)) == 0 and len(line) < url_len:
                # mark every position as "empty" first ...
                for i, position in enumerate(this_sample):
                    this_sample[i][0] = 1.0

                # ... then overwrite the positions covered by the line
                for i, char in enumerate(line):
                    this_sample[i][0] = 0.0
                    this_sample[i][dictionary[char]] = 1.0
                data.append(this_sample)
            else:
                print("Uncompatible line:", line)

    print("Data ready. Lines:", len(data))
    X_train = np.array(data)
    print("Array Shape:", X_train.shape)
    half_batch = int(batch_size / 2)

    # Initialize before the training loop so the save step below cannot hit
    # a NameError when epochs == 0 (original defined it only inside the
    # save-interval branch).
    generated_samples = []

    # Start Training
    for epoch in range(epochs):

        # ---------------------
        #  Train Discriminator
        # ---------------------

        # Select a random half batch of data
        idx = np.random.randint(0, X_train.shape[0], half_batch)
        samples = X_train[idx]
        noise_batch_shape = (half_batch,) + noise_shape
        noise = np.random.normal(0, 1, noise_batch_shape)

        # Generate a half batch of new data
        gens = generator.predict(noise)

        # Train the discriminator: real samples labeled 1, fakes labeled 0
        d_loss_real = discriminator.train_on_batch(samples, np.ones((half_batch, 1)))
        d_loss_fake = discriminator.train_on_batch(gens, np.zeros((half_batch, 1)))
        d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

        # ---------------------
        #  Train Generator
        # ---------------------

        noise_batch_shape = (batch_size,) + noise_shape
        noise = np.random.normal(0, 1, noise_batch_shape)

        # The generator wants the discriminator to label the generated samples as valid (ones)
        valid_y = np.array([1] * batch_size)

        # Train the generator (through the frozen discriminator)
        g_loss = combined.train_on_batch(noise, valid_y)

        # Plot the progress
        print("%d [D loss: %0.3f, acc.: %0.3f%%] [G loss: %0.3f]" % (epoch, d_loss[0], 100 * d_loss[1], g_loss))

        # If at save interval, print some examples
        if epoch % save_interval == 0:
            generated_samples = []
            noise_batch_shape = (print_size,) + noise_shape
            noise = np.random.normal(0, 1, noise_batch_shape)
            gens = generator.predict(noise)

            # Decode one-hot rows back to characters; index 0 means "empty"
            for url in gens:
                this_url_gen = ""
                for position in url:
                    this_index = np.argmax(position)
                    if this_index != 0:
                        this_url_gen += reverse_dictionary[this_index]

                print(this_url_gen)
                generated_samples.append(this_url_gen)

    # Save networks
    discriminator.save(discriminator_savefile)
    generator.save(generator_savefile)

    # Save the samples from the last save interval (file closed via `with`)
    with open(generated_savefile, "w") as fo:
        for url in generated_samples:
            print(url, file=fo)
示例#19
0
    return dense3


if __name__ == '__main__':
    # Input tensor: 32x32 RGB images.
    myinput = Input([32, 32, 3])
    # Build the network (LeNet, defined elsewhere in this file).
    output = lenet(myinput)
    # Assemble the model from input and output tensors.
    model = Model(myinput, output)

    # Adam optimizer with learning rate 0.0003.
    adam = Adam(lr=0.0003)
    # Compile the model.
    model.compile(optimizer=adam,
                  loss="categorical_crossentropy",
                  metrics=['accuracy'])

    # Prepare the data.
    # Fetch the training images.
    X = GetTrainDataByLabel('data')
    # Fetch the labels, one-hot encoded via to_categorical.
    Y = to_categorical(GetTrainDataByLabel('labels'))

    # Train: positional args are batch_size=200, epochs=50, verbose=1.
    # NOTE(review): this snippet is truncated in the source — the
    # `callbacks=[` list is left open and the rest of the call is missing.
    model.fit(X,
              Y,
              200,
              50,
              1,
              callbacks=[
def main():
    """Train a WGAN-GP image generator and periodically save sample strips.

    Builds separate training graphs for the generator (critic frozen) and
    the critic (generator frozen, with a gradient-penalty head on random
    interpolates), alternates 5 critic updates per generator update, and
    every 100 epochs writes a PNG strip of generated samples.
    """

    batch_size = _BATCH_SIZE
    noise_dim = _NOISE_DIM
    lamb = 10.0  # gradient-penalty weight

    train = get_data()
    train_images, train_labels = make_batch(train)

    gen = generator()
    dis = discriminator()
    gen.summary()
    dis.summary()

    dis_opt = Adam(lr=1.0e-4, beta_1=0.0, beta_2=0.9)
    gen_opt = Adam(lr=1.0e-4, beta_1=0.0, beta_2=0.9)

    # Generator training graph: critic frozen, noise -> image -> critic score
    gen.trainable = True
    dis.trainable = False
    gen_inputs = Input(shape=(noise_dim, ))
    gen_outputs = gen(gen_inputs)
    dis_outputs = dis(gen_outputs)
    gen_model = Model(inputs=[gen_inputs], outputs=[dis_outputs])
    gen_model.compile(loss=wasserstein_loss, optimizer=gen_opt)
    gen_model.summary()

    # Critic training graph: generator frozen
    gen.trainable = False
    dis.trainable = True
    real_inputs = Input(shape=train_images.shape[1:])
    dis_real_outputs = dis(real_inputs)

    fake_inputs = Input(shape=(noise_dim, ))
    gen_fake_outputs = gen(fake_inputs)
    dis_fake_outputs = dis(gen_fake_outputs)

    # Random interpolates between real and fake, used by the penalty term
    interpolate = RandomWeightedAverage()([real_inputs, gen_fake_outputs])
    dis_interpolate_outputs = dis(interpolate)

    gp_reg = partial(gradient_penalty, interpolate=interpolate, lamb=lamb)
    # FIX: Keras requires loss callables to expose __name__, which
    # functools.partial objects lack; this line was commented out.
    gp_reg.__name__ = 'gradient_penalty'

    dis_model = Model(inputs=[real_inputs, fake_inputs],
                      outputs=[dis_real_outputs, dis_fake_outputs,
                               dis_interpolate_outputs])

    dis_model.compile(loss=[wasserstein_loss, wasserstein_loss, gp_reg],
                      optimizer=dis_opt)
    dis_model.summary()

    max_epoch = 10001
    max_train_only_dis = 5  # critic updates per generator update
    minibatch_size = batch_size * max_train_only_dis
    max_loop = int(train_images.shape[0] / minibatch_size)

    progbar = Progbar(target=max_epoch)
    # Wasserstein targets: -1 for real, +1 for fake, 0 for the penalty head
    real_label = [-1] * batch_size
    fake_label = [1] * batch_size
    dummy_label = [0] * batch_size
    for epoch in range(max_epoch):

        np.random.shuffle(train_images)
        for loop in range(max_loop):

            minibatch_train_images = train_images[loop *
                                                  minibatch_size:(loop + 1) *
                                                  minibatch_size]
            # Train only the critic several times per generator step
            for train_only_dis in range(max_train_only_dis):

                real = minibatch_train_images[train_only_dis *
                                              batch_size:(train_only_dis + 1) *
                                              batch_size]
                noise = np.random.uniform(
                    -1, 1, (batch_size, noise_dim)).astype(np.float32)
                dis_loss = dis_model.train_on_batch(
                    [real, noise], [real_label, fake_label, dummy_label])

            noise = np.random.uniform(-1, 1, (batch_size, noise_dim)).astype(
                np.float32)
            gen_loss = gen_model.train_on_batch(noise, real_label)

        progbar.add(1,
                    values=[("dis_loss", dis_loss[0]), ("gen_loss", gen_loss)])
        if epoch % 100 == 0:
            # FIX: sample with noise_dim (was hard-coded 10) so the sampling
            # noise always matches the generator's input dimension
            noise = np.random.uniform(
                -1, 1, (batch_size, noise_dim)).astype(np.float32)
            fake = gen.predict(noise)
            # Tile the batch into one horizontal strip; rescale [-1,1] -> [0,255]
            tmp = [r.reshape(-1, 32) for r in fake]
            tmp = np.concatenate(tmp, axis=1)
            img = ((tmp / 2.0 + 0.5) * 255.0).astype(np.uint8)
            Image.fromarray(img).save("generate/%d.png" % (epoch))

    backend.clear_session()
# Embedding layer initialised with the pre-trained matrix and frozen.
embedding_layer = Embedding(num_words,
                            EMBEDDING_DIM,
                            weights=[embedding_matrix],
                            input_length=MAX_SEQUENCE_LENGTH,
                            trainable=False)

print('Training model.')

# 1D convnet: three conv(128, k=5) stages with max-pooling of 5, 5, 35,
# which reduces the sequence to a single position before the dense head.
sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
net = embedding_layer(sequence_input)
for pool_size in (5, 5, 35):
    net = Conv1D(128, 5, activation='relu')(net)
    net = MaxPooling1D(pool_size)(net)
net = Flatten()(net)
net = Dense(128, activation='relu')(net)
preds = Dense(len(labels_index), activation='softmax')(net)

model = Model(sequence_input, preds)
model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['acc'])

model.fit(x_train, y_train,
          batch_size=128,
          epochs=10,
          validation_data=(x_val, y_val))
示例#22
0
def train(data,
          file_name,
          nlayer,
          num_epochs=10,
          batch_size=128,
          train_temp=1,
          init=None,
          activation=tf.nn.relu):
    """
    Train an n-layer residual CNN for MNIST and CIFAR.

    Args:
        data: dataset object exposing train_data/train_labels and
            validation_data/validation_labels arrays.
        file_name: path to save the trained model, or None to skip saving.
        nlayer: total number of residual blocks (must be >= 2).
        num_epochs: number of training epochs.
        batch_size: minibatch size.
        train_temp: temperature dividing the logits inside the loss.
        init: optional path of initial weights to load.
        activation: activation applied after each residual block.

    Returns:
        dict with the trained 'model' and its Keras fit 'history'.
    """
    if nlayer < 2:
        # the original branches covered nlayer in {2,3,4,5} and silently
        # produced a NameError otherwise; fail explicitly instead
        raise ValueError("nlayer must be >= 2")

    inputs = Input(shape=(28, 28, 1))

    # The original hand-unrolled nlayer in {2..5}. The common pattern is
    # ceil(nlayer/2) blocks at 8 filters then floor(nlayer/2) at 16, where
    # the first block of each width is Residual2 and the rest are Residual.
    # This loop reproduces those four cases exactly and generalizes upward.
    x = inputs
    for filters, count in ((8, (nlayer + 1) // 2), (16, nlayer // 2)):
        x = Residual2(filters, activation)(x)
        x = Lambda(activation)(x)
        for _ in range(count - 1):
            x = Residual(filters, activation)(x)
            x = Lambda(activation)(x)
    x = AveragePooling2D(pool_size=7)(x)
    x = Flatten()(x)
    x = Dense(10)(x)

    model = Model(inputs=inputs, outputs=x)

    # load initial weights when given
    if init is not None:
        model.load_weights(init)

    # loss: cross entropy between true labels and temperature-scaled logits
    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
                                                       logits=predicted /
                                                       train_temp)

    # initiate the Adam optimizer
    sgd = Adam()

    # compile the Keras model, given the specified loss and optimizer
    model.compile(loss=fn, optimizer=sgd, metrics=['accuracy'])

    model.summary()
    # run training with given dataset, and print progress
    history = model.fit(data.train_data,
                        data.train_labels,
                        batch_size=batch_size,
                        validation_data=(data.validation_data,
                                         data.validation_labels),
                        epochs=num_epochs,
                        shuffle=True)

    # save model to a file
    if file_name is not None:
        model.save(file_name)

    return {'model': model, 'history': history}
示例#23
0
def main():
    """Train a conditional W-GAN-GP on materials data.

    Loads (or builds and caches) the training set, min-max normalizes it,
    trains an AC-GAN-style conditional generator/discriminator pair with
    the W-GAN-GP objective, then saves generated samples and the models.
    """

    if os.path.isfile(macro._LOCAL_SAVE_DATA) == 0:

        # download data and compute features (see "download_data.py")
        # atomic_numbers use to compute composition vector
        # labels is target properties (formation energy)
        train_labels, compositions, features, atomic_numbers = dl.get_data()

        # compute bag-of-atom vector that trains GAN (see "preprocess.py")
        boa_vectors = pre.compute_bag_of_atom_vector(compositions,
                                                     atomic_numbers)
        train_data = np.concatenate([boa_vectors, features], axis=1)

        # cache labels + features so later runs can skip the download step
        save_data = pd.DataFrame(
            np.concatenate([train_labels, train_data], axis=1))
        save_data.to_csv(macro._LOCAL_SAVE_DATA, index=False, header=False)

    else:
        data = pd.read_csv(macro._LOCAL_SAVE_DATA,
                           delimiter=',',
                           engine="python",
                           header=None)
        data = np.array(data)
        # first column is the label, the remaining columns are features
        train_labels, train_data = np.split(data, [1], axis=1)

    # normalization of training data such that min is 0 and max is 1 (see "preprocess.py")
    normalized_train_data, data_max, data_min = pre.normalize_for_train(
        train_data)
    normalized_train_labels, max_train_prop, min_train_prop = pre.normalize_for_train(
        train_labels)

    # Save normalization parameter to .csv to use generation
    save_data = pd.DataFrame(
        np.concatenate([max_train_prop, min_train_prop, data_max, data_min],
                       axis=0))
    save_data.to_csv(macro._SAVE_NORMALIZATION_PARAM,
                     index=False,
                     header=False)

    ### start initialization of training GAN ###

    # set hyperparameters
    batch_size = macro._BATCH_SIZE  # batch size
    noise_dim = macro._NOISE_DIM  # dimension of noise to input generator
    property_dim = macro._PROP_DIM  # the number of properties
    lamb = macro._LAMB  # hyperparameter for W-GAN-GP
    max_epoch = macro._MAX_EPOCH  # maximum iteration of outer loop
    max_train_only_dis = macro._MAX_EPOCH_TRAIN_DISCRIMINATOR  # maximum iteration of inner loop defined by W-GAN-GP paper (https://arxiv.org/pdf/1704.00028.pdf)
    max_loop = int(train_data.shape[0] / batch_size)

    # set model (see "model.py")
    # in this code, we apply AC-GAN based network architecture (https://arxiv.org/abs/1610.09585)
    # difference between AC-GAN is that our model is the regression, not classification
    gen = model.generator(normalized_train_data.shape[1])
    dis = model.discriminator(normalized_train_data.shape[1])

    # rf is the output layer of discriminator that discriminates real or fake
    rf = model.real_fake()

    # pred is the output layer of discriminator that predicts target property
    pred = model.prediction()

    # set optimization method
    dis_opt = Adam(lr=1.0e-4, beta_1=0.0, beta_2=0.9)
    gen_opt = Adam(lr=1.0e-4, beta_1=0.0, beta_2=0.9)

    # first set discriminator's parameters for training
    gen.trainable = False  # generator's parameter does not update
    dis.trainable = True
    rf.trainable = True
    pred.trainable = True

    # set variables when inputting real data
    real_inputs = Input(shape=normalized_train_data.shape[1:])
    dis_real_outputs = dis(real_inputs)
    real_fake_from_real = rf(dis_real_outputs)
    predictions_from_real = pred(dis_real_outputs)

    # set variables when inputting fake data
    fake_inputs = Input(shape=(noise_dim + property_dim, ))
    gen_fake_outputs = gen(fake_inputs)
    dis_fake_outputs = dis(gen_fake_outputs)
    real_fake_from_fake = rf(dis_fake_outputs)

    # set loss function for discriminator
    # in this case, we apply W-GAN-GP based loss function because of improving stability
    # W-GAN-GP (https://arxiv.org/pdf/1704.00028.pdf)
    # W-GAN-GP is unsupervised training, on the other hand, our model is supervised (conditional).
    # So, we apply wasserstein_loss to real_fake part and apply mean_squared_error to prediction part
    interpolate = model.RandomWeightedAverage()(
        [real_inputs, gen_fake_outputs])
    dis_interpolate_outputs = dis(interpolate)
    real_fake_interpolate = rf(dis_interpolate_outputs)

    # gradient penalty of W-GAN-GP
    gp_reg = partial(model.gradient_penalty,
                     interpolate=interpolate,
                     lamb=lamb)
    # Keras loss callables need a __name__; functools.partial has none
    gp_reg.__name__ = 'gradient_penalty'

    # connect inputs and outputs of the discriminator
    # prediction part is trained by only using training dataset (i.e., predict part is not trained by generated samples)
    dis_model = Model(inputs=[real_inputs, fake_inputs],\
    outputs=[real_fake_from_real, real_fake_from_fake, real_fake_interpolate, predictions_from_real])

    # compile
    dis_model.compile(loss=[model.wasserstein_loss,model.wasserstein_loss,\
    gp_reg,'mean_squared_error'],optimizer=dis_opt)

    # second set generator's parameters for training
    gen.trainable = True  # generator's parameters only update
    dis.trainable = False
    rf.trainable = False
    pred.trainable = False

    # set variables when inputting noise and target property
    gen_inputs = Input(shape=(noise_dim + property_dim, ))
    gen_outputs = gen(gen_inputs)

    # set variables for discriminator when inputting fake data
    dis_outputs = dis(gen_outputs)
    real_fake = rf(dis_outputs)
    predictions = pred(dis_outputs)

    # connect inputs and outputs of the discriminator
    gen_model = Model(inputs=[gen_inputs], outputs=[real_fake, predictions])

    # compile
    # generator is trained by real_fake classification and prediction of target property
    gen_model.compile(loss=[model.wasserstein_loss, 'mean_squared_error'],
                      optimizer=gen_opt)

    # if you need progress bar
    progbar = Progbar(target=max_epoch)

    # set the answer to train each model
    # Wasserstein targets: -1 real, +1 fake, 0 for the gradient-penalty head
    real_label = [-1] * batch_size
    fake_label = [1] * batch_size
    dummy_label = [0] * batch_size

    # reusable buffer for concatenated [noise | property] generator inputs
    inputs = np.zeros((batch_size, noise_dim + property_dim), dtype=np.float32)

    # epoch
    for epoch in range(max_epoch):

        # iteration
        for loop in range(max_loop):

            # shuffle to change the training order and select data
            sdata, slabels, bak = pre.paired_shuffle(normalized_train_data,
                                                     normalized_train_labels)
            real = sdata[loop * batch_size:(loop + 1) * batch_size]
            properties = slabels[loop * batch_size:(loop + 1) * batch_size]

            # generator's parameters does not update
            gen.trainable = False
            dis.trainable = True
            rf.trainable = True
            pred.trainable = True

            # train discriminator (several critic steps per generator step)
            for train_only_dis in range(max_train_only_dis):
                noise = np.random.uniform(
                    -1, 1, (batch_size, noise_dim)).astype(np.float32)
                for i in range(len(noise)):
                    inputs[i] = np.hstack((noise[i], properties[i]))
                dis_loss = dis_model.train_on_batch(
                    [real, inputs],
                    [real_label, fake_label, dummy_label, properties])

            # second train only generator
            gen.trainable = True
            dis.trainable = False
            rf.trainable = False
            pred.trainable = False
            noise = np.random.uniform(-1, 1, (batch_size, noise_dim)).astype(
                np.float32)
            for i in range(len(noise)):
                inputs[i] = np.hstack((noise[i], properties[i]))
            gen_loss = gen_model.train_on_batch([inputs],
                                                [real_label, properties])

        # if you need progress bar
        progbar.add(1,
                    values=[("dis_loss", dis_loss[0]),
                            ("gen_loss", gen_loss[0])])

    # save generated samples and models
    eval.save(normalized_train_data, gen, dis, pred, rf)

    backend.clear_session()
示例#24
0
def transfer_model(source_df, target_df, test_df, method_flag, fold_num):
	"""Train on a source task, transfer to a target task, and classify test data.

	A latent network is first fitted on the source-domain data (unless
	method_flag is mc._SCRATCH), target labels are then mapped to training
	vectors according to method_flag, the network is fine-tuned on the
	target domain, and finally the target-domain test set is classified.

	Returns (predicted_indices.T, test_labels.T).
	"""
	# each dataframe: first column is the label, the rest are features
	source_labels, source_data = np.split(np.array(source_df),[1],axis=1)
	target_labels, target_data = np.split(np.array(target_df),[1],axis=1)
	test_labels, test_data = np.split(np.array(test_df),[1],axis=1)

	# normalization (currently disabled: data is passed through unchanged)
	#normalized_source_data = pre.normalize(source_data)
	#normalized_target_data = pre.normalize(target_data)
	#normalized_test_data = pre.normalize(test_data)
	normalized_source_data = source_data
	normalized_target_data = target_data
	normalized_test_data = test_data


	### construct model for source domain task  ###

	# optimization
	opt = Adam()

	# network setting: shared latent extractor + per-domain last layers
	latent = models.latent(normalized_source_data.shape[1])
	sll = models.source_last_layer()
	tll = models.target_last_layer()

	source_inputs = Input(shape=normalized_source_data.shape[1:])
	latent_features = latent(source_inputs)
	source_predictors = sll(latent_features)

	latent.trainable = mc._SORUCE_LATENT_TRAIN
	source_predictors.trainable = True

	source_nn = Model(inputs=[source_inputs], outputs=[source_predictors])
	source_nn.compile(loss=['mean_squared_error'],optimizer=opt)
	#source_nn.summary()

	# training using source domain data (skipped when training from scratch)
	if method_flag != mc._SCRATCH:
		source_max_loop = int(normalized_source_data.shape[0]/mc._BATCH_SIZE)
		source_progbar = Progbar(target=mc._SOURCE_EPOCH_NUM)
		for epoch in range(mc._SOURCE_EPOCH_NUM):
			shuffle_data, shuffle_labels, _ = pre.paired_shuffle(normalized_source_data,source_labels,1)

			for loop in range(source_max_loop):
				batch_train_data = shuffle_data[loop*mc._BATCH_SIZE:(loop+1)*mc._BATCH_SIZE]
				batch_train_labels = shuffle_labels[loop*mc._BATCH_SIZE:(loop+1)*mc._BATCH_SIZE]
				batch_train_labels = np.reshape(batch_train_labels, [len(batch_train_labels)])
				# train against one-hot source labels
				one_hots = np.identity(mc._SOURCE_DIM_NUM)[np.array(batch_train_labels, dtype=np.int32)]
				loss = source_nn.train_on_batch([batch_train_data],[one_hots])

			#source_progbar.add(1, values=[("source loss",loss)])

		# save
		#latent.save('../results/source_latent.h5')
		#sll.save('../results/source_last_layer.h5')

	# compute relation vectors: how target labels map onto training targets
	if method_flag == mc._SCRATCH or method_flag == mc._CONV_TRANSFER:
		# plain one-hot over the target label space
		target_vectors = np.identity(mc._TARGET_DIM_NUM)[np.array(target_labels, dtype=np.int32)]
		target_vectors = np.reshape(target_vectors, [target_vectors.shape[0], target_vectors.shape[2]])
	elif method_flag == mc._COUNT_ATDL:
		# relabel target classes to source classes, then one-hot over source space
		target_labels, relations = rv.compute_relation_labels(source_nn, normalized_target_data, target_labels, fold_num)
		target_vectors = np.identity(mc._SOURCE_DIM_NUM)[np.array(target_labels, dtype=np.int32)]
		target_vectors = np.reshape(target_vectors, [target_vectors.shape[0], target_vectors.shape[2]])
	else:
		# soft relation vectors in the source output space
		relation_vectors = rv.compute_relation_vectors(source_nn, normalized_target_data, target_labels, fold_num, method_flag)
		target_vectors = np.zeros((len(target_labels),mc._SOURCE_DIM_NUM), dtype=np.float32)
		for i in range(len(target_labels)):
			target_vectors[i] = relation_vectors[int(target_labels[i])]

	### tuning model for target domain task	 ###

	latent.trainable = mc._TARGET_LATENT_TRAIN
	target_inputs = Input(shape=normalized_target_data.shape[1:])
	latent_features = latent(target_inputs)
	if method_flag == mc._SCRATCH or method_flag == mc._CONV_TRANSFER:
		predictors = tll(latent_features)
		label_num = mc._TARGET_DIM_NUM
	else:
		predictors= sll(latent_features)
		label_num = mc._SOURCE_DIM_NUM

	target_nn = Model(inputs=[target_inputs], outputs=[predictors])
	target_nn.compile(loss=['mean_squared_error'],optimizer=opt)
	#target_nn.summary()

	# training using target domain data
	target_max_loop = int(normalized_target_data.shape[0]/mc._BATCH_SIZE)
	target_progbar = Progbar(target=mc._TARGET_EPOCH_NUM)
	for epoch in range(mc._TARGET_EPOCH_NUM):

		shuffle_data, shuffle_labels, _ = \
		pre.paired_shuffle(normalized_target_data, target_vectors, label_num)
		for loop in range(target_max_loop):
			batch_train_data = shuffle_data[loop*mc._BATCH_SIZE:(loop+1)*mc._BATCH_SIZE]
			batch_train_labels = shuffle_labels[loop*mc._BATCH_SIZE:(loop+1)*mc._BATCH_SIZE]
			loss = target_nn.train_on_batch([batch_train_data],[batch_train_labels])
		#target_progbar.add(1, values=[("target loss",loss)])


	# compute outputs of test data of target domain
	x = target_nn.predict([normalized_test_data])
	if method_flag == mc._SCRATCH or method_flag == mc._CONV_TRANSFER:
		idx = np.argmax(x, axis=1)
	elif method_flag == mc._COUNT_ATDL:
		idx = np.argmax(x,axis=1)
		# rewrite test labels through the source-class relation mapping
		for j in range(len(test_labels)):
			for i in range(mc._TARGET_DIM_NUM):
				if test_labels[j] == i:
					test_labels[j] = relations[i]
					break
	else:
		# nearest relation vector decides the predicted class
		# NOTE(review): Neighbors appears to return (distances, indices) — confirm
		distance, idx = Neighbors(x, relation_vectors, 1)
		idx = idx[:,0]

	backend.clear_session()
	return idx.T, test_labels.T