示例#1
0
def LoadModel():
	"""Load the TDOA model from ./tdoa1.h5, or build a fresh MLP if loading fails.

	Stores the resulting Keras model in the module-level global ``D``.
	Relies on the module-level ``M`` (number of input rows) and the Keras
	symbols ``load_model``, ``Input``, ``Flatten``, ``Dense``, ``Model``.
	"""
	global D
	D = None
	try:
		# BUG FIX: load_model raises (rather than returning None) when the
		# file is missing or unreadable, so the original `if D==None`
		# fallback branch was unreachable; guard with try/except instead.
		D = load_model('./tdoa1.h5')
	except Exception:
		D = None
	if D is None:
		i = Input(shape=(M, 4))
		print("1=====", i)
		a = Flatten()(i)
		# Funnel-shaped fully-connected stack (identical widths/order to
		# the original 22 hand-written Dense lines).
		for width in (200, 160, 120, 80, 80, 80,
		              60, 60, 60, 60, 60, 60, 60, 60,
		              40, 40, 40, 40, 40, 40, 20, 10):
			a = Dense(width, activation='relu')(a)
		# 2-D tanh output (e.g. a coordinate pair in [-1, 1]).
		o = Dense(2, activation='tanh')(a)
		D = Model(inputs=i, outputs=o)
		D.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
示例#2
0
    def __init__(self,
                 session,
                 feature_layers=None,
                 feature_weights=None,
                 gram_weights=None):
        """Build a VGG19-based perceptual feature extractor.

        :param session: TF session installed via K.set_session.
        :param feature_layers: layer names to tap; defaults to the input
            plus the first conv2 layer of each VGG block.
        :param feature_weights: per-layer feature weights (default 1.0 each).
        :param gram_weights: per-layer Gram weights (default 0.1 each).
        """
        K.set_session(session)
        self.base_model = VGG19(include_top=False, weights='imagenet')
        if feature_layers is None:
            feature_layers = [
                "input_1", "block1_conv2", "block2_conv2", "block3_conv2",
                "block4_conv2", "block5_conv2"
            ]
        # BUG FIX: Keras layers expose their name as `.name`; the original
        # read `.val_set_name`, which raises AttributeError (the identical
        # implementation elsewhere in this file uses `.name`).
        self.layer_names = [l.name for l in self.base_model.layers]
        for k in feature_layers:
            if k not in self.layer_names:
                raise KeyError("Invalid layer {}. Available layers: {}".format(
                    k, self.layer_names))
        features = [
            self.base_model.get_layer(k).output for k in feature_layers
        ]
        self.model = Model(inputs=self.base_model.input, outputs=features)
        if feature_weights is None:
            feature_weights = len(feature_layers) * [1.0]
        if gram_weights is None:
            gram_weights = len(feature_layers) * [0.1]
        self.feature_weights = feature_weights
        self.gram_weights = gram_weights
        assert len(self.feature_weights) == len(features)
        # Gram losses are enabled iff any gram weight is positive.
        self.use_gram = np.max(self.gram_weights) > 0.0

        self.variables = self.base_model.weights
示例#3
0
 def __init__(self, nn_type="resnet50", restore=None, session=None, use_imagenet_pretrain=False, use_softmax=True):
     """Build a 224x224 RGB 8-class classifier head on a ResNet50/VGG16 base.

     :param nn_type: "resnet50" or "vgg16"; anything else raises ValueError.
     :param restore: optional path to a weights file to load.
     :param session: unused here (kept for interface compatibility).
     :param use_imagenet_pretrain: initialize the base with ImageNet weights.
     :param use_softmax: append a softmax activation to the logits.
     """
     self.image_size = 224
     self.num_channels = 3
     self.num_labels = 8

     input_layer = Input(shape=(self.image_size, self.image_size, self.num_channels))
     weights = "imagenet" if use_imagenet_pretrain else None
     if nn_type == "resnet50":
         base_model = ResNet50(weights=weights, input_tensor=input_layer)
     elif nn_type == "vgg16":
         base_model = VGG16(weights=weights, input_tensor=input_layer)
     else:
         # BUG FIX: an unrecognized nn_type previously fell through and
         # crashed later with NameError on the unbound `base_model`.
         raise ValueError("Unsupported nn_type: {!r}".format(nn_type))
     # Custom head: LeakyReLU/Dense/Dropout funnel down to 8 logits.
     x = base_model.output
     x = LeakyReLU()(x)
     x = Dense(1024)(x)
     x = Dropout(0.2)(x)
     x = LeakyReLU()(x)
     x = Dropout(0.3)(x)
     x = Dense(8)(x)
     if use_softmax:
         x = Activation("softmax")(x)
     model = Model(inputs=base_model.input, outputs=x)

     if restore:
         print("Load: {}".format(restore))
         model.load_weights(restore)

     self.model = model
    def __init__(self):
        """Build a DenseNet-121-style classifier for 224x224x3 inputs.

        Relies on the module-level helpers ``dense_block`` and
        ``transition_block``.
        """
        self.growth_rate = 32
        in_ = Input(shape=(224, 224, 3))
        self.num_dense_block = 4

        # Stem: 7x7/2 conv -> BN -> ReLU -> 3x3/2 max-pool.
        x = Conv2D(64, (7, 7), (2, 2), padding="SAME")(in_)
        x = BatchNormalization()(x)
        x = Activation("relu")(x)
        x = MaxPool2D((3, 3), (2, 2), padding="VALID")(x)

        # `n_filters` replaces the original local named `filter`, which
        # shadowed the builtin.
        n_filters = 64
        num_node_each_layer = [6, 12, 24, 16]  # DenseNet-121 block sizes
        for i in range(self.num_dense_block):
            x, n_filters = dense_block(x,
                                       num_node_each_layer[i],
                                       n_filters,
                                       growth_rate=32)
            if i != self.num_dense_block - 1:
                # Compression factor 1.0 keeps the channel count unchanged.
                x = transition_block(x, n_filters, 1.0)
                # BUG FIX: the original `filter * 1.0` silently turned the
                # filter count into a float; keep it an int.
                n_filters = int(n_filters * 1.0)

        # Output of the loop is still a conv feature map.
        x = BatchNormalization()(x)
        x = Activation("relu")(x)

        x = GlobalAveragePooling2D()(x)
        x = Dense(1000, activation="softmax")(x)
        model = Model(inputs=in_, outputs=x)
        model.summary()
        self.model = model
示例#5
0
    def on_epoch_end(self, epoch, logs=None):
        """Keras callback hook: record the loss, and every 6th epoch plot
        the loss history and the current endmembers.

        :param epoch: epoch index supplied by Keras.
        :param logs: metrics dict supplied by Keras (reads 'loss').
        """
        # BUG FIX: the loss was appended twice per epoch, double-counting
        # every entry in self.losses (and skewing the loss-history plot).
        self.losses.append(logs.get('loss'))
        self.num_epochs = epoch
        if epoch % 6 != 0:
            return
        # Abundances come from the intermediate 'abundances' layer.
        intermediate_layer_model = Model(
            inputs=self.model.input,
            outputs=self.model.get_layer('abundances').output)
        abundances = intermediate_layer_model.predict(self.input)
        # Endmembers are stored as the last layer's kernel weights.
        endmembers = self.model.layers[len(self.model.layers) -
                                       1].get_weights()[0]
        plotHist(self.losses, 33)
        if self.plotGT:
            # `match` maps estimated endmembers onto the ground-truth order
            # (renamed from `dict`, which shadowed the builtin).
            match = order_endmembers(endmembers, self.endmembersGT)
            plotEndmembersAndGT(endmembers, self.endmembersGT, match)
        else:
            plotEndmembers(self.num_endmembers, endmembers)

        # Debug: inspect one of the trainable weight tensors.
        print(K.eval(self.model._collected_trainable_weights[-3]))
        return
示例#6
0
def CreateSimpleImageModel_512():
	"""Build a tiny dense generator mapping a 3-vector to a 1x512x512x4 image."""
	inp = Input(shape=(3,))
	hidden = Dense(4 * 4, activation='tanh')(inp)
	hidden = Dense(512 * 512 * 4, activation='linear')(hidden)
	out = Reshape((1, 512, 512, 4))(hidden)
	net = Model(inputs=[inp], outputs=[out])
	net.compile(loss='mean_squared_error',
	            optimizer=Adam(lr=0.005, decay=0.0001),
	            metrics=['accuracy'])
	return net
示例#7
0
def CreateSimpleImageModel_512():
    """Map a 3-component input through two dense layers to a 1x512x512x4 tensor."""
    source = Input(shape=(3, ))
    net = Dense(4 * 4, activation='tanh')(source)
    net = Dense(512 * 512 * 4, activation='linear')(net)
    image = Reshape((1, 512, 512, 4))(net)
    generator = Model(inputs=[source], outputs=[image])
    optimizer = Adam(lr=0.005, decay=0.0001)
    generator.compile(loss='mean_squared_error',
                      optimizer=optimizer,
                      metrics=['accuracy'])
    return generator
示例#8
0
def ResNet34V2_model():
    """Build a ResNet-34 (pre-activation) backbone with 7 parallel softmax
    heads P1..P7, each over the module-level `n_class` classes.
    """
    inpt = Input(shape=(224, 224, 3))
    x = ZeroPadding2D((3, 3))(inpt)
    # Stem: BN-ReLU-Conv 7x7/2, then 3x3/2 max-pool -> (56, 56, 64).
    x = _BN_ReLU_Conv2d(x,
                        nb_filter=64,
                        kernel_size=(7, 7),
                        strides=(2, 2),
                        padding='valid')
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same')(x)
    # Stage 1: three identity blocks at 64 filters.
    for _ in range(3):
        x = Conv_Block(x, nb_filter=64, kernel_size=(3, 3))
    # Stages 2-4: one strided block with a conv shortcut, then identity
    # blocks -> (28,28,128), (14,14,256), (7,7,512).
    for nb_filter, n_identity in ((128, 3), (256, 5), (512, 2)):
        x = Conv_Block(x,
                       nb_filter=nb_filter,
                       kernel_size=(3, 3),
                       strides=(2, 2),
                       with_conv_shortcut=True)
        for _ in range(n_identity):
            x = Conv_Block(x, nb_filter=nb_filter, kernel_size=(3, 3))
    x = AveragePooling2D(pool_size=(7, 7))(x)
    x = Flatten()(x)
    # Seven independent classification heads sharing the pooled features.
    x = [
        Dense(n_class, activation='softmax', name='P%d' % (i + 1))(x)
        for i in range(7)
    ]
    model = Model(inputs=inpt, outputs=x)
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])
    return model
示例#9
0
    def _build_conv_model(self):
        """Construct the online (std) and target (tgt) DQN conv networks
        over the shared observation placeholder and return them as a pair.
        """
        obs_in = Input(tensor=self.obsPH)

        # std graph must be constructed before tgt!
        with tf.variable_scope('DQN_std'):
            DQN_std = Model(inputs=obs_in,
                            outputs=networks.build_conv(obs_in))

        with tf.variable_scope('DQN_tgt'):
            DQN_tgt = Model(inputs=obs_in,
                            outputs=networks.build_conv(obs_in))

        return DQN_std, DQN_tgt
示例#10
0
文件: vgg16.py 项目: fcse-is/iis
def load_vgg16(fc):
    """ Creates VGG16 model.

    :param fc: fully connected layer as output layer if true
    :type fc: bool
    :return: instance of VGG16 keras model
    :rtype: keras.Model
    """
    base_model = VGG16(include_top=True, weights='imagenet', input_shape=(224, 224, 3))
    # Cut at fc2 when a dense embedding is wanted, else at the last pool.
    output_layer = 'fc2' if fc else 'block5_pool'
    model = Model(inputs=base_model.input,
                  outputs=base_model.get_layer(name=output_layer).output)
    model.trainable = False
    return model
示例#11
0
    def __init__(
        self,
        vgg,
        feature_weights=None,
        use_gram=False,
        gram_weights=None,
        eager=False,
        session=None,
    ):
        """Perceptual-loss wrapper around a prebuilt VGG model.

        BUG FIX: the original list defaults (``[1.0] * 6`` / ``[0.1] * 6``)
        were mutable default arguments shared across all instances; a
        ``None`` sentinel now yields a fresh list with the same values.

        :param vgg: prebuilt VGG Keras model to tap features from.
        :param feature_weights: per-layer feature weights (default 1.0 x 6).
        :param use_gram: whether Gram-matrix losses are used.
        :param gram_weights: per-layer Gram weights (default 0.1 x 6).
        :param eager: skip session installation when running eagerly.
        :param session: TF session for K.set_session (graph mode only).
        """
        self.vgg = vgg
        self.feature_weights = (
            [1.0] * 6 if feature_weights is None else feature_weights)
        self.gram_weights = (
            [0.1] * 6 if gram_weights is None else gram_weights)
        self.use_gram = use_gram
        # First conv2 layer of each VGG block.
        self.target_layers = [
            "block1_conv2",
            "block2_conv2",
            "block3_conv2",
            "block4_conv2",
            "block5_conv2",
        ]

        if not eager:
            K.set_session(session)
        self.layer_names = [layer.name for layer in self.vgg.layers]
        for k in self.target_layers:
            if k not in self.layer_names:
                raise KeyError("Invalid layer {}. Available layers: {}".format(
                    k, self.layer_names))
        features = [self.vgg.get_layer(k).output for k in self.target_layers]
        self.model = Model(inputs=self.vgg.input, outputs=features)
        self.variables = self.vgg.weights
示例#12
0
    def build_discriminator(self):
        """Build the DCGAN discriminator: four conv stages followed by a
        single sigmoid validity output over self.img_shape images.
        """
        model = Sequential()

        # Stage 1: strided conv straight off the image (no BN on input).
        model.add(
            Conv2D(32,
                   kernel_size=3,
                   strides=2,
                   input_shape=self.img_shape,
                   padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))

        # Stage 2: conv + asymmetric zero-pad before BN.
        model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
        model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))

        # Stages 3-4: identical conv/BN/LeakyReLU/Dropout pattern.
        for n_filters, stride in ((128, 2), (256, 1)):
            model.add(Conv2D(n_filters, kernel_size=3, strides=stride, padding="same"))
            model.add(BatchNormalization(momentum=0.8))
            model.add(LeakyReLU(alpha=0.2))
            model.add(Dropout(0.25))

        model.add(Flatten())
        model.add(Dense(1, activation='sigmoid'))

        img = Input(shape=self.img_shape)
        return Model(img, model(img))
示例#13
0
def build_generator_dense():
    """Build a dense generator whose hidden widths come from the
    colon-separated module-level `generator_layers` string; outputs a
    tanh tensor reshaped to `url_shape`.
    """
    model = Sequential()

    # Hidden stack: the first layer also declares the input shape.
    for idx, width_str in enumerate(generator_layers.split(":")):
        width = int(width_str)
        if idx == 0:
            model.add(Dense(width, input_shape=noise_shape, activation=generator_activation))
        else:
            model.add(Dense(width, activation=generator_activation))
        model.add(Dropout(dropout_value))

    # Final layer: one unit per element of the target shape.
    model.add(Dense(np.prod(url_shape), activation="tanh"))
    model.add(Dropout(dropout_value))
    model.add(Reshape(url_shape))
    model.summary()

    noise = Input(shape=noise_shape)
    return Model(noise, model(noise))
示例#14
0
    def __init__(
        self,
        session,
        feature_layers=None,
        feature_weights=None,
        gram_weights=None,
        default_gram=0.1,
        original_scale=False,
        eager=False,
    ):
        """VGG19-based perceptual feature extractor.

        :param session: TF session installed via K.set_session (ignored
            when `eager` is true).
        :param feature_layers: layer names to tap; defaults to the input
            plus the first conv2 layer of each VGG block.
        :param feature_weights: per-layer feature weights (default 1.0).
        :param gram_weights: scalar or per-layer list of Gram weights;
            a scalar is broadcast to every layer, None uses `default_gram`.
        :param default_gram: fallback Gram weight for every layer.
        :param original_scale: stored flag; not used inside this method.
        :param eager: skip session installation when running eagerly.
        """
        # Idiom fix: `if not eager` replaces `if eager: pass / else:`.
        if not eager:
            K.set_session(session)
        self.base_model = VGG19(include_top=False, weights="imagenet")
        if feature_layers is None:
            feature_layers = [
                "input_1",
                "block1_conv2",
                "block2_conv2",
                "block3_conv2",
                "block4_conv2",
                "block5_conv2",
            ]
        self.layer_names = [l.name for l in self.base_model.layers]
        for k in feature_layers:
            # PEP 8 idiom: `k not in` rather than `not k in`.
            if k not in self.layer_names:
                raise KeyError("Invalid layer {}. Available layers: {}".format(
                    k, self.layer_names))
        self.feature_layers = feature_layers
        features = [
            self.base_model.get_layer(k).output for k in feature_layers
        ]
        self.model = Model(inputs=self.base_model.input, outputs=features)
        if feature_weights is None:
            feature_weights = len(feature_layers) * [1.0]
        if gram_weights is None:
            gram_weights = len(feature_layers) * [default_gram]
        elif isinstance(gram_weights, (int, float)):
            gram_weights = len(feature_layers) * [gram_weights]
        self.feature_weights = feature_weights
        self.gram_weights = gram_weights
        assert len(self.feature_weights) == len(features)
        # Gram losses are enabled iff any gram weight is positive.
        self.use_gram = np.max(self.gram_weights) > 0.0
        self.original_scale = original_scale

        self.variables = self.base_model.weights
示例#15
0
 def inception(self, img_size=(299, 299), img_channels=3, output_size=1000):
     """Build an Inception-v3 classifier and store it on self.classifier.

     :param img_size: spatial (height, width) of the input.
     :param img_channels: number of input channels.
     :param output_size: number of output classes.
     """
     inp = Input(shape=(*img_size, img_channels))
     logits = inception.inception_v3(inp,
                                     num_classes=output_size,
                                     aux_logits=True,
                                     transform_input=False)
     self.classifier = Model(inputs=inp, outputs=logits)
示例#16
0
    def on_epoch_end(self, epoch, logs=None):
        """Keras callback hook: record the epoch loss; plotting is disabled.

        NOTE(review): the loss is appended twice per epoch (likely a paste
        error), and the bare `return` after the plot_every_n check makes
        everything below it dead code — plotting appears deliberately
        switched off; confirm both are intended.
        """
        self.losses.append(logs.get('loss'))
        self.num_epochs = epoch
        self.losses.append(logs.get('loss'))
        if epoch % self.plot_every_n != 0: return
        return
        # --- dead code below: only runs if the `return` above is removed ---
        if self.plotS:
            # Tap the 'abundances' layer to get per-pixel abundances.
            intermediate_layer_model = Model(
                inputs=self.model.input,
                outputs=self.model.get_layer('abundances').output)
            abundances = intermediate_layer_model.predict(self.input)
            if self.size is None:
                # Assumes a square abundance map — TODO confirm.
                self.size = (int(np.sqrt(abundances.shape[0])),
                             int(np.sqrt(abundances.shape[0])))
        # Endmembers are the last layer's kernel weights.
        endmembers = self.model.layers[len(self.model.layers) -
                                       1].get_weights()[0]

        # plotHist(self.losses, 33)
        self.plotGT = True
        if self.plotGT:
            # NOTE(review): `dict` shadows the builtin; also `abundances`
            # is unbound here when self.plotS is false, so the plotS branch
            # below would raise NameError if this path ever ran.
            dict = order_endmembers(endmembers, self.endmembersGT)
            # if self.is_GT_for_A:
            #     plotAbundances(self.num_endmembers, self.size_data, abundances, self.abundancesGT, dict, self.use_ASC)
            # else:
            #     plotAbundances(self.num_endmembers, self.size_data, abundances, None, None, self.use_ASC, is_GT=False)
            plotEndmembersAndGT(self.endmembersGT, endmembers, dict)
            if self.plotS:
                plotAbundancesSimple(self.num_endmembers,
                                     (self.size[0], self.size[1]),
                                     abundances,
                                     dict,
                                     use_ASC=1,
                                     figure_nr=10)
        else:
            # plotAbundances(self.num_endmembers, self.size_data, abundances, None, None, self.use_ASC)
            plotEndmembers(self.num_endmembers, endmembers)
            plotAbundancesSimple(self.num_endmembers,
                                 (self.size[0], self.size[1]),
                                 abundances,
                                 dict=None,
                                 use_ASC=1,
                                 figure_nr=10)
        return
示例#17
0
def load_vgg16():
    '''Load ImageNet VGG16 and expose the fc2 embedding as the model output.'''
    base = VGG16(include_top=True,
                 weights='imagenet',
                 input_shape=(224, 224, 3))
    fc2_output = base.get_layer(name="fc2").output
    return Model(inputs=base.input, outputs=fc2_output)
示例#18
0
文件: dmnn.py 项目: tobytoy/MotionGAN
    def __init__(self, config):
        """Build the motion classifier: real_seq -> classifier -> action.

        :param config: project config object; fields read here are
            model_type/model_version (name), data_set, batch_size,
            num_actions, pick_num/crop_len (sequence length), njoints,
            body_members, dropout and learning_rate.
        """
        self.name = config.model_type + '_' + config.model_version
        self.data_set = config.data_set
        self.batch_size = config.batch_size
        self.num_actions = config.num_actions
        # Sequence length: prefer pick_num, then crop_len, else unbounded.
        self.seq_len = config.pick_num if config.pick_num > 0 else (
                       config.crop_len if config.crop_len > 0 else None)
        self.njoints = config.njoints
        self.body_members = config.body_members
        self.dropout = config.dropout

        # Fixed-batch input of (joints, time, xyz) motion sequences.
        real_seq = Input(
            batch_shape=(self.batch_size, self.njoints, self.seq_len, 3),
            name='real_seq', dtype='float32')

        # self.classifier is presumably defined by a subclass — confirm.
        pred_action = self.classifier(real_seq)

        self.model = Model(real_seq, pred_action, name=self.name)
        # Sparse labels (integer action ids), Adam at the configured rate.
        self.model.compile(Adam(lr=config.learning_rate), 'sparse_categorical_crossentropy', ['accuracy'])
示例#19
0
def feature_extraction(sz_input, sz_input2):
    """Build the per-view feature extractor: two convbn stems, four residual
    stages, then a 4-scale spatial-pyramid-pooling (SPP) head fused by a
    final conv stack.

    :param sz_input: input height.
    :param sz_input2: input width.
    :return: Keras Model mapping (H, W, 1) -> 4-channel feature map.
    """
    i = Input(shape=(sz_input, sz_input2, 1))
    firstconv = convbn(i, 4, 3, 1, 1)
    firstconv = Activation('relu')(firstconv)
    firstconv = convbn(firstconv, 4, 3, 1, 1)
    firstconv = Activation('relu')(firstconv)

    layer1 = _make_layer(firstconv, 4, 2, 1, 1)  # (?, 32, 32, 4)
    layer2 = _make_layer(layer1, 8, 8, 1, 1)  # (?, 32, 32, 8)
    layer3 = _make_layer(layer2, 16, 2, 1, 1)  # (?, 32, 32, 16)
    layer4 = _make_layer(layer3, 16, 2, 1, 2)  # (?, 32, 32, 16)
    layer4_size = (layer4.get_shape().as_list()[1],
                   layer4.get_shape().as_list()[2])

    # SPP head: the four copy-pasted branches collapsed into one loop —
    # pool at 2/4/8/16, 1x1 convbn + relu, bilinear upsample back to
    # layer4's size. Loop order matches the original branch1..branch4
    # construction order so layer naming/creation is unchanged.
    branches = []
    for pool in (2, 4, 8, 16):
        b = AveragePooling2D((pool, pool), (pool, pool),
                             'same',
                             data_format='channels_last')(layer4)
        b = convbn(b, 4, 1, 1, 1)
        b = Activation('relu')(b)
        branches.append(UpSampling2DBilinear(layer4_size)(b))

    # Concatenate coarsest-first (branch4..branch1), as in the original.
    output_feature = concatenate([layer2, layer4] + branches[::-1])
    lastconv = convbn(output_feature, 16, 3, 1, 1)
    lastconv = Activation('relu')(lastconv)
    lastconv = Conv2D(4,
                      1, (1, 1),
                      'same',
                      data_format='channels_last',
                      use_bias=False)(lastconv)
    print(lastconv.get_shape())
    return Model(inputs=[i], outputs=[lastconv])
示例#20
0
def build_models(seq_len=12, num_classes=4, load_weights=False):
    """Build the DST-Net pair: a frozen ResNet50 feature extractor and a
    Conv3D + Bi-LSTM sequence classifier over its 7x7x2048 feature maps.

    :param seq_len: number of frames per sequence.
    :param num_classes: number of output classes.
    :param load_weights: also restore the classifier from model/dstnet.h5.
    :return: (resnet, dstnet) tuple of Keras models.
    """
    # DST-Net: ResNet50 backbone, frozen, then custom weights restored.
    resnet = ResNet50(weights='imagenet', include_top=False)
    for layer in resnet.layers:
        layer.trainable = False
    resnet.load_weights('model/resnet.h5')

    # DST-Net head: per-frame 1x1 conv, 3D conv, flatten, 2-layer Bi-LSTM.
    feat_in = Input(shape=(seq_len, 7, 7, 2048))
    x = TimeDistributed(Conv2D(128, 1, 1, activation='relu'))(feat_in)
    x = Conv3D(64, 3, 1, 'SAME', activation='relu')(x)
    x = Reshape(target_shape=(seq_len, 7 * 7 * 64))(x)
    x = Bidirectional(LSTM(128, dropout=0.5, return_sequences=True))(x)
    x = Bidirectional(LSTM(128, dropout=0.5, return_sequences=False))(x)
    preds = Dense(num_classes, activation='softmax')(x)

    dstnet = Model(inputs=feat_in, outputs=preds)
    dstnet.compile(loss='categorical_crossentropy',
                   optimizer=SGD(lr=0.001, momentum=0.9, nesterov=True))
    if load_weights:
        dstnet.load_weights('model/dstnet.h5')
    return resnet, dstnet
示例#21
0
def get_model(filters_count, conv_depth, learning_rate, input_shape=(512, 512, 9)):
    """Build the four-stream EPI network over 512x512x9 grayscale stacks,
    compiled with RMSprop and MAE loss.
    """
    # One input per angular direction of the light field.
    names = ('input_90d', 'input_0d', 'input_45d', 'input_m45d')
    stream_inputs = [Input(shape=input_shape, name=n) for n in names]

    # Part 1: one multi-stream tower per direction.
    n_filters = int(filters_count)
    streams = [
        layersP1_multistream(input_shape, n_filters)(t)
        for t in stream_inputs
    ]

    merged = concatenate(streams, name='merged')

    # Part 2: conv-relu-bn-conv-relu over the concatenated streams.
    merged_shape = (input_shape[0], input_shape[1], n_filters * 4)
    merged = layersP2_merged(input_shape=merged_shape,
                             filters_count=n_filters * 4,
                             conv_depth=conv_depth)(merged)

    # Part 3: output head.
    output = layersP3_output(input_shape=merged_shape,
                             filters_count=n_filters * 4)(merged)

    mymodel = Model(inputs=stream_inputs, outputs=[output])
    mymodel.compile(optimizer=RMSprop(lr=learning_rate), loss='mae')
    mymodel.summary()
    return mymodel
示例#22
0
def define_epinet(sz_input,sz_input2,view_n,conv_depth,filt_num,learning_rate):
    """Assemble EPINET: four directional EPI streams, a merged conv trunk,
    and a final conv head, compiled with RMSprop/MAE.
    """
    n_views = len(view_n)

    # 4 inputs: Conv - Relu - Conv - BN - Relu per stream.
    stream_names = ('input_stack_90d', 'input_stack_0d',
                    'input_stack_45d', 'input_stack_M45d')
    stream_inputs = [
        Input(shape=(sz_input, sz_input2, n_views), name=name)
        for name in stream_names
    ]

    # 4 stream towers sharing the same architecture.
    mids = [
        layer1_multistream(sz_input, sz_input2, n_views, int(filt_num))(t)
        for t in stream_inputs
    ]

    # Merge the four towers channel-wise.
    mid_merged = concatenate(mids, name='mid_merged')

    # Merged trunk: Conv - Relu - Conv - BN - Relu (valid convs shrink by 6).
    mid_merged_ = layer2_merged(sz_input - 6, sz_input2 - 6,
                                int(4 * filt_num), int(4 * filt_num),
                                conv_depth)(mid_merged)

    # Last head: Conv - Relu - Conv (total shrink of 18).
    output = layer3_last(sz_input - 18, sz_input2 - 18,
                         int(4 * filt_num), int(4 * filt_num))(mid_merged_)

    model_512 = Model(inputs=stream_inputs, outputs=[output])
    model_512.compile(optimizer=RMSprop(lr=learning_rate), loss='mae')
    model_512.summary()
    return model_512
示例#23
0
    def __init__(self):
        """Assemble a GAN over 28x28x1 images with a 100-dim latent space."""
        # Input shape
        self.img_rows = 28
        self.img_cols = 28
        self.channels = 1
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        self.latent_dim = 100

        opt = Adam(0.0002, 0.5)

        # Build both sub-networks, then restore any saved weights.
        self.discriminator = self.build_discriminator()
        self.generator = self.build_generator()
        self.load_weights_from_file()

        # The discriminator trains standalone with accuracy reporting.
        self.discriminator.compile(loss='binary_crossentropy',
                                   optimizer=opt,
                                   metrics=['accuracy'])

        # Combined model: noise -> generator -> (frozen) discriminator.
        # Training it only updates the generator.
        noise_in = Input(shape=(self.latent_dim, ))
        fake_img = self.generator(noise_in)
        self.discriminator.trainable = False
        validity = self.discriminator(fake_img)
        self.combined = Model(noise_in, validity)
        self.combined.compile(loss='binary_crossentropy', optimizer=opt)
示例#24
0
    def _build_model(self):
        """Build the Q-network over the observation placeholder.

        :return: Keras Model for the configured model type.
        :raises ValueError: if self.config.model_type is unrecognized.
        """
        input_layer = Input(tensor=self.obsPH)
        if self.config.model_type == 'dense':
            model_layers = networks.build_dense(input_layer,
                                                self.config.layers,
                                                name_stem='dense_')
        elif self.config.model_type == 'conv':
            model_layers = networks.build_conv(input_layer)
        else:
            # BUG FIX: the original only printed an error and then crashed
            # with NameError on the unbound `model_layers`; fail fast.
            raise ValueError("{} is an unrecognized model type.".format(
                self.config.model_type))

        return Model(inputs=input_layer, outputs=model_layers)
示例#25
0
 def squeezenet(self,
                type,
                img_size=(64, 64),
                img_channels=3,
                output_size=1000):
     """Build a SqueezeNet classifier and store it on self.classifier.

     :param type: 1 for SqueezeNet 1.0, 1.1 for SqueezeNet 1.1.
     :param img_size: spatial (height, width) of the input.
     :param img_channels: number of input channels.
     :param output_size: number of output classes.
     :raises ValueError: if `type` is neither 1 nor 1.1.
     """
     input_shape = Input(shape=(*img_size, img_channels))
     if type == 1:
         x = squeezenet.squeezenet1_0(input_shape, num_classes=output_size)
     elif type == 1.1:
         x = squeezenet.squeezenet1_1(input_shape, num_classes=output_size)
     else:
         # BUG FIX: the original printed a hint and then crashed with
         # NameError on the unbound `x`; raise a clear error instead.
         raise ValueError("type must be 1 or 1.1, got {!r}".format(type))
     model = Model(inputs=input_shape, outputs=x)
     self.classifier = model
示例#26
0
def define_AttMLFNet(sz_input, sz_input2, view_n, learning_rate):
    """Build AttMLFNet: four directional view branches through a shared
    feature extractor, per-branch cost volumes with intra-branch attention,
    inter-branch attention fusion, and disparity regression (Adam/MAE).

    :param sz_input: input height.
    :param sz_input2: input width.
    :param view_n: view list; 4 * len(view_n) single-channel inputs total.
    :param learning_rate: Adam learning rate.
    """
    # 4 branch inputs, len(view_n) views per branch.
    input_list = [
        Input(shape=(sz_input, sz_input2, 1))
        for _ in range(len(view_n) * 4)
    ]
    # Shared feature extractor applied to every view.
    feature_extraction_layer = feature_extraction(sz_input, sz_input2)
    feature_list = [feature_extraction_layer(t) for t in input_list]
    # Idiom fix: slice per direction instead of copying element-by-element
    # in four loops. The 9-view split is hard-coded as in the original.
    feature_h_list = feature_list[:9]
    feature_v_list = feature_list[9:18]
    feature_45_list = feature_list[18:27]
    feature_135_list = feature_list[27:]
    # Cost volume per direction.
    cv_h = Lambda(_get_h_CostVolume_)(feature_h_list)
    cv_v = Lambda(_get_v_CostVolume_)(feature_v_list)
    cv_45 = Lambda(_get_45_CostVolume_)(feature_45_list)
    cv_135 = Lambda(_get_135_CostVolume_)(feature_135_list)
    # Intra-branch: 3D aggregation plus channel attention.
    cv_h_3d, cv_h_ca = to_3d_h(cv_h)
    cv_v_3d, cv_v_ca = to_3d_v(cv_v)
    cv_45_3d, cv_45_ca = to_3d_45(cv_45)
    cv_135_3d, cv_135_ca = to_3d_135(cv_135)
    # Inter-branch attention fusion.
    cv, attention_4 = branch_attention(
        multiply([cv_h_3d, cv_v_3d, cv_45_3d, cv_135_3d]), cv_h_ca, cv_v_ca,
        cv_45_ca, cv_135_ca)
    # Cost-volume regression to a disparity map.
    cost = basic(cv)

    cost = Lambda(lambda x: K.permute_dimensions(K.squeeze(x, -1),
                                                 (0, 2, 3, 1)))(cost)
    pred = Activation('softmax')(cost)
    pred = Lambda(disparityregression)(pred)

    model = Model(inputs=input_list, outputs=[pred])

    model.summary()

    model.compile(optimizer=Adam(lr=learning_rate), loss='mae')

    return model
示例#27
0
 def train(self,
           x_train,
           x_test,
           y_train,
           y_test,
           embedding_matrix,
           num_classes,
           seq_length=200,
           emb_dim=100,
           train_emb=True,
           windows=(3, 4, 5, 6),
           dropouts=(0.2, 0.4),
           filter_sz=100,
           hid_dim=100,
           bch_siz=50,
           epoch=8):
     """Build and fit a multi-window CNN text classifier; returns the model."""
     from tensorflow.contrib.keras.api.keras.models import Model
     from tensorflow.contrib.keras.api.keras.layers import Activation, Dense, Dropout, Embedding, Flatten, Input, Concatenate, Conv1D, MaxPool1D

     # Token-id input through a (possibly frozen) pretrained embedding.
     words = Input(shape=(seq_length, ))
     emb = Embedding(input_dim=len(embedding_matrix[:, 1]),
                     output_dim=emb_dim,
                     input_length=seq_length,
                     weights=[embedding_matrix],
                     trainable=train_emb)(words)
     emb = Dropout(dropouts[0])(emb)

     # One conv tower per window size, each max-pooled then flattened.
     towers = []
     for window in windows:
         tower = Conv1D(filters=filter_sz,
                        kernel_size=window,
                        padding='valid',
                        activation='relu',
                        strides=1)(emb)
         tower = MaxPool1D(pool_size=2)(tower)
         towers.append(Flatten()(tower))

     # Merge towers, dense hidden layer, softmax output.
     merged = Concatenate()(towers)
     merged = Dense(hid_dim, activation='relu')(merged)
     merged = Dropout(dropouts[1])(merged)
     merged = Activation('relu')(merged)
     probs = Dense(num_classes, activation='softmax')(merged)

     net = Model(words, probs)
     net.compile(loss='categorical_crossentropy',
                 optimizer='nadam',
                 metrics=['accuracy'])
     net.fit(x_train,
             y_train,
             batch_size=bch_siz,
             epochs=epoch,
             verbose=2,
             validation_data=(x_test, y_test))
     return net
示例#28
0
def build_discriminator_dense():
    """Build a dense discriminator over inputs of shape ``url_shape``.

    The hidden stack is driven by the colon-separated layer-size string
    ``discriminator_layers`` (module-level config); the head is a single
    sigmoid unit emitting a real/fake probability.
    """
    net = Sequential()
    net.add(Flatten(input_shape=url_shape))

    # Hidden layers are configured externally, e.g. "256:128:64".
    for width in discriminator_layers.split(":"):
        net.add(Dense(int(width), activation=discriminator_activation))
        net.add(Dropout(dropout_value))

    # Final single-output probability layer.
    net.add(Dense(1, activation='sigmoid'))
    net.summary()

    # Wrap the Sequential stack in a functional Model.
    inputs = Input(shape=url_shape)
    return Model(inputs, net(inputs))
示例#29
0
 def densenet(self,
              type,
              img_size=(299, 299),
              img_channels=3,
              output_size=1000):
     """Build a DenseNet classifier and store it on ``self.classifier``.

     ``type`` selects the variant (161/121/169/201); an unrecognised
     value prints a usage hint and leaves ``self.classifier`` untouched.
     """
     inputs = Input(shape=(*img_size, img_channels))
     # Dispatch table replaces the if/elif ladder over variant depths.
     builders = {
         161: densenet.densenet161,
         121: densenet.densenet121,
         169: densenet.densenet169,
         201: densenet.densenet201,
     }
     builder = builders.get(type)
     if builder is None:
         print("请输入161,121,169,201这四个数字中的一个!")
         return
     outputs = builder(inputs, num_classes=output_size)
     self.classifier = Model(inputs=inputs, outputs=outputs)
示例#30
0
    def inference(self):
        """Assemble the 4-stream EPI network and return it compiled.

        Four directional EPI stacks each pass through their own
        ``layer1_multistream`` tower; the towers are concatenated, pushed
        through the merged conv block and a last conv head, and the whole
        graph is compiled with RMSprop + MAE.
        """
        height, width = self.img_height, self.img_width
        n_views = len(self.view_n)
        ''' 4-Input : Conv - Relu - Conv - BN - Relu '''
        # One input tensor per angular direction (names preserved).
        stacks = [
            Input(shape=(height, width, n_views), name=name)
            for name in ('input_stack_90d', 'input_stack_0d',
                         'input_stack_45d', 'input_stack_M45d')
        ]
        ''' 4-Stream layer : Conv - Relu - Conv - BN - Relu '''
        # A fresh multistream tower is built for each direction.
        streams = [
            self.layer1_multistream(height, width, n_views,
                                    int(self.filt_num))(stack)
            for stack in stacks
        ]
        ''' Merge layers '''
        merged = concatenate(streams, name='mid_merged')
        ''' Merged layer : Conv - Relu - Conv - BN - Relu '''
        # Valid convolutions in the towers shrank spatial dims by 6.
        merged = self.layer2_merged(height - 6, width - 6,
                                    int(4 * self.filt_num),
                                    int(4 * self.filt_num),
                                    self.conv_depth)(merged)
        ''' Last Conv layer : Conv - Relu - Conv '''
        # Another 12 pixels are lost inside the merged block (total 18).
        output = self.layer3_last(height - 18, width - 18,
                                  int(4 * self.filt_num),
                                  int(4 * self.filt_num))(merged)

        epinet = Model(inputs=stacks, outputs=[output])
        epinet.compile(optimizer=RMSprop(lr=self.learning_rate), loss='mae')
        epinet.summary()

        return epinet
示例#31
0
 def resnet(self,
            type,
            img_size=(64, 64),
            img_channels=3,
            output_size=1000):
     """Build a ResNet classifier and store it on ``self.classifier``.

     ``type`` selects the variant (18/34/50/101/152); an unrecognised
     value prints a usage hint and leaves ``self.classifier`` untouched.
     """
     inputs = Input(shape=(*img_size, img_channels))
     # Dispatch table replaces the if/elif ladder over variant depths.
     builders = {
         18: resnet.resnet18,
         34: resnet.resnet34,
         50: resnet.resnet50,
         101: resnet.resnet101,
         152: resnet.resnet152,
     }
     builder = builders.get(type)
     if builder is None:
         print("请输入18,34,50,101,152这五个数字中的一个!")
         return
     outputs = builder(inputs, num_classes=output_size)
     self.classifier = Model(inputs=inputs, outputs=outputs)
# Frozen pre-trained embedding lookup: weights come from embedding_matrix
# and are not updated during training (trainable=False).
embedding_layer = Embedding(num_words,
                            EMBEDDING_DIM,
                            weights=[embedding_matrix],
                            input_length=MAX_SEQUENCE_LENGTH,
                            trainable=False)

print('Training model.')

# train a 1D convnet with global maxpooling
sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
embedded_sequences = embedding_layer(sequence_input)
# Three conv/pool stages; the final MaxPooling1D(35) collapses what is
# left of the sequence axis before flattening.
x = Conv1D(128, 5, activation='relu')(embedded_sequences)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(35)(x)
x = Flatten()(x)
x = Dense(128, activation='relu')(x)
# One softmax output per label class.
preds = Dense(len(labels_index), activation='softmax')(x)

model = Model(sequence_input, preds)
model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['acc'])

model.fit(x_train, y_train,
          batch_size=128,
          epochs=10,
          validation_data=(x_val, y_val))