Example #1
# Practice
# Build a VGG19 transfer model on cifar10
# Compare the results
import numpy as np
from tensorflow.keras.datasets import cifar10
(x_train, y_train), (x_test, y_test) = cifar10.load_data()

print(x_train.shape)  # (50000, 32, 32, 3)
print(x_test.shape)   # (10000, 32, 32, 3)

from tensorflow.keras.applications import VGG19
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.models import Sequential

vgg19 = VGG19(weights='imagenet', include_top=False,
              input_shape=(32, 32, 3))  # include_top=False allows a custom input size; the default is 224x224
# print(vgg19.weights)

vgg19.trainable = False
# vgg19.summary()
# print(len(vgg19.weights))           # 26
# print(len(vgg19.trainable_weights)) # 0

model = Sequential()
model.add(vgg19)
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dense(10, activation='softmax'))  # 10 CIFAR-10 classes
model.summary()
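The snippet stops at summary(); below is a minimal training sketch to actually compare results (the optimizer, epochs, and batch size are illustrative assumptions, not from the original):

from tensorflow.keras.utils import to_categorical

x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=10, batch_size=128, validation_split=0.1)
print(model.evaluate(x_test, y_test))  # loss and accuracy of the frozen-VGG19 transfer model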
Example #2
 def subvgg(self):
     vgg = VGG19(input_shape=(self.hr_h, self.hr_w, 3), include_top=False, weights='imagenet')
     # build a sub-model ending at the 9th layer; merely assigning to vgg.outputs
     # would not change what calling vgg() returns
     partial_vgg = Model(vgg.input, vgg.layers[9].output)
     img_input = Input(shape=(self.hr_h, self.hr_w, 3))
     img_features = partial_vgg(img_input)
     return Model(img_input, img_features)
Example #3
def vgg_layers(layer_names):
    vgg = VGG19(include_top=False, weights="imagenet")
    vgg.trainable = False
    outputs = [vgg.get_layer(name).output for name in layer_names]
    return Model([vgg.input], outputs)
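A hedged usage example for vgg_layers (these are standard VGG19 layer names; `preprocessed_batch` is a placeholder for a batch run through vgg19.preprocess_input):

style_extractor = vgg_layers(['block1_conv1', 'block2_conv1', 'block3_conv1'])
style_outputs = style_extractor(preprocessed_batch)  # one feature map per requested layer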
Example #4
    def __init__(
        self,
        dataset_name,
        light,
        source_domain,
        target_domain,
        gan_type,
        epochs,
        input_size,
        multi_scale,
        batch_size,
        sample_size,
        reporting_steps,
        content_lambda,
        style_lambda,
        g_adv_lambda,
        d_adv_lambda,
        generator_lr,
        discriminator_lr,
        data_dir,
        log_dir,
        result_dir,
        checkpoint_dir,
        generator_checkpoint_prefix,
        discriminator_checkpoint_prefix,
        pretrain_checkpoint_prefix,
        pretrain_model_dir,
        model_dir,
        disable_sampling,
        ignore_vgg,
        pretrain_learning_rate,
        pretrain_epochs,
        pretrain_saving_epochs,
        pretrain_reporting_steps,
        pretrain_generator_name,
        generator_name,
        discriminator_name,
        debug,
        **kwargs,
    ):
        self.debug = debug
        self.ascii = os.name == "nt"
        self.dataset_name = dataset_name
        self.light = light
        self.source_domain = source_domain
        self.target_domain = target_domain
        self.gan_type = gan_type
        self.epochs = epochs
        self.input_size = input_size
        self.multi_scale = multi_scale
        self.batch_size = batch_size
        self.sample_size = sample_size
        self.reporting_steps = reporting_steps
        self.content_lambda = float(content_lambda)
        self.style_lambda = float(style_lambda)
        self.g_adv_lambda = g_adv_lambda
        self.d_adv_lambda = d_adv_lambda
        self.generator_lr = generator_lr
        self.discriminator_lr = discriminator_lr
        self.data_dir = data_dir
        self.log_dir = log_dir
        self.result_dir = result_dir
        self.checkpoint_dir = checkpoint_dir
        self.generator_checkpoint_prefix = generator_checkpoint_prefix
        self.discriminator_checkpoint_prefix = discriminator_checkpoint_prefix
        self.pretrain_checkpoint_prefix = pretrain_checkpoint_prefix
        self.pretrain_model_dir = pretrain_model_dir
        self.model_dir = model_dir
        self.disable_sampling = disable_sampling
        self.ignore_vgg = ignore_vgg
        self.pretrain_learning_rate = pretrain_learning_rate
        self.pretrain_epochs = pretrain_epochs
        self.pretrain_saving_epochs = pretrain_saving_epochs
        self.pretrain_reporting_steps = pretrain_reporting_steps
        self.pretrain_generator_name = pretrain_generator_name
        self.generator_name = generator_name
        self.discriminator_name = discriminator_name

        self.logger = get_logger("Trainer", debug=debug)
        # NOTE: just minimal demonstration of multi-scale training
        self.sizes = [
            self.input_size - 32, self.input_size, self.input_size + 32
        ]

        if not self.ignore_vgg:
            self.logger.info("Setting up VGG19 for computing content loss...")
            from tensorflow.keras.applications import VGG19
            from tensorflow.keras.layers import Conv2D
            input_shape = (self.input_size, self.input_size, 3)
            # download model using kwarg weights="imagenet"
            base_model = VGG19(weights="imagenet",
                               include_top=False,
                               input_shape=input_shape)
            tmp_vgg_output = base_model.get_layer("block4_conv3").output
            tmp_vgg_output = Conv2D(512, (3, 3),
                                    activation='linear',
                                    padding='same',
                                    name='block4_conv4')(tmp_vgg_output)
            self.vgg = tf.keras.Model(inputs=base_model.input,
                                      outputs=tmp_vgg_output)
            self.vgg.load_weights(os.path.expanduser(
                os.path.join(
                    "~", ".keras", "models",
                    "vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5")),
                                  by_name=True)
        else:
            self.logger.info(
                "VGG19 will not be used. "
                "Content loss will simply imply pixel-wise difference.")
            self.vgg = None

        self.logger.info(
            f"Setting up objective functions and metrics using {self.gan_type}..."
        )
        self.mae = tf.keras.losses.MeanAbsoluteError()
        self.generator_loss_object = tf.keras.losses.BinaryCrossentropy(
            from_logits=True)
        if self.gan_type == "gan":
            self.discriminator_loss_object = tf.keras.losses.BinaryCrossentropy(
                from_logits=True)
        elif self.gan_type == "lsgan":
            self.discriminator_loss_object = tf.keras.losses.MeanSquaredError()
        else:
            wrong_msg = f"Non-recognized 'gan_type': {self.gan_type}"
            self.logger.critical(wrong_msg)
            raise ValueError(wrong_msg)

        self.g_total_loss_metric = tf.keras.metrics.Mean("g_total_loss",
                                                         dtype=tf.float32)
        self.g_adv_loss_metric = tf.keras.metrics.Mean("g_adversarial_loss",
                                                       dtype=tf.float32)
        if self.content_lambda != 0.:
            self.content_loss_metric = tf.keras.metrics.Mean("content_loss",
                                                             dtype=tf.float32)
        if self.style_lambda != 0.:
            self.style_loss_metric = tf.keras.metrics.Mean("style_loss",
                                                           dtype=tf.float32)
        self.d_total_loss_metric = tf.keras.metrics.Mean("d_total_loss",
                                                         dtype=tf.float32)
        self.d_real_loss_metric = tf.keras.metrics.Mean("d_real_loss",
                                                        dtype=tf.float32)
        self.d_fake_loss_metric = tf.keras.metrics.Mean("d_fake_loss",
                                                        dtype=tf.float32)
        self.d_smooth_loss_metric = tf.keras.metrics.Mean("d_smooth_loss",
                                                          dtype=tf.float32)

        self.metric_and_names = [
            (self.g_total_loss_metric, "g_total_loss"),
            (self.g_adv_loss_metric, "g_adversarial_loss"),
            (self.d_total_loss_metric, "d_total_loss"),
            (self.d_real_loss_metric, "d_real_loss"),
            (self.d_fake_loss_metric, "d_fake_loss"),
            (self.d_smooth_loss_metric, "d_smooth_loss"),
        ]
        if self.content_lambda != 0.:
            self.metric_and_names.append(
                (self.content_loss_metric, "content_loss"))
        if self.style_lambda != 0.:
            self.metric_and_names.append(
                (self.style_loss_metric, "style_loss"))

        self.logger.info("Setting up checkpoint paths...")
        self.pretrain_checkpoint_prefix = os.path.join(
            self.checkpoint_dir, "pretrain", self.pretrain_checkpoint_prefix)
        self.generator_checkpoint_dir = os.path.join(
            self.checkpoint_dir, self.generator_checkpoint_prefix)
        self.generator_checkpoint_prefix = os.path.join(
            self.generator_checkpoint_dir, self.generator_checkpoint_prefix)
        self.discriminator_checkpoint_dir = os.path.join(
            self.checkpoint_dir, self.discriminator_checkpoint_prefix)
        self.discriminator_checkpoint_prefix = os.path.join(
            self.discriminator_checkpoint_dir,
            self.discriminator_checkpoint_prefix)
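A hedged sketch (not the project's exact code) of how the attributes initialised above would combine into the content loss:

    def content_loss(self, real_images, generated_images):
        # uses the truncated VGG19 built in __init__ when available;
        # otherwise falls back to a plain pixel-wise difference
        if self.vgg is not None:
            real_features = self.vgg(real_images)
            generated_features = self.vgg(generated_images)
            return self.content_lambda * self.mae(real_features, generated_features)
        return self.content_lambda * self.mae(real_images, generated_images)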
Example #5
File: views.py  Project: devshree07/ipro
UPLOAD_FOLDER = './app/templates/static/uploads/'
SAVE_FOLDER = './app/templates/static/removal/'
STATIC_IMAGES = './app/static/assets/img/backgrounds/'

keras_weights_file = "app/pickles/model.h5"

ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])
# custom layers required to deserialize the saved Keras model
custom_objects = {
    'PhraseLevelFeatures': PhraseLevelFeatures,
    'AttentionMaps': AttentionMaps,
    'ContextVector': ContextVector
}
model = tf.keras.models.load_model(MODEL_PATH, custom_objects=custom_objects, compile=False)
vgg_model = VGG19(weights="imagenet", include_top=False, input_tensor=Input(shape=(224, 224, 3)))

stripe_keys = {
    'secret_key': 'sk_test_51Hhvu4Gqfu9fIci4SWDkVj1TUUGPWPDqAC88LEU8NAzy1RAPfftG7ogYRKhqCRdW1O2Ya9czzFkpmkmQyTuqVkSK00JSrnCPHW',
    'publishable_key': 'pk_test_51Hhvu4Gqfu9fIci4ulxp5s2Rx51IBlb6J3nf1mhfHcrdmv5pTimq9e31mjRGcrw9M0mpifD7oVchuAn03DoVJycR00IFXDPgFo'
}

stripe.api_key = stripe_keys['secret_key']

@lm.user_loader
def load_user(user_id):
    return User.query.get(int(user_id))

@app.route('/checkout.html', methods=['POST'])
def checkout():
    return render_template('accounts/checkout.html', amount=session["amount"])
Example #6
def build_model(encoder='efficientnetb7', center='dac', full_skip=True, attention='sc', upscore='upall'):

	MODEL_NAME = encoder
	if center is not None:
		MODEL_NAME = MODEL_NAME+'_'+center
	if attention is not None:
		MODEL_NAME = MODEL_NAME+'_'+attention
	if full_skip:
		MODEL_NAME = MODEL_NAME + '_fullskip'
	if upscore is not None:
		MODEL_NAME = MODEL_NAME + '_'+upscore


	if encoder == 'resnet50':
		encoder = ResNet50(input_tensor=Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3), name='data'), weights='imagenet', include_top=False)
		skip_names = ['data', 'conv1_relu', 'conv2_block3_out', 'conv3_block4_out', 'conv4_block6_out']
		encoder_output = encoder.get_layer('conv5_block3_out').output
		# data    320x320x3
		# conv1_relu    160x160x64
		# conv2_block3_out     80x80x256
		# conv3_block4_out    40x40x512
		# conv4_block6_out    20x20x1024
		# conv5_block3_out    10x10x2048  --> encoder output

	elif encoder == 'resnet101':
		encoder = ResNet101(input_tensor=Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3), name='data'), weights='imagenet', include_top=False)
		skip_names = ['data', 'conv1_relu', 'conv2_block3_out', 'conv3_block4_out']
		encoder_output = encoder.get_layer('conv4_block23_out').output
		#data   320x320x3
		#conv1_relu   160x160x64
		#conv2_block3_out   80x80x256
		#conv3_block4_out    40x40x512
		#conv4_block23_out   20x20x1024 --> encoder output
		#conv5_block3_out  10x10x2048

	elif encoder == 'resnet50v2':
		encoder = ResNet50V2(input_tensor=Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3), name='data'), weights='imagenet', include_top=False)
		skip_names = ['data', 'conv1_conv', 'conv2_block3_1_relu', 'conv3_block4_1_relu', 'conv4_block6_1_relu']
		encoder_output = encoder.get_layer('post_relu').output
		# data   320x320x3
		# conv1_conv   160x160x64
		# conv2_block3_1_relu   80x80x64
		# conv3_block4_1_relu   40x40x128
		# conv4_block6_1_relu   20x20x256
		# post_relu   10x10x2048  --> encoder output

	elif encoder == 'resnet101v2':
		encoder = ResNet101V2(input_tensor=Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3), name='data'), weights='imagenet', include_top=False)
		skip_names = ['data', 'conv1_conv', 'conv2_block3_1_relu', 'conv3_block4_1_relu', 'conv4_block23_1_relu']
		encoder_output = encoder.get_layer('post_relu').output
		#data   320x320x3
		#conv1_conv   160x160x64
		#conv2_block3_1_relu   80x80x64
		#conv3_block4_1_relu    40x40x128
		#conv4_block23_1_relu   20x20x256 
		#post_relu  10x10x2048 --> encoder output

	elif encoder == 'vgg19':
		encoder = VGG19(input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3), weights='imagenet', include_top=False)
		skip_names = ['block1_conv2', 'block2_conv2', 'block3_conv4', 'block4_conv4', 'block5_conv4']
		encoder_output = encoder.get_layer('block5_pool').output
		# block1_conv2   320x320x64
		# block2_conv2   160x160x128
		# block3_conv4   80x80x256
		# block4_conv4   40x40x512
		# block5_conv4   20x20x512
		# block5_pool   10x10x512   --> encoder output

	elif encoder == 'efficientnetb6':
		encoder = EfficientNetB6(input_tensor=Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3), name='data'), weights='imagenet', include_top=False)
		skip_names = ['data', 'block2a_expand_activation', 'block3a_expand_activation', 'block4a_expand_activation']
		encoder_output = encoder.get_layer('block6a_expand_activation').output
		#data   320x320x3
		#block2a_expand_activation   160x160x192
		#block3a_expand_activation   80x80x240
		#block4a_expand_activation    40x40x432
		#block6a_expand_activation   20x20x1200 --> encoder output
		#top_activation   10x10x2304

	elif encoder == 'efficientnetb7':
		encoder = EfficientNetB7(input_tensor=Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3), name='data'), weights='imagenet', include_top=False)
		skip_names = ['data', 'block2a_expand_activation', 'block3a_expand_activation', 'block4a_expand_activation']
		encoder_output = encoder.get_layer('block6a_expand_activation').output
		#data   320x320x3
		#block2a_expand_activation   160x160x192
		#block3a_expand_activation   80x80x288
		#block4a_expand_activation    40x40x480
		#block6a_expand_activation   20x20x1344 --> encoder output
		#top_activation   10x10x

	elif encoder == 'mobilenetv2':
		encoder = MobileNetV2(input_tensor=Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3), name='data'), weights='imagenet', include_top=False)
		skip_names = ['data', 'block_1_expand_relu', 'block_3_expand_relu', 'block_6_expand_relu', 'block_13_expand_relu']
		encoder_output = encoder.get_layer('out_relu').output
		# data   320x320x3
		# block_1_expand_relu   160x160x96
		# block_3_expand_relu   80x80x144
		# block_6_expand_relu    40x40x192
		# block_13_expand_relu   20x20x576
		# out_relu   10x10x1248   --> encoder output

	skip_layers = [encoder.get_layer(i).output for i in skip_names]
	# Center --------------
	if center == 'atrous':
		x = atrous_block(encoder_output)
	elif center == 'dac':
		x = dense_atrous_block(encoder_output)
	elif center == 'aspp':
		x = aspp_block(encoder_output)
	elif center is None:
		x = encoder_output

    # Decoder --------------
	if attention == 'se':
		attn_block = se_block
	elif attention == 'cbam':
		attn_block = cbam_block
	elif attention == 'sc':
		attn_block = scSE_block

	filters = [i.shape[-1] for i in skip_layers]
	filters[0] = 64

	scales = [2 ** i for i in range(1, len(filters))][::-1]
	X = []
	for i in range(1, len(filters) + 1):
		X.append(x)

		down = []
		if full_skip:
			for j in range(len(scales) - (i - 1)):
				d = down_skip(skip_layers[j], scales[j + (i - 1)], filters[-1]//4)
				if attention is not None:
					d = attn_block(d) 
				down.append(d)


		direct = direct_skip(skip_layers[-i], filters[-1]//4)
		if attention is not None:
			direct = attn_block(direct)


		x = convtranspose_block(x, filters[-1]//4)
		if attention is not None:
			x = attn_block(x)

		x = Concatenate()([x] + [direct] + down)
		
		x = conv3_block(x, x.shape[-1])

	if upscore is not None:
		if upscore=='upall':
			up_scales=[2 ** i for i in range(1, len(filters)+1)][::-1]
			UP = [upscore_block(x, 32, up_scales[i]) for i, x in enumerate(X)]
			if attention is not None:
				UP = [attn_block(x) for x in UP]

			up = Concatenate()(UP)
     
		elif upscore=='upcenter':
			up = upscore_block(X[0], 64, 2 ** len(filters))
			if attention is not None:
				up = attn_block(up)

		x = Concatenate()([x, up])


	x = Conv2D(1, 1, padding='same')(x)
	x = Activation('sigmoid')(x)

	model = Model(encoder.input, x)

	metrics = [dice_coef, Recall(), Precision()]
	opt = Nadam(LR)
	model.compile(loss=bce_dice_loss, optimizer=opt, metrics=metrics)

	return model, MODEL_NAME
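A hypothetical call, assuming IMAGE_SIZE, LR, and the various block/loss helpers are already defined at module scope:

model, model_name = build_model(encoder='vgg19', center='aspp',
                                full_skip=True, attention='se', upscore='upcenter')
print(model_name)  # vgg19_aspp_se_fullskip_upcenter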
Example #7
def build_model(mode, model_name=None, model_path=None):

    clear_session()

    if mode == 'train':
        img = Input(
            shape=(96, 96,
                   3))  # ResNet50 minimum size (32,32) for others (128,128)

        if model_name == 'DenseNet121':  #Checked and Working

            model = DenseNet121(include_top=False,
                                weights='imagenet',
                                input_tensor=img,
                                input_shape=None,
                                pooling='avg')

        elif model_name == 'MobileNet':  # checked; raised a shape error which is now resolved, working

            model = MobileNet(include_top=True,
                              weights='imagenet',
                              input_tensor=img,
                              input_shape=None,
                              pooling='avg')

        elif model_name == 'Xception':  #Checked and Working

            model = Xception(include_top=False,
                             weights='imagenet',
                             input_tensor=img,
                             input_shape=None,
                             pooling='max')

        elif model_name == 'ResNet50':  # image dimension should be high, e.g. 224x224; otherwise insufficient GPU memory

            model = ResNet50(include_top=False,
                             weights='imagenet',
                             input_tensor=img,
                             input_shape=None,
                             pooling='avg')

        elif model_name == 'InceptionV3':  #Checked and Working

            model = InceptionV3(include_top=False,
                                weights='imagenet',
                                input_tensor=img,
                                input_shape=(None),
                                pooling='avg')
        elif model_name == 'InceptionV4':  # to be checked

            model = InceptionV4(include_top=False,
                                weights='imagenet',
                                input_tensor=img,
                                input_shape=None,
                                pooling='avg')

        elif model_name == 'VGG16':  #Checked and Working
            model = VGG16(include_top=False,
                          weights='imagenet',
                          input_tensor=img,
                          input_shape=(None),
                          pooling='max')

        elif model_name == 'VGG19':  #to be checked

            model = VGG19(include_top=False,
                          weights='imagenet',
                          input_tensor=img,
                          input_shape=None,
                          pooling='avg')

        final_layer = model.layers[-1].output

        dense_layer_1 = Dense(128, activation='relu')(final_layer)
        output_layer = Dense(4, activation='softmax')(dense_layer_1)

        model = Model(inputs=[img], outputs=output_layer)
        model.compile(optimizer='adam',
                      loss='categorical_crossentropy',  # 4-class softmax output
                      metrics=['accuracy'])

    elif mode == 'inference':
        model = load_model(model_path)

    return model
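A hypothetical pair of calls (the checkpoint path is illustrative):

train_model = build_model('train', model_name='VGG16')
inference_model = build_model('inference', model_path='checkpoints/vgg16.h5')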
Example #8
                                                    y,
                                                    train_size=0.8,
                                                    shuffle=True,
                                                    random_state=42)

aaa = 1
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], x_train.shape[2],
                          aaa)
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2], aaa)
print(x_train.shape, y_train.shape)  # (3628, 128, 862, 1) (3628,)
print(x_test.shape, y_test.shape)  # (908, 128, 862, 1) (908,)

model = VGG19(
    include_top=True,
    input_shape=(128, 862, 1),
    classes=2,
    pooling=None,
    weights=None,
)

model.summary()
# model.trainable = False

model.save('C:/nmb/nmb_data/h5/5s/vgg19/vgg19_adadelta_2.h5')

# compile, train
op = Adadelta(lr=1e-3)
batch_size = 4

es = EarlyStopping(monitor='val_loss',
                   patience=20,
Example #9
                              validation_data=validation_generator)

fig, ax = plt.subplots(2, 2, sharex=True, figsize=(20, 10))
fig.set_size_inches(14., 7.)
plot_ax(ax[0][0], 'loss', 'Loss', 200)
plot_ax(ax[0][1], 'accuracy', 'Akurasi', 200)
plot_ax(ax[1][0], 'prec', 'Presisi', 200)
plot_ax(ax[1][1], 'rec', 'Recall', 200)
fig = plt.gcf()
plt.suptitle('Grafik Setiap Metrics (InceptionV3)\n(sumbu x adalah epoch)')
plt.show()

model.save('/content/drive/My Drive/Kompetisi/SATRIA DATA 2020/SEC/Inceptrans.h5')

# VGG19
vgg19_model=VGG19(input_shape=(300,300,3),include_top=False)
vgg19_model.trainable=False
vgg19_model.summary()

last_output=vgg19_model.output
print('last layer output shape: ', vgg19_model.output_shape)

# Flatten the output layer to 1 dimension
x = layers.Flatten()(last_output)
x = layers.Dense(512, activation='relu')(x)
x = layers.Dropout(0.1)(x)
x = layers.Dense(1, activation='sigmoid')(x)

model = Model(vgg19_model.input, x)
model.summary()
Example #10
def createGAN(generator, discriminator):
    '''Adversarial network'''
    discriminator.trainable = False
    # generator input
    lowImg = generator.input
    # generator output
    fakeHighImg = generator(lowImg)
    # discriminator's verdict on the generated image
    judge = discriminator(fakeHighImg)
    model = Model(inputs=lowImg, outputs=[judge, fakeHighImg])
    model.summary()
    return model


# feature extractor
vgg19 = VGG19(include_top=False, weights='imagenet')
vgg19 = Model(vgg19.input, vgg19.output)


def restoreImg(img):
    '''Restore an image from [-1, 1] back to the [0, 255] pixel range'''
    img = (img / 2 + 0.5) * 255
    return img


def contentLoss(y_true, y_pred):
    '''Content loss'''
    y_true = restoreImg(y_true)
    y_pred = restoreImg(y_pred)
    y_true = preprocess_input(y_true)
    y_pred = preprocess_input(y_pred)
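    # Plausible completion (an assumption, not the original code): the content
    # loss compares VGG19 feature maps of the two images; assumes `import tensorflow as tf`
    return tf.reduce_mean(tf.abs(vgg19(y_true) - vgg19(y_pred)))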
Example #11
def uvgg19(input_size):
    if input_size[-1] == 1:
        input_size = (input_size[0], input_size[1], 3)
    vgg19 = VGG19(include_top=False,
                  weights='imagenet',
                  input_tensor=None,
                  input_shape=input_size,
                  pooling=None)
    encoder = Model(vgg19.input,
                    vgg19.get_layer("block5_conv4").output,
                    name="encoder")

    d_i = Input(shape=(encoder.output.shape[1:]), name='decoder_input')
    block5_up = UpSampling2D(size=(2, 2), name="block5_up")(d_i)

    block4_1_conv0 = Conv2D(filters=512,
                            kernel_size=3,
                            padding='same',
                            activation='relu',
                            name="block4_1_conv0")(block5_up)
    block4_merge = Concatenate(axis=-1, name="block4_merge")(
        [vgg19.get_layer("block4_conv4").output, block4_1_conv0])
    block4_1_conv1 = Conv2D(filters=512,
                            kernel_size=3,
                            padding='same',
                            activation='relu',
                            name="block4_1_conv1")(block4_merge)
    block4_1_conv2 = Conv2D(filters=512,
                            kernel_size=3,
                            padding='same',
                            activation='relu',
                            name="block4_1_conv2")(block4_1_conv1)
    block4_1_conv3 = Conv2D(filters=512,
                            kernel_size=3,
                            padding='same',
                            activation='relu',
                            name="block4_1_conv3")(block4_1_conv2)
    block4_1_conv4 = Conv2D(filters=512,
                            kernel_size=3,
                            padding='same',
                            activation='relu',
                            name="block4_1_conv4")(block4_1_conv3)
    block4_up = UpSampling2D(size=(2, 2), name="block4_up")(block4_1_conv4)

    block3_1_conv0 = Conv2D(filters=256,
                            kernel_size=3,
                            padding='same',
                            activation='relu',
                            name="block3_1_conv0")(block4_up)
    block3_merge = Concatenate(axis=-1, name="block3_merge")(
        [vgg19.get_layer("block3_conv4").output, block3_1_conv0])
    block3_1_conv1 = Conv2D(filters=256,
                            kernel_size=3,
                            padding='same',
                            activation='relu',
                            name="block3_1_conv1")(block3_merge)
    block3_1_conv2 = Conv2D(filters=256,
                            kernel_size=3,
                            padding='same',
                            activation='relu',
                            name="block3_1_conv2")(block3_1_conv1)
    block3_1_conv3 = Conv2D(filters=256,
                            kernel_size=3,
                            padding='same',
                            activation='relu',
                            name="block3_1_conv3")(block3_1_conv2)
    block3_1_conv4 = Conv2D(filters=256,
                            kernel_size=3,
                            padding='same',
                            activation='relu',
                            name="block3_1_conv4")(block3_1_conv3)
    block3_up = UpSampling2D(size=(2, 2), name="block3_up")(block3_1_conv4)

    block2_1_conv0 = Conv2D(filters=128,
                            kernel_size=3,
                            padding='same',
                            activation='relu',
                            name="block2_1_conv0")(block3_up)
    block2_merge = Concatenate(axis=-1, name="block2_merge")(
        [vgg19.get_layer("block2_conv2").output, block2_1_conv0])
    block2_1_conv1 = Conv2D(filters=128,
                            kernel_size=3,
                            padding='same',
                            activation='relu',
                            name="block2_1_conv1")(block2_merge)
    block2_1_conv2 = Conv2D(filters=128,
                            kernel_size=3,
                            padding='same',
                            activation='relu',
                            name="block2_1_conv2")(block2_1_conv1)
    block2_up = UpSampling2D(size=(2, 2), name="block2_up")(block2_1_conv2)

    block1_1_conv0 = Conv2D(filters=64,
                            kernel_size=3,
                            padding='same',
                            activation='relu',
                            name="block1_1_conv0")(block2_up)
    block1_merge = Concatenate(axis=-1, name="block1_merge")(
        [vgg19.get_layer("block1_conv2").output, block1_1_conv0])
    block1_1_conv1 = Conv2D(filters=64,
                            kernel_size=3,
                            padding='same',
                            activation='relu',
                            name="block1_1_conv1")(block1_merge)
    block1_1_conv2 = Conv2D(filters=64,
                            kernel_size=3,
                            padding='same',
                            activation='relu',
                            name="block1_1_conv2")(block1_1_conv1)
    block1_1_conv3 = Conv2D(filters=2,
                            kernel_size=3,
                            padding='same',
                            activation='relu',
                            name="block1_1_conv3")(block1_1_conv2)
    output = Conv2D(filters=1,
                    kernel_size=1,
                    activation='sigmoid',
                    name="output")(block1_1_conv3)

    decoder = Model([vgg19.input, d_i], output, name="decoder")

    decoder_output = decoder([vgg19.input, encoder(encoder.input)])
    model = Model(encoder.input, decoder_output, name=vgg19.name)

    return model
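A hedged usage sketch (the input size is illustrative; single-channel sizes are promoted to three channels inside the function):

unet = uvgg19((256, 256, 1))
unet.summary()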
Example #12
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np
from tensorflow.keras.applications import VGG19

#  In the Functional API, the graph of layers you are manipulating is a static data structure

vgg19 = VGG19()

features_list = [layer.output for layer in vgg19.layers]
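The canonical continuation from the Keras functional-API guide, sketched here: turn that static graph into a multi-output feature extractor.

feat_extraction_model = keras.Model(inputs=vgg19.input, outputs=features_list)
img = np.random.random((1, 224, 224, 3)).astype('float32')
extracted_features = feat_extraction_model(img)  # one tensor per VGG19 layer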
Example #13
    output_image = cv.cvtColor(output_image, cv.COLOR_BGR2RGB)

    cam = cv.cvtColor(cam, cv.COLOR_BGR2RGB)
    return output_image, cam


# %%
# Read image
img = cv.imread('./cat_dog.jpg')
img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
img = cv.resize(img, (224, 224))

# %%
# Load model

model = VGG19(weights='imagenet')

model.summary()

# After printing the summary, copy the layer's name
# %%
# 174 tabby
# 211 German_shepherd

overlaped, cam = grad_cam(model, 'block5_pool', 174, img[np.newaxis] / 255.)

plt.figure(figsize=(15, 5))
plt.suptitle(f"About tabby")
plt.subplot(1, 3, 1)
plt.imshow(img)
plt.subplot(1, 3, 2)
Example #14
C = Struct(**C)

input_shape_img = (224, 224, 3)

img_input = Input(shape=input_shape_img)

print('Loading pre-trained weights...')
if C.network == 'vgg16':
    from src.architectures import vgg16 as nn
    base_layers = VGG16(weights=None,
                        include_top=False,
                        input_tensor=img_input)
elif C.network == 'vgg19':
    from src.architectures import vgg19 as nn
    base_layers = VGG19(weights=None,
                        include_top=False,
                        input_tensor=img_input)
elif C.network == 'resnet50':
    from src.architectures import resnet50 as nn
    base_layers = ResNet50(weights=None,
                           include_top=False,
                           input_tensor=img_input)
elif C.network == 'resnet152':
    from src.architectures import resnet152 as nn
    base_layers = ResNet152(weights=None,
                            include_top=False,
                            input_tensor=img_input)

with tf.device(device):

    print('Loading weights from {}'.format(options.weights))
Example #15
    X = tensor2numpy('./data/', train_set, srgan)
    x = [X[i] for i in X.keys()]
    train = np.array(x, dtype = "float64")
    y = create_onehot(X)
    history = cnn.fit(train, y, batch_size=32, epochs=5, callbacks=callbacks_list, validation_split=0.2)
    # Plot training & validation accuracy values
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('Model loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    plt.show()

"""Upload, use transfer learning"""
VGG=VGG19(input_shape=(224,224,3),include_top=False,weights='imagenet')
#freeze NatNetMobile weights
for layer in VGG.layers:
    layer.trainable=False

output = VGG.layers[6].output
output = Flatten()(output)
output = Dense(1024, activation='relu')(output)
output = Dense(512, activation='relu')(output)
output = Dropout(0.5)(output)
output = Dense(256, activation='relu')(output)
output = Dense(101, activation='softmax')(output)
VGG = Model(VGG.input, output)
VGG.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
plot_model(VGG, to_file='./model.png', show_shapes=True, show_layer_names=True)
Example #16
 def __init__(self, content_layer):
     super().__init__()
     vgg = VGG19(include_top=False, weights='imagenet')
     self.vgg = tf.keras.Model([vgg.input],
                               [vgg.get_layer(content_layer).output])
     self.vgg.trainable = False
Example #17
def build_vgg_extractor():
    vgg = VGG19(include_top=False, weights='imagenet')
    content_layers = ['block1_conv1','block1_conv2','block2_conv1','block2_conv2','block3_conv1']
    lossModel = tf.keras.Model(inputs=vgg.input, outputs=[vgg.get_layer(x).output for x in content_layers])
    lossModel.trainable = False
    return lossModel
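A hedged example of wiring the extractor into a perceptual loss (the mean-absolute reduction over layers is an assumption):

loss_model = build_vgg_extractor()

def perceptual_loss(y_true, y_pred):
    # mean absolute difference between the intermediate VGG19 activations
    feats_true = loss_model(y_true)
    feats_pred = loss_model(y_pred)
    return tf.add_n([tf.reduce_mean(tf.abs(a - b))
                     for a, b in zip(feats_true, feats_pred)])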
Example #18
    def CNN_model(self, learning_rate, epoch, batchsize, whether_Adam, Momentum_gamma, weight_decay, whether_load, cnn_type):
        """
        Resnet model
        :param learning_rate
        :param epoch
        :param batchsize
        :param whether_Adam: whether to perform Adam optimiser, if not perform Momentum
        :param Momentum gamma: a variable of Momentum
        :param weight_decay: weight decay for Momentum
        :param whether_load: whether to load trained Resnet model in if it exists (or cover it)
        """

        test_cnn_mfcc = self.train_mfcc
        test_cnn_label = self.train_label

        if(isfile("model/resnet_label.hdf5") and whether_load):
            self.cnn_model = load_model("model/resnet_label.hdf5")
        else:
            train_cnn_mfcc = self.test_mfcc
            train_cnn_label = self.test_label
            val_cnn_mfcc = self.validate_mfcc
            val_cnn_label = self.validate_label

            # input
            input = Input(shape=(self.test_mfcc.shape[1], self.test_mfcc.shape[2], 1))

            # Concatenate -1 dimension to be three channels, to fit the input need in ResNet50
            input_concate = Concatenate()([input,input,input])

            # CNN series network (VGG+Resnet)
            # reference: https://keras.io/api/applications/
            if(cnn_type == 'ResNet50'):
                from tensorflow.keras.applications import ResNet50
                cnn_output = ResNet50(pooling = 'avg')(input_concate)
            elif(cnn_type == 'ResNet101'):
                from tensorflow.keras.applications import ResNet101
                cnn_output = ResNet101(pooling = 'avg')(input_concate)
            elif(cnn_type == 'ResNet152'):
                from tensorflow.keras.applications import ResNet152
                cnn_output = ResNet152(pooling = 'avg')(input_concate)
            elif(cnn_type == 'ResNet50V2'):
                from tensorflow.keras.applications import ResNet50V2
                cnn_output = ResNet50V2(pooling = 'avg')(input_concate)
            elif(cnn_type == 'ResNet101V2'):
                from tensorflow.keras.applications import ResNet101V2
                cnn_output = ResNet101V2(pooling = 'avg')(input_concate)
            elif(cnn_type == 'ResNet152V2'):
                from tensorflow.keras.applications import ResNet152V2
                cnn_output = ResNet152V2(pooling = 'avg')(input_concate)
            elif(cnn_type == 'VGG16'):
                # width and height should not smaller than 32
                from tensorflow.keras.applications import VGG16
                cnn_output = VGG16(include_top = False, pooling = 'avg')(input_concate)
                cnn_output = Flatten()(cnn_output)
            elif(cnn_type == 'VGG19'):
                # width and height should not smaller than 32
                from tensorflow.keras.applications import VGG19
                cnn_output = VGG19(include_top = False, pooling = 'avg')(input_concate)
                cnn_output = Flatten()(cnn_output)
            else:
                # CNN layers we design
                print("No recognised CNN network. The CNN layers we designed are performed")
                # convolution layers
                conv_output1 = Conv2D(filters=32, strides=(1, 1), kernel_size=5, activation='relu')(input)
                # pool_output1 = MaxPool2D(pool_size=(2, 2))(conv_output1)
                conv_output2 = Conv2D(filters=8, strides=(2, 2), kernel_size=4, activation='relu')(conv_output1)

                conv_output2 = Dropout(0.2)(conv_output2)

                conv_output2_batch = BatchNormalization()(conv_output2)

                cnn_output = Flatten()(conv_output2_batch)


            # dense with sigmoid
            Dense_sigmoid = Dense(24, activation='sigmoid')(cnn_output)

            Dense_sigmoid = Dropout(0.2)(Dense_sigmoid)

            # dense output
            output = Dense(self.test_label.shape[1], activation='softmax')(Dense_sigmoid)

            # CNN model for label recognition
            self.cnn_model = Model(input, output)

            # optimizer
            if whether_Adam:
                optimizer = optimizers.Adam(lr=learning_rate, beta_1 = Momentum_gamma, decay=weight_decay)
            else:
                optimizer = optimizers.SGD(lr=learning_rate, momentum=Momentum_gamma, nesterov=True, decay=weight_decay)
            self.cnn_model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['mse', 'accuracy'])
            start = time.time()
            self.history = self.cnn_model.fit(train_cnn_mfcc, train_cnn_label, epochs=epoch, batch_size=batchsize, validation_data=[val_cnn_mfcc,val_cnn_label])
            self.training_time = time.time() - start
            self.cnn_model.save("model/resnet_label.hdf5")

        # model evaluation
        self.cnn_model.predict(test_cnn_mfcc)
        self.score = self.cnn_model.evaluate(test_cnn_mfcc, test_cnn_label)
        print("test loss: ", self.score[0], ", mse: ", self.score[1], ", accuracy", self.score[2])
Example #19
def train_model(path,
                train_images=None,
                train_labels=None,
                test_images=None,
                test_labels=None,
                model_name=None,
                epochs=80,
                learning_rate=0.0001,
                input_shape=(224, 224, 3),
                classes=2,
                batch_size=16,
                classifier_activation='softmax',
                callbacks=None):
    '''
    Trains a classifier head on the chosen backbone and saves the model as a .h5 file.
    path = directory for saving the files
    train_images = a numpy array containing the image data for training
    train_labels = a numpy array containing the labels for training
    test_images = a numpy array containing the image data for testing
    test_labels = a numpy array containing the labels for testing
    model_name = a string, name of the backbone -> "vgg19", "resnet50_v2", "inception_resnet_v2", "densenet201", "inception_v3", "xception", "mobilenet_v2"
    epochs, learning_rate, input_shape, classes, batch_size, classifier_activation, callbacks = usual training parameters
    '''

    base_model = None
    if model_name == 'vgg19':
        base_model = VGG19(weights=None,
                           include_top=False,
                           input_shape=input_shape)

    if model_name == 'resnet50_v2':
        base_model = ResNet50V2(weights=None,
                                include_top=False,
                                input_shape=input_shape)

    if model_name == 'inception_resnet_v2':
        base_model = InceptionResNetV2(weights=None,
                                       include_top=False,
                                       input_shape=input_shape)

    if model_name == 'densenet201':
        base_model = DenseNet201(weights=None,
                                 include_top=False,
                                 input_shape=input_shape)

    if model_name == 'inception_v3':
        base_model = InceptionV3(weights=None,
                                 include_top=False,
                                 input_shape=input_shape)

    if model_name == 'xception':
        base_model = Xception(weights=None,
                              include_top=False,
                              input_shape=input_shape)

    if model_name == 'mobilenet_v2':
        base_model = MobileNetV2(weights=None,
                                 include_top=False,
                                 input_shape=input_shape)

    x = base_model.output
    x = tf.keras.layers.GlobalAveragePooling2D()(x)
    output = tf.keras.layers.Dense(classes,
                                   activation=classifier_activation)(x)

    model = tf.keras.Model(inputs=base_model.input, outputs=output)

    optimizer = Adam(learning_rate=learning_rate,
                     beta_1=0.9,
                     beta_2=0.999,
                     epsilon=1e-07)

    model.compile(
        optimizer=optimizer,
        # the output layer already applies softmax, so the loss must not expect logits
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
        metrics=['accuracy'])

    results = model.fit(train_images,
                        train_labels,
                        epochs=epochs,
                        validation_data=(test_images, test_labels),
                        batch_size=batch_size,
                        callbacks=callbacks)

    #losses = pd.DataFrame(model.history.history)
    #losses[['loss','val_loss']].plot()

    save_model = path + model_name + '.h5'
    model.save(save_model)

    return results
Example #20
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.utils import to_categorical

# 1. Data
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)

x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], x_train.shape[2],
                          3).astype('float32') / 255.
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2],
                        3).astype('float32') / 255.

# 2. Model
t = VGG19(weights='imagenet',
          include_top=False,
          input_shape=(x_train.shape[1], x_train.shape[2], 3))
t.trainable = False  # do not train; take the ImageNet weights as-is
# model.trainable=True

model = Sequential()
model.add(t)
model.add(Flatten())
model.add(Dense(256))
model.add(BatchNormalization())
model.add(Dropout(0.2))
model.add(Activation('relu'))
model.add(Dense(256))
model.add(Dense(10, activation='softmax'))

model.compile(loss='categorical_crossentropy',
Example #21
def generate_vgg_model_large(classes_len: int):
    """
    Function to create a VGG19 model pre-trained with custom FC Layers at the start of the network plus optional layers at
    the end before the fully connected ones as well
    If the "advanced" command line argument is selected, adds an extra convolutional layer with extra filters to support
    larger images.
    This model is a larger model that starts with two more sets of convolutional layers with less filters 
    :param classes_len: The number of classes (labels).
    :return: The VGG19 model.
    """

    model_base = Sequential()

    # Reconfigure single channel input into a greyscale 3 channel input
    img_input = Input(shape=(config.VGG_IMG_SIZE_LARGE['HEIGHT'],
                             config.VGG_IMG_SIZE_LARGE['WIDTH'], 1))
    img_conc = Concatenate()([img_input, img_input, img_input])
    input_model = Model(inputs=img_input, outputs=img_conc)

    # Generate extra convolutional layers for model to put at the beginning
    model_base.add(input_model)
    model_base.add(Conv2D(16, (3, 3), activation='relu', padding='same'))

    model_base.add(Conv2D(16, (3, 3), activation='relu', padding='same'))

    model_base.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model_base.add(Conv2D(32, (3, 3), activation='relu', padding='same'))

    model_base.add(Conv2D(32, (3, 3), activation='relu', padding='same'))

    model_base.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # To ensure model fits with vgg model, we can remove the first layer from the vgg model to replace with this
    model_base.add(Conv2D(64, (3, 3), activation='relu', padding='same'))

    # Generate a VGG19 model with pre-trained ImageNet weights, input as given above, excluding the fully connected layers.
    vgg_model = VGG19(include_top=False,
                      weights='imagenet',
                      input_shape=[
                          config.VGG_IMG_SIZE['HEIGHT'],
                          config.VGG_IMG_SIZE['WIDTH'], 3
                      ])

    # Crop the VGG model to exclude its input layer and first convolutional layer
    vgg_model_cropped = Sequential()
    for layer in vgg_model.layers[2:]:  # go through until last layer
        vgg_model_cropped.add(layer)

    # Combine the models
    combined_model = Sequential()
    combined_model.add(model_base)
    combined_model.add(vgg_model_cropped)

    # Add fully connected layers
    model = Sequential()
    # Start with base model consisting of convolutional layers
    model.add(combined_model)

    # Generate additional convolutional layers
    if config.model == "advanced":
        model.add(Conv2D(1024, (3, 3), activation='relu', padding='same'))
        model.add(Conv2D(1024, (3, 3), activation='relu', padding='same'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # Flatten layer to convert each input into a 1D array (no parameters in this layer, just simple pre-processing).
    model.add(Flatten())

    # Add fully connected hidden layers.
    model.add(Dense(units=512, activation='relu', name='Dense_Intermediate_1'))
    model.add(Dense(units=32, activation='relu', name='Dense_Intermediate_2'))

    # Possible dropout for regularisation can be added later and experimented with:
    # model.add(Dropout(0.1, name='Dropout_Regularization'))

    # Final output layer that uses softmax activation function (because the classes are exclusive).
    if classes_len == 2:
        model.add(Dense(1, activation='sigmoid', name='Output'))
    else:
        model.add(Dense(classes_len, activation='softmax', name='Output'))

    # Print model details if running in debug mode.
    if config.verbose_mode:
        model.summary()

    return model
Example #22
"""
#### VGG-19
"""

# VGG-19 architecture
vgg = VGG19(include_top=False,
            weights='imagenet',
            input_shape=(32, 32, 3),
            pooling='max',
            classes=10)

# setting trainable layers in VGG-19

for layer in vgg.layers[0:15]:
    layer.trainable = True

for layer in vgg.layers[15:]:
    layer.trainable = False

# Summary VGG-19
print(vgg.summary())

# Build CNN
Example #23
import time
from timeit import timeit

import numpy as np
import tensorflow as tf
from numpy.random import randn
from tensorflow.keras.applications import VGG19
from tensorflow.keras.losses import MAE

physical_devices = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
tf.config.optimizer.set_jit(True)  # XLA enabled

m = VGG19()
m.compile(optimizer='adam', loss=MAE)


def benchmark_tensorflow(batchsize):
    ip = tf.convert_to_tensor(np.array(randn(*(batchsize, 224, 224, 3)), dtype=np.float32))

    # warm-up
    m.predict(ip)

    time.sleep(10)

    # benchmark
    print(timeit(lambda: m.predict(ip), number=10))
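A hypothetical invocation (the batch size is arbitrary):

benchmark_tensorflow(8)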
Example #24
validation_set = train_datagen.flow_from_directory(train_path,
                                                   target_size=(224, 224),
                                                   batch_size=32,
                                                   class_mode='categorical',
                                                   shuffle=True,
                                                   subset='validation')

# In[4]:

from tensorflow.keras.applications import VGG19
from tensorflow.keras.layers import GlobalAveragePooling2D, Dropout

## Initialise with a 3-channel RGB input shape and ImageNet weights; include_top=False lets us attach our own classifier head

mv = VGG19(input_shape=IMAGE_SIZE + [3], weights='imagenet', include_top=False)

# In[5]:

for layers in mv.layers:
    layers.trainable = False

# In[6]:

x = Flatten()(mv.output)
prediction = Dense(3, activation='softmax')(x)

# In[7]:

model = Model(inputs=mv.input, outputs=prediction)
Example #25
    epochs=epochs,
    validation_data=val_generator,
    workers=4
)

score = model.evaluate(val_generator,verbose=2)
print('Test loss:', score[0])
print('Test accuracy:', score[1])

"""## VGG19

"""

from tensorflow.keras.applications import VGG19

net= VGG19(include_top=False, weights='imagenet', input_tensor=Input(shape=(150,150,3))) 

for layer in net.layers[:-5]:
    layer.trainable = False

x = net.output
x = Flatten()(x)
x = Dropout(0.5)(x)
output_layer = Dense(1, activation='sigmoid', name='sigmoid')(x)
model = Model(inputs=net.input, outputs=output_layer)

# initiate RMSprop optimizer
opt = keras.optimizers.RMSprop(lr=0.0001, decay=1e-6)

# Train the model using RMSprop
model.compile(loss='binary_crossentropy',
Example #26
from tensorflow.keras.utils import to_categorical
from sklearn.metrics import confusion_matrix
from tensorflow.keras.models import Model

import tensorflow.keras.backend as K
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import os, re, cv2
import itertools

#Defining the model objects for ResNet50 and VGG19

K.clear_session()
base_model = ResNet50(include_top=False, weights='imagenet', input_shape=(32,32,3))
base_model_vgg = VGG19(include_top=False, weights='imagenet', input_shape=(32,32,3))

# Freezing all of the VGG19 layers

for layer in base_model_vgg.layers:
    layer.trainable= False

base_model_vgg.summary()

#Adding some customized layers for the model

y1 = base_model_vgg.output
y1 = Flatten()(y1)
y1 = BatchNormalization()(y1)
y1 = Dense(128,activation='relu')(y1)
y1 = Dropout(0.3)(y1)
Example #27
def mini_vgg(layer_names):
    vgg = VGG19(weights='imagenet', include_top=False)
    vgg.trainable = False
    outputs = [vgg.get_layer(name).output for name in layer_names]
    model = Model([vgg.input], outputs)
    return model
Example #28
(trainX, testX, trainY, testY) = train_test_split(imagens,
                                                  labels,
                                                  test_size=0.20,
                                                  stratify=labels,
                                                  random_state=random_state)

# DATA AUGMENTATION
train_datagen = ImageDataGenerator(rotation_range=20, zoom_range=0.2)

train_datagen.fit(trainX)
data_aug = train_datagen.flow(trainX, trainY, batch_size=batch_size)

# TRANSFER LEARNING
conv_base = VGG19(weights='imagenet',
                  include_top=False,
                  input_shape=input_shape)

conv_base.summary()

#  Retrain part of VGG19 (fine-tune from block5_conv1 onwards)
conv_base.trainable = True
set_trainable = False

for layer in conv_base.layers:
    if layer.name == 'block5_conv1':
        set_trainable = True
    if set_trainable:
        layer.trainable = True
    else:
        layer.trainable = False
Example #29
x_test = scaler.transform(x_test)
x_val = scaler.transform(x_val)

#to_categorical
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
y_val = to_categorical(y_val)

#reshape
x_train = x_train.reshape(-1, 32, 32, 3)
x_test = x_test.reshape(-1, 32, 32, 3)
x_val = x_val.reshape(-1, 32, 32, 3)
print(x_train.shape, x_test.shape, x_val.shape)

# 2. Model
vgg19 = VGG19(weights='imagenet', include_top=False,
              input_shape=(32, 32, 3))  # 16 conv layers when the top is excluded
vgg19.trainable = False  # freeze: use the pretrained weights without updating them
model = Sequential()
model.add(vgg19)
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(10, activation='softmax'))  # also applies when using mnist
model.summary()
print(len(vgg19.weights))  # 26
print(len(vgg19.trainable_weights))  # 0

# compile, train
from tensorflow.keras.callbacks import EarlyStopping
Example #30
 def vgg_model(self):
     layers = self.style_layers + self.content_layers
     vgg = VGG19(include_top=False, weights="imagenet")
     outputs = [vgg.get_layer(layer).output for layer in layers]
     model = Model([vgg.input], outputs)
     return model
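A hedged sketch of how such an extractor is typically consumed in neural style transfer, inside the same class (attribute names assumed from the constructor):

 def extract_features(self, preprocessed_image):
     extractor = self.vgg_model()
     outputs = extractor(preprocessed_image)
     style_outputs = outputs[:len(self.style_layers)]
     content_outputs = outputs[len(self.style_layers):]
     return style_outputs, content_outputs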