Example #1
def build_model(args):
    # Pick the architecture named on the command line; both variants are
    # configured for 7 classes on 48x48 RGB inputs.
    if args.model == "VGG19":
        model = VGG19(num_classes=7, input_shape=(48, 48, 3), dropout=0.5)
    else:
        model = build_resnet(args.model, input_shape=(48, 48, 3), classes=7)
    return model
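A minimal sketch of how this helper is typically driven, assuming an argparse interface with a --model flag (the flag name is an assumption):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, default='VGG19')  # assumed flag name
args = parser.parse_args(['--model', 'VGG19'])

model = build_model(args)  # 7-class classifier over 48x48x3 inputs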
Example #2
import numpy as np
import matplotlib.pyplot as plt


def test():
    vgg = VGG19(2)

    # tf.train.SummaryWriter was renamed tf.summary.FileWriter in TF >= 1.0.
    # writer = tf.train.SummaryWriter("logs", graph=tf.get_default_graph())

    data = np.load('cat_affine_data.npy')
    # data = np.load('dog_affine_data.npy')

    # Train for 200 iterations, logging the loss and the estimated
    # affine-transform parameters at each step.
    for i in range(200):
        loss, params, images = vgg.train(data)
        print("Iteration " + str(i) + ": " + str(loss))
        print("Params: " + str(params))

    # Show the two input channels and the network's transformed output.
    plt.figure(1)
    plt.imshow(data[0, :, :, 0])
    plt.title('image1')

    plt.figure(2)
    plt.imshow(data[0, :, :, 1])
    plt.title('image2')

    plt.figure(3)
    plt.imshow(images[0, :, :, 0])
    plt.title("transformed")

    plt.show()
Example #3
def build_model(args):  # build the model from the command-line arguments
    if args.model == "VGG19":
        model = VGG19(num_classes=7, input_shape=(48, 48, 3), dropout=0.5)
    else:
        model = build_resnet(args.model, input_shape=(48, 48, 3), classes=7)
    return model
Example #4
from keras import backend as K
from keras.applications.vgg19 import preprocess_input  # assumed import source


def get_vgg_features(input, layers, input_shape):
    # Add a batch dimension if a single unbatched image was passed.
    if len(K.int_shape(input)) == 3:
        input = K.expand_dims(input, axis=0)
    input = preprocess_input(input)
    vgg = VGG19(input, input_shape)
    # Collect the outputs of the layers whose names were requested.
    outputs = [layer.output for layer in vgg.layers if layer.name in layers]
    return outputs
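A hypothetical call, assuming Keras-style layer names such as 'block4_conv1' (the actual names depend on how this VGG19 wrapper labels its layers):

feats = get_vgg_features(image_tensor,                    # assumed Keras tensor
                         ['block4_conv1', 'block5_conv1'],
                         (224, 224, 3))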
Example #5
import torch
from torchvision import transforms
import torchvision.transforms.functional as trf


def classifier(img):
    img = img.convert('RGB')
    img = trf.resize(img, (64, 64))
    img = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5071, 0.4867, 0.4408),
                             std=(0.2675, 0.2565, 0.2761))
    ])(img)
    img = torch.reshape(img, (1, 3, 64, 64))  # add a batch dimension
    net = VGG19(num_class=172)
    net.load_state_dict(torch.load("ckpt.pth"))
    net.eval()  # disable dropout/batch-norm updates for inference
    with torch.no_grad():
        outputs = net(img)
        pred = outputs.max(1)[1]  # index of the highest-scoring class
    return int(pred)
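A minimal usage sketch, assuming a Pillow image and the ckpt.pth checkpoint on disk (the file name below is illustrative):

from PIL import Image

img = Image.open('example.jpg')  # hypothetical input image
label = classifier(img)          # integer class index in [0, 171]
print(label)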
Example #6
def eval_style(params):
    with tf.Session() as sess:
        with tf.variable_scope('eval_style'):
            # Load the style mask and style image as graph constants.
            M = process_mask(params.mask_path, params.num_colors)
            M = tf.constant(M, dtype=tf.float32, name='style_mask')
            h, w, c = M.get_shape()

            X = process_img(params.style_path, (h, w, 3))
            X = tf.expand_dims(X, 0)
            M = tf.stack([M])  # add a batch dimension
            vggRef = VGG19(X, M, 'style_vgg')
            # Gram matrices of the reference style activations.
            style_layers = [gram(l) for l in vggRef.style_layers]
            # return X, sess.run(style_layers), (h, w)
            return style_layers, (h, w)


# from tests import *
# from params import TrainingParams
# from models import SpriteGenerator

# def test_model(sess):

#     # tf.reset_default_graph()

#     params = TrainingParams()
#     Y, style_grams, input_shape = eval_style(params)

#     # tf.reset_default_graph()

#     # M is an example of a doodle
#     # for training, it is randomly generated using diamond square
#     M = tf.constant(generate_mask(params.num_colors, shape=input_shape), name='random_map', dtype=tf.float32)
#     R = tf.stack([M]) # batch them
#     # the randomly generated M is then given to the generator network to be transformed into the
#     # stylized artwork
#     generator = SpriteGenerator(R,'Gen')

#     # the output of the generator is then graded by the VGG19
#     train = VGG19(generator.output, M, 'train')

#     with tf.variable_scope('losses'):
#         loss = style_loss(train, style_grams, 1.0)

# summarize(test_model)
# test_model(None)
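The snippet calls a gram helper that is not shown; a minimal Gram-matrix sketch in the same TF1 style, assuming each style layer has shape [batch, h, w, c] (the normalization constant is a common choice, not necessarily the original's):

import tensorflow as tf

def gram(l):
    # Gram matrix of feature maps, normalized by the number of activations.
    shape = tf.shape(l)
    num = tf.cast(shape[1] * shape[2] * shape[3], tf.float32)
    feats = tf.reshape(l, [shape[0], -1, shape[3]])            # [batch, h*w, c]
    return tf.matmul(feats, feats, transpose_a=True) / num     # [batch, c, c]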
Example #7
    def __init__(self, input_shape=(256, 256, 3), target_layer=5,
                 decoder_path=None):
        self.input_shape = input_shape
        self.target_layer = target_layer

        self.encoder = VGG19(input_shape=input_shape, target_layer=target_layer)

        if decoder_path:
            self.decoder = load_model(decoder_path,
                                      custom_objects={'Unpooling': Unpooling})
        else:
            self.decoder = self.create_decoder(target_layer)

        self.model = Model(self.encoder.inputs, self.decoder(self.encoder.outputs))

        self.loss = self.create_loss_fn(self.encoder)

        self.model.compile('adam', self.loss)
Example #8
    def __init__(self,
                 input_shape=(256, 256, 3),
                 target_layer=5,
                 decoder_path=None):
        self.input_shape = input_shape
        self.target_layer = target_layer

        self.encoder = VGG19(input_shape=input_shape,
                             target_layer=target_layer)
        if decoder_path:
            self.decoder = load_model(decoder_path)
        else:
            self.decoder = self.create_decoder(target_layer)

        self.model = Sequential()
        self.model.add(self.encoder)
        self.model.add(self.decoder)

        self.loss = self.create_loss_fn(self.encoder)

        self.model.compile('adam', self.loss)
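Either variant trains as a plain autoencoder; a hypothetical fit call (class name, data shape, and hyperparameters are all assumptions):

import numpy as np

images = np.random.rand(16, 256, 256, 3).astype('float32')  # placeholder data
ae = Autoencoder()                    # hypothetical name for the class above
ae.model.fit(images, images, batch_size=8, epochs=2)         # reconstruct input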
Example #9
    content_loss = torch.sum(torch.stack(content_loss_list))

    return content_loss


loss_L2 = torch.nn.MSELoss()
loss_L1 = torch.nn.L1Loss()

# Define the model: 6 output channels when IRT is enabled, otherwise 3.
out_channels = 6 if with_IRT else 3
net = net.UNet(in_channels=3, out_channels=out_channels, init_features=32)
net.to(device)
optimizer = torch.optim.Adam(net.parameters(), lr=0.00005)
# scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[3000,8000], gamma=0.5)

# Frozen VGG19 used only as a fixed feature extractor for the perceptual loss.
VGG_19 = VGG19(requires_grad=False).to(device)

# prepare data
input_folders = [input_folder]
processed_folders = [processed_folder]


def prepare_paired_input(task, id, input_names, processed_names, is_train=0):
    # Note: scipy.misc.imread was removed in SciPy 1.2 (see the shim below).
    net_in = np.float32(scipy.misc.imread(input_names[id])) / 255.0
    if len(net_in.shape) == 2:
        # Replicate a grayscale image across three channels.
        net_in = np.tile(net_in[:, :, np.newaxis], [1, 1, 3])
    net_gt = np.float32(scipy.misc.imread(processed_names[id])) / 255.0
    # Round dimensions down to multiples of 32 to match the UNet's downsampling.
    org_h, org_w = net_in.shape[:2]
    h = org_h // 32 * 32
    w = org_w // 32 * 32
    print(net_in.shape, net_gt.shape)
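scipy.misc.imread was removed in SciPy 1.2; a small shim using imageio (an assumed, commonly available replacement) keeps the loader running if the scipy.misc.imread calls are pointed at it:

import imageio
import numpy as np

def imread(path):
    # Drop-in stand-in for the removed scipy.misc.imread.
    return np.asarray(imageio.imread(path))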
Example #10
import scipy.misc
import cv2
import glob
import os
import numpy as np
import tensorflow as tf
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys
sys.path.append('../')
sys.path.append('../../utils')
sys.path.append('../../vgg19')
from testsrgan import SRGAN
from vgg import VGG19

# Read the image first so its height and width are known before the
# graph is built (the original used h and w before defining them).
img = cv2.imread('baboon.bmp')
h, w, c = img.shape

model = VGG19(h, w, False)
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)

saver = tf.train.Saver()
saver.restore(sess, '../backup/latest')

# Convert BGR to RGB and scale pixel values to [-1, 1].
input_ = np.zeros((1, h, w, c))
rgb_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = np.array(rgb_img) / 127.5 - 1
input_[0] = img
_, x_phi = sess.run([model.out, model.phi],
                    feed_dict={model.x: input_, model.is_training: False})
Example #11
    model_path = '/home/aistudio/vid2vid/pretrained_models/vgg19/VGG19_pretrained'
    state_dict = fluid.io.load_program_state(model_path,)
    print(state_dict.keys())

    with fluid.dygraph.guard():
# ['conv1.conv1_1.conv.weight', 'conv1.conv1_2.conv.weight', 'conv2.conv2_1.conv.weight',
#  'conv2.conv2_2.conv.weight', 'conv3.conv3_1.conv.weight', 'conv3.conv3_2.conv.weight', 
#  'conv3.conv3_3.conv.weight', 'conv3.conv3_4.conv.weight', 'conv4.conv4_1.conv.weight', 
#  'conv4.conv4_2.conv.weight', 'conv4.conv4_3.conv.weight', 'conv4.conv4_4.conv.weight', 
#  'conv5.conv5_1.conv.weight', 'conv5.conv5_2.conv.weight', 'conv5.conv5_3.conv.weight', 
#  'conv5.conv5_4.conv.weight']
# ['conv5_3_weights', 'conv5_2_weights', 'conv3_2_weights', 'conv1_2_weights', 'conv4_2_weights', 
# 'fc8_weights', 'conv3_1_weights', 'conv2_2_weights', 'conv2_1_weights', 'conv5_4_weights', 'fc7_offset', 
# 'conv5_1_weights', 'fc6_weights', 'fc7_weights', 'conv4_4_weights', 'conv4_3_weights', 'conv3_4_weights',
# 'fc6_offset', 'conv3_3_weights', 'conv4_1_weights', 'fc8_offset', 'conv1_1_weights']
        model = VGG19()
        # Map the flat pretrained keys (second list above) onto the dygraph
        # model's nested parameter names (first list above), one conv at a time.
        model.state_dict()['conv1.conv1_1.conv.weight'].set_value(state_dict['conv1_1_weights'])
        model.state_dict()['conv1.conv1_2.conv.weight'].set_value(state_dict['conv1_2_weights'])
        model.state_dict()['conv2.conv2_1.conv.weight'].set_value(state_dict['conv2_1_weights'])
        model.state_dict()['conv2.conv2_2.conv.weight'].set_value(state_dict['conv2_2_weights'])
        model.state_dict()['conv3.conv3_1.conv.weight'].set_value(state_dict['conv3_1_weights'])
        model.state_dict()['conv3.conv3_2.conv.weight'].set_value(state_dict['conv3_2_weights'])
        model.state_dict()['conv3.conv3_3.conv.weight'].set_value(state_dict['conv3_3_weights'])
        model.state_dict()['conv3.conv3_4.conv.weight'].set_value(state_dict['conv3_4_weights'])
        model.state_dict()['conv4.conv4_1.conv.weight'].set_value(state_dict['conv4_1_weights'])
        model.state_dict()['conv4.conv4_2.conv.weight'].set_value(state_dict['conv4_2_weights'])
        model.state_dict()['conv4.conv4_3.conv.weight'].set_value(state_dict['conv4_3_weights'])
        model.state_dict()['conv4.conv4_4.conv.weight'].set_value(state_dict['conv4_4_weights'])
        model.state_dict()['conv5.conv5_1.conv.weight'].set_value(state_dict['conv5_1_weights'])
        model.state_dict()['conv5.conv5_2.conv.weight'].set_value(state_dict['conv5_2_weights'])
        model.state_dict()['conv5.conv5_3.conv.weight'].set_value(state_dict['conv5_3_weights'])
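The same mapping can be written as a loop; a sketch assuming the block sizes shown in the key lists above (it also covers conv5_4, which the excerpt stops just before):

layer_counts = {1: 2, 2: 2, 3: 4, 4: 4, 5: 4}  # convs per VGG19 block
for block, n in layer_counts.items():
    for i in range(1, n + 1):
        dst = 'conv%d.conv%d_%d.conv.weight' % (block, block, i)
        model.state_dict()[dst].set_value(state_dict['conv%d_%d_weights' % (block, i)])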
Example #12
    def get_encodings(inputs):
        # Nested helper: `self` is captured from the enclosing method's scope.
        encoder = VGG19(inputs, self.input_shape, self.target_layer)
        return encoder.output
Example #13
import argparse
import torch
import torch.nn as nn

from vgg import VGG19

parser = argparse.ArgumentParser()
parser.add_argument('--lr', type=float, default=0.0001, metavar='LR')
parser.add_argument('--epochs', type=int, default=2, metavar='N')
parser.add_argument('--batch_size', type=int, default=32, metavar='N')
parser.add_argument('--dataset_dir', type=str, default='./dataset', help='dataset path')
parser.add_argument("--savemodel_dir", type=str, default='./model.pkl', help='save model path')
args = parser.parse_args()

train_loader = get_loader(distorted_image_dir='%s%s' % (args.dataset_dir, '/train/blur'),
                          corrected_image_dir='%s%s' % (args.dataset_dir, '/train/rect'),
                          batch_size=args.batch_size)

model = illNet()
vggnet = VGG19()
vggnet.load_state_dict(torch.load('./vgg19.pkl'))
# Freeze the VGG feature extractor; it serves only as a fixed perceptual-loss network.
for param in vggnet.parameters():
    param.requires_grad = False

criterion = nn.L1Loss()
vgg_loss = nn.L1Loss()

if torch.cuda.is_available():
    model = model.cuda()
    vggnet = vggnet.cuda()
    criterion = criterion.cuda()
    vgg_loss = vgg_loss.cuda()
    
if torch.cuda.device_count() > 1:
    model = nn.DataParallel(model)
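A minimal sketch of the training step this setup points toward; the optimizer, feature layer, and loss weighting are assumptions rather than the original script:

optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)  # assumed optimizer
for epoch in range(args.epochs):
    for distorted, rect in train_loader:
        if torch.cuda.is_available():
            distorted, rect = distorted.cuda(), rect.cuda()
        output = model(distorted)
        # Pixel-space L1 plus a weighted perceptual L1 on frozen VGG features.
        loss = criterion(output, rect) + 0.1 * vgg_loss(vggnet(output), vggnet(rect))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()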
Example #14
assert img_c_shape == img_s_shape, \
    'Content and style image should be the same shape, %s != %s' \
    % (str(img_c_shape), str(img_s_shape))

input_shape = img_c_shape

print('Loading decoders...')
decoders = {}
for i in range(1, 6):
    decoders[i] = load_model('./models/decoder_%d.h5' % i)

print('Loading VGG...')
vgg = VGG19(input_shape=input_shape, target_layer=5)

import matplotlib.pyplot as plt

plt.imshow(np.clip(img_c[0] / 255, 0, 1))
plt.show()

print('Styling...')
# Coarse-to-fine: stylize with deeper features first, then refine shallower.
for i in [3, 1]:
    feats_c = get_vgg_features(vgg, img_c, i)
    feats_s = get_vgg_features(vgg, img_s, i)
    feats_cs = wct(feats_c, feats_s)  # whitening-coloring transform
    img_c = decoders[i].predict(feats_cs)
    plt.imshow(np.clip(img_c[0] / 255, 0, 1))
    plt.show()
Example #15
def get_vgg_features(inputs, target_layer):
    encoder = VGG19(input_shape=(256, 256, 3), target_layer=target_layer)
    return encoder.predict(inputs)
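A hypothetical call with a placeholder batch of one 256x256 RGB image:

import numpy as np

batch = np.zeros((1, 256, 256, 3), dtype='float32')  # placeholder input
feats = get_vgg_features(batch, target_layer=3)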