help='Standard deviation for random init.')
    parser.add_argument('--gpu', type=str, default='')
    parser.add_argument('--allow_growth', default=False, action='store_true')

    args = parser.parse_args()
    # Arguments parsed

    # Split the extension
    output_path, ext = os.path.splitext(args.output_path)
    if ext == '':
        ext = '.png'
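    # Configure GPU visibility and memory growth (project helper)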
    config_gpu(args.gpu, args.allow_growth)

    ## Precomputing the targets for content and style
    # Load content and style images
    content_image = preprocess_image_scale(args.content_image_path,
                                           img_size=args.img_size)
    style_images = [
        preprocess_image_scale(img, img_size=args.style_img_size)
        for img in args.style_image_path
    ]
    nb_styles = len(style_images)

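    # Headless VGG19 pretrained on ImageNet, used as the fixed feature extractor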
    model = vgg19.VGG19(weights='imagenet', include_top=False)
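    # Index layer outputs by name so content/style layers can be picked via the CLI arguments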
    outputs_dict = {layer.name: layer.output for layer in model.layers}

    content_features = get_content_features(outputs_dict, args.content_layers)
    style_features = get_style_features(outputs_dict,
                                        args.style_layers,
                                        norm_by_channels=args.norm_by_channels)

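    # Backend function mapping an input image to its content feature tensors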
    get_content_fun = K.function([model.input], content_features)
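    # Hypothetical usage (not in the original listing): calling the compiled
    # function with a list of input arrays returns the content feature arrays,
    # which serve as fixed targets for the stylization loss.
    content_targets = get_content_fun([content_image])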
Example #2
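    # Rebuild the pastiche network with its training-time settings and restore
    # the weights from the checkpoint's HDF5 file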
    pastiche_net = pastiche_model(None,
                                  width_factor=model_args.width_factor,
                                  nb_classes=model_args.nb_classes,
                                  targets=class_targets)
    with h5py.File(checkpoint_path + '.h5', 'r') as f:
        pastiche_net.load_weights_from_hdf5_group(f['model_weights'])

    inputs = [pastiche_net.input, class_targets, K.learning_phase()]

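    # Backend function: (images, style targets, learning phase) -> stylized images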
    transfer_style = K.function(inputs, [pastiche_net.output])

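    # Enough batches to cover every style class for each input image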
    num_batches = int(np.ceil(model_args.nb_classes / float(args.batch_size)))

    for img_name in os.listdir(args.input_path):
        print('Processing %s' % img_name)
        img = preprocess_image_scale(os.path.join(args.input_path, img_name),
                                     img_size=args.img_size)
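        # One copy of the input per style class, so every row gets a different style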
        imgs = np.repeat(img, model_args.nb_classes, axis=0)
        out_name = os.path.splitext(os.path.split(img_name)[-1])[0]

        for batch_idx in range(num_batches):
            idx = batch_idx * args.batch_size

            batch = imgs[idx:idx + args.batch_size]
            indices = batch_idx * args.batch_size + np.arange(batch.shape[0])

            if args.use_style_name:
                names = style_names[idx:idx + args.batch_size]
            else:
                names = indices
            print('  Processing styles %s' % str(names))
Example #3
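    # Backend function mapping a style image to its per-layer style targets (Gram matrices)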
    get_style_target = K.function([loss_net.input], s_targets)
    gm_lists = [[] for _ in args.style_layers]

    img_list = []
    img_size_list = []
    # Get style image names or get all images in the directory
    if args.style_imgs is None:
        args.style_imgs = os.listdir(args.style_dir)

    # Check the image sizes
    args.style_img_size = std_input_list(args.style_img_size, len(args.style_imgs), 'Image size')

    for img_name, img_size in zip(args.style_imgs, args.style_img_size):
        try:
            print(img_name)
            img = preprocess_image_scale(os.path.join(args.style_dir, img_name),
                                         img_size=img_size)
            s_targets = get_style_target([img])
            for gm_list, target in zip(gm_lists, s_targets):
                gm_list.append(target)
            img_list.append(os.path.splitext(img_name)[0])
            img_size_list.append(img_size)
        except IOError:
            print('Could not open file %s as image.' % img_name)

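    # Stack the per-image style targets into one array per style layer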
    mtx = []
    for l in gm_lists:
        mtx.append(np.concatenate(l))

    f = h5py.File(args.gram_dataset_path, 'w')

    f.attrs['img_names'] = img_list