Example #1
    def __init__(self, layers, init_img, subject_img, style_img,
                 subject_weights, style_weights, smoothness=0.0):

        # Map weights (given per convolution/activation block) to layer indices
        self.subject_weights = np.zeros(len(layers))
        self.style_weights = np.zeros(len(layers))
        layers_len = 0
        conv_idx = 0
        for l, layer in enumerate(layers):
            if isinstance(layer, dp.Activation):
                self.subject_weights[l] = subject_weights[conv_idx]
                self.style_weights[l] = style_weights[conv_idx]
                if subject_weights[conv_idx] > 0 or \
                   style_weights[conv_idx] > 0:
                    layers_len = l+1
                conv_idx += 1

        # Discard unused layers
        layers = layers[:layers_len]

        # Wrap convolution layers for better performance
        self.layers = [Convolution(l) if isinstance(l, dp.Convolution) else l
                       for l in layers]

        # Setup network
        x_shape = init_img.shape
        self.x = Parameter(init_img)
        self.x.setup(x_shape)
        for layer in self.layers:
            layer.setup(x_shape)
            x_shape = layer.y_shape(x_shape)

        # Precompute subject features and style Gram matrices
        self.subject_feats = [None]*len(self.layers)
        self.style_grams = [None]*len(self.layers)
        next_subject = ca.array(subject_img)
        next_style = ca.array(style_img)
        for l, layer in enumerate(self.layers):
            next_subject = layer.fprop(next_subject)
            next_style = layer.fprop(next_style)
            if self.subject_weights[l] > 0:
                self.subject_feats[l] = next_subject
            if self.style_weights[l] > 0:
                gram = gram_matrix(next_style)
                # Scale gram matrix to compensate for different image sizes
                n_pixels_subject = np.prod(next_subject.shape[2:])
                n_pixels_style = np.prod(next_style.shape[2:])
                scale = (n_pixels_subject / float(n_pixels_style))
                self.style_grams[l] = gram * scale

        self.tv_weight = smoothness
        # Normalized Laplacian kernel used for the total variation (smoothness) penalty
        kernel = np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]], dtype=dp.float_)
        kernel /= np.sum(np.abs(kernel))
        self.tv_kernel = ca.array(kernel[np.newaxis, np.newaxis, ...])
        self.tv_conv = ca.nnet.ConvBC01((1, 1), (1, 1))
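
Both examples call a gram_matrix helper that is not shown here. Below is a minimal sketch of such a helper, assuming feature maps in (batch, channels, height, width) layout with a batch size of 1; the actual code operates on cudarray (ca) arrays, but the computation is the same expressed in NumPy:

import numpy as np

def gram_matrix(feats_bc01):
    # Assumes a single image, i.e. feats_bc01 has shape (1, channels, height, width).
    n_channels = feats_bc01.shape[1]
    # Flatten each channel's feature map into a row: (channels, height*width).
    feats = np.reshape(feats_bc01, (n_channels, -1))
    # Inner products between all pairs of channel activations.
    return np.dot(feats, feats.T)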
Example #2
    def __init__(self, layers, subject_img, style_img, subject_weights,
                 style_weights):

        # Map weights (given per convolution/activation block) to layer indices
        self.subject_weights = np.zeros(len(layers))
        self.style_weights = np.zeros(len(layers))
        layers_len = 0
        conv_idx = 0
        for l, layer in enumerate(layers):
            if isinstance(layer, dp.Activation):
                self.subject_weights[l] = subject_weights[conv_idx]
                self.style_weights[l] = style_weights[conv_idx]
                if subject_weights[conv_idx] > 0 or \
                   style_weights[conv_idx] > 0:
                    layers_len = l + 1
                conv_idx += 1

        # Discard unused layers
        layers = layers[:layers_len]

        # Wrap convolution layers for better performance
        self.layers = [
            Convolution(l) if isinstance(l, dp.Convolution) else l
            for l in layers
        ]

        # Setup network
        x_shape = subject_img.shape
        self.x = Parameter(subject_img)
        self.x._setup(x_shape)
        for layer in self.layers:
            layer._setup(x_shape)
            x_shape = layer.y_shape(x_shape)

        # Precompute subject features and style Gram matrices
        self.subject_feats = [None] * len(self.layers)
        self.style_grams = [None] * len(self.layers)
        next_subject = ca.array(subject_img)
        next_style = ca.array(style_img)
        for l, layer in enumerate(self.layers):
            next_subject = layer.fprop(next_subject)
            next_style = layer.fprop(next_style)
            if self.subject_weights[l] > 0:
                self.subject_feats[l] = next_subject
            if self.style_weights[l] > 0:
                gram = gram_matrix(next_style)
                # Scale gram matrix to compensate for different image sizes
                n_pixels_subject = np.prod(next_subject.shape[2:])
                n_pixels_style = np.prod(next_style.shape[2:])
                scale = (n_pixels_subject / float(n_pixels_style))
                self.style_grams[l] = gram * scale
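
For context, here is a hedged sketch of how a constructor like the one in Example #2 might be driven. Only the constructor signature comes from the example above; the class name StyleNetwork, the layer builder, and the image loader are illustrative assumptions:

import numpy as np
import deeppy as dp

# Hypothetical driver code: build_pretrained_layers and load_image are assumed
# helpers, not part of the examples above.
layers = build_pretrained_layers()        # list of dp layers from a pretrained conv net
subject_img = load_image('subject.jpg')   # float array, shape (1, 3, H, W)
style_img = load_image('style.jpg')

# One weight per convolution/activation block, as expected by the mapping loop above.
n_convs = sum(1 for l in layers if isinstance(l, dp.Activation))
subject_weights = np.zeros(n_convs)
style_weights = np.zeros(n_convs)
subject_weights[-1] = 1.0    # match subject content on the deepest used layer
style_weights[:4] = 0.25     # spread style matching over the earlier layers

net = StyleNetwork(layers, subject_img, style_img, subject_weights, style_weights)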