Example #1
 def build(self, input_shape):
     params = dict()
     mu = K.variable(0)
     sigma = K.variable(0.1, constraint=keras.constraints.non_neg()) # , constraint=lambda x: max(0, x))
     x_up = K.variable(0.1, constraint=keras.constraints.non_neg()) # , constraint=lambda x: max(0, x))
     x_down = K.variable(0.1, constraint=keras.constraints.non_neg()) # , constraint=lambda x: max(0, x))
     alpha_up = K.variable(0.5, constraint=keras.constraints.non_neg()) # , constraint=lambda x: max(0, x))
     alpha_down = K.variable(0.5, constraint=keras.constraints.non_neg()) # , constraint=lambda x: max(0, x))
     self.params = dict(mu=mu, sigma=sigma, x_up=x_up, x_down=x_down, alpha_up=alpha_up, alpha_down=alpha_down)
     super().build(input_shape)
Example #2
def get_total_loss(content_losses, style_losses, total_var_loss,
                   content_weights, style_weights, tv_weights, class_targets):
    total_loss = K.variable(0.)
    weighted_content_losses = []
    weighted_style_losses = []

    # Compute content losses
    for loss in content_losses:
        weighted_loss = K.mean(K.gather(content_weights, class_targets) * loss)
        weighted_content_losses.append(weighted_loss)
        total_loss += weighted_loss

    # Compute style losses
    for loss in style_losses:
        weighted_loss = K.mean(K.gather(style_weights, class_targets) * loss)
        weighted_style_losses.append(weighted_loss)
        total_loss += weighted_loss

    # Compute tv loss
    weighted_tv_loss = K.mean(
        K.gather(tv_weights, class_targets) * total_var_loss)
    total_loss += weighted_tv_loss

    return (total_loss, weighted_content_losses, weighted_style_losses,
            weighted_tv_loss)
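A minimal invocation sketch (TF1-era keras.backend API; every tensor below is made up for illustration):

from keras import backend as K

# Hypothetical per-class weights and per-sample class ids.
content_weights = K.variable([1.0, 0.5])
style_weights = K.variable([10.0, 5.0])
tv_weights = K.variable([1e-4, 2e-4])
class_targets = K.constant([0, 1], dtype='int32')

# One per-sample loss tensor per layer, also made up.
content_losses = [K.variable([0.3, 0.7])]
style_losses = [K.variable([0.2, 0.1])]
total_var_loss = K.variable([0.05, 0.02])

total, wc, ws, wtv = get_total_loss(
    content_losses, style_losses, total_var_loss,
    content_weights, style_weights, tv_weights, class_targets)
print(K.eval(total))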
Example #3
def make_loss_function_wcc(weights):
    """ make loss function: weighted categorical crossentropy
        Args:
            * weights<ktensor|nparray|list>: crossentropy weights
        Returns:
            * weighted categorical crossentropy function
    """
    if isinstance(weights, (list, np.ndarray)):
        weights = K.variable(weights)

    def loss(target, output, from_logits=False):
        if not from_logits:
            output /= tf.reduce_sum(output, len(output.get_shape()) - 1, True)
            _epsilon = tf.convert_to_tensor(K.epsilon(),
                                            dtype=output.dtype.base_dtype)
            output = tf.clip_by_value(output, _epsilon, 1. - _epsilon)
            weighted_losses = target * tf.log(output) * weights
            return -tf.reduce_sum(weighted_losses, len(output.get_shape()) - 1)
        else:
            raise ValueError(
                'WeightedCategoricalCrossentropy: not valid with logits')

    return loss
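A hedged usage sketch with a toy three-class model (the model and data here are not part of the snippet):

import numpy as np
from keras.models import Sequential
from keras.layers import Dense

# Down-weight class 0, up-weight class 2; values are illustrative.
loss_fn = make_loss_function_wcc([0.5, 2.0, 10.0])

model = Sequential([Dense(3, activation='softmax', input_shape=(4,))])
model.compile(optimizer='adam', loss=loss_fn)
model.fit(np.random.rand(8, 4), np.eye(3)[np.random.randint(0, 3, 8)],
          epochs=1, verbose=0)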
Example #4
    def build(self, input_shape):
        input_shape = to_tuple(input_shape)
        if self.data_format == 'channels_first':
            stack_size = input_shape[1]
            self.kernel_shape = (self.filters, stack_size, self.nb_row,
                                 self.nb_col)
            self.kernel_norm_shape = (1, stack_size, self.nb_row, self.nb_col)
        elif self.data_format == 'channels_last':
            stack_size = input_shape[3]
            self.kernel_shape = (self.nb_row, self.nb_col, stack_size,
                                 self.filters)
            self.kernel_norm_shape = (self.nb_row, self.nb_col, stack_size, 1)
        else:
            raise ValueError('Invalid data_format:', self.data_format)
        self.W = self.add_weight(shape=self.kernel_shape,
                                 initializer=partial(self.kernel_initializer),
                                 name='{}_W'.format(self.name),
                                 regularizer=self.kernel_regularizer,
                                 constraint=self.kernel_constraint)

        kernel_norm_name = '{}_kernel_norm'.format(self.name)
        self.kernel_norm = K.variable(np.ones(self.kernel_norm_shape),
                                      name=kernel_norm_name)

        if self.use_bias:
            self.b = self.add_weight(shape=(self.filters, ),
                                     initializer='zero',
                                     name='{}_b'.format(self.name),
                                     regularizer=self.bias_regularizer,
                                     constraint=self.bias_constraint)
        else:
            self.b = None

        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
        self.built = True
Example #5
    def __init__(self,
                 input_shape,
                 classes,
                 epochs,
                 batch_size,
                 extract_length=32 * 32 * 3 * 1,
                 extract_shape=(1, 32, 32, 3),
                 optimizer='adam',
                 dataset='cifar'):

        self.input_shape = input_shape
        self.classes = classes
        self.model = self.get_model()
        self.epochs = epochs
        self.batch_size = batch_size
        self.extract_length = extract_length
        self.extract_shape = extract_shape
        self.optimizer = optimizer
        self.dataset = dataset

        self.loss_value = None
        self.acc_value = None

        self.train_data, self.test_data = load_data(name=self.dataset,
                                                    classes=self.classes)

        # Original design
        #self.extracted_data = self.train_data[0].flatten()[:self.extract_length]
        self.extracted_data = self.train_data[0].flatten()[:self.extract_length * 3]
        self.extracted_data = rgb_to_grayscale(
            self.extracted_data.reshape(self.extract_shape)).flatten()
        #self.total_weights = self.model.get_weights()
        self.total_weights = K.variable(
            extract_params(self.extract_length, self.model.get_weights()))

        self.model = self.compile_model()
Example #6
    def __init__(self,
                 input_dim,
                 train_ds,
                 val_ds=None,
                 filename="weightsvgg.h5",
                 coverage=0.8,
                 alpha=0.5,
                 train=True,
                 baseline=False):
        self.lamda = coverage
        self.alpha = alpha
        self.mc_dropout_rate = K.variable(value=0)
        self.num_classes = 2
        self.weight_decay = 0.0005

        self.x_shape = input_dim
        self.filename = filename

        self.model = self.build_model()
        if baseline:
            self.alpha = 0

        if train and os.path.isfile("saved_data/{}".format(self.filename)):
            self.model.load_weights("saved_data/{}".format(self.filename))
Example #7
def get_style_loss():
    '''
    This is not a function in the usual sense but more of a one-off call,
    since nothing needs to be passed in by reference.
    x_feature is a tensor whose shape is unknown.
    '''
    layer_names = ['block3_conv1', 'block4_conv1']
    style_loss = K.variable(0)

    for layer_name in layer_names:
        x_feature = get_feature_map(layer_name)[0][0]
        s_feature = get_feature_val(layer_name, s_im)
        # (56,56,256)
        size, size1, channels = x_feature.shape

        for i in range(channels):

            C = x_feature[:, :, i]
            S = s_feature[:, :, i].transpose()
            el = K.sum(K.square(S - C))

            el = el / (4.0 * int(channels) * int(channels) * int(size) *
                       int(size))
            style_loss = style_loss + el
    return style_loss
Example #8
    def __init__(self, optimizer, steps_per_update=1, **kwargs):
        super(AccumOptimizer, self).__init__(**kwargs)
        self.optimizer = optimizer
        with K.name_scope(self.__class__.__name__):
            self.steps_per_update = steps_per_update
            self.iterations = K.variable(0, dtype='int64', name='iterations')
            self.cond = K.equal(self.iterations % self.steps_per_update, 0)
            self.lr = self.optimizer.lr
            self.optimizer.lr = K.switch(self.cond, self.optimizer.lr, 0.)
            for attr in ['momentum', 'rho', 'beta_1', 'beta_2']:
                if hasattr(self.optimizer, attr):
                    value = getattr(self.optimizer, attr)
                    setattr(self, attr, value)
                    setattr(self.optimizer, attr,
                            K.switch(self.cond, value, 1 - 1e-7))
            for attr in self.optimizer.get_config():
                if not hasattr(self, attr):
                    value = getattr(self.optimizer, attr)
                    setattr(self, attr, value)
            # Override the original get_gradients method so it returns the accumulated gradients.
            def get_gradients(loss, params):
                return [ag / self.steps_per_update for ag in self.accum_grads]

            self.optimizer.get_gradients = get_gradients
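Note that self.accum_grads is defined in a part of the class not shown here. A hedged usage sketch, assuming the complete class:

from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam

model = Sequential([Dense(10, activation='softmax', input_shape=(20,))])
# Gradients are accumulated over 8 mini-batches before each update,
# simulating an 8x larger batch at no extra memory cost.
model.compile(optimizer=AccumOptimizer(Adam(lr=1e-3), steps_per_update=8),
              loss='categorical_crossentropy')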
Example #9
def weighted_categorical_crossentropy(weights):
    """
    A weighted version of keras.objectives.categorical_crossentropy
    
    Variables:
        weights: numpy array of shape (C,) where C is the number of classes
    
    Usage:
        weights = np.array([0.5,2,10]) # Class one at 0.5, class 2 twice the normal weights, class 3 10x.
        loss = weighted_categorical_crossentropy(weights)
        model.compile(loss=loss,optimizer='adam')
    """
    weights = K.variable(weights)
    def loss(y_true, y_pred):
        # scale predictions so that the class probas of each sample sum to 1
        y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
        # clip to prevent NaN's and Inf's
        y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())
        # calc
        loss = y_true * K.log(y_pred) * weights
        loss = -K.sum(loss, -1)
        return loss
  
    return loss
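A quick numeric check of the closure (values are illustrative):

import numpy as np
from keras import backend as K

loss_fn = weighted_categorical_crossentropy(np.array([0.5, 2.0, 10.0]))
y_true = K.constant([[0., 0., 1.]])
y_pred = K.constant([[0.1, 0.1, 0.8]])
# Only the true class contributes: -10 * log(0.8) ~= 2.23
print(K.eval(loss_fn(y_true, y_pred)))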
Example #10
def make_soft(y_true, fragment_length, nb_output_bins, train_with_soft_target_stdev, with_prints=False):
    receptive_field, _ = compute_receptive_field()
    n_outputs = fragment_length - receptive_field + 1

    # Make a gaussian kernel.
    kernel_v = scipy.signal.gaussian(9, std=train_with_soft_target_stdev)
    print(kernel_v)
    kernel_v = np.reshape(kernel_v, [1, 1, -1, 1])
    kernel = K.variable(kernel_v)

    if with_prints:
        y_true = print_t(y_true, 'y_true initial')

    # y_true: [batch, timesteps, input_dim]
    y_true = K.reshape(y_true, (-1, 1, nb_output_bins, 1))  # Same filter for all output; combine with batch.
    # y_true: [batch*timesteps, n_channels=1, input_dim, dummy]
    y_true = K.conv2d(y_true, kernel, padding='same')
    y_true = K.reshape(y_true, (-1, n_outputs, nb_output_bins))  # Same filter for all output; combine with batch.
    # y_true: [batch, timesteps, input_dim]
    y_true /= K.sum(y_true, axis=-1, keepdims=True)

    if with_prints:
        y_true = print_t(y_true, 'y_true after')
    return y_true
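compute_receptive_field and print_t come from the surrounding WaveNet code. The smoothing idea itself can be sketched standalone in NumPy (bin count and stddev are made up):

import numpy as np
import scipy.signal

one_hot = np.zeros(256)
one_hot[128] = 1.0                         # hard target: one active bin
kernel = scipy.signal.gaussian(9, std=2.0)
soft = np.convolve(one_hot, kernel, mode='same')
soft /= soft.sum()                         # renormalize, as the Keras version does
print(soft[124:133])                       # mass is now spread over neighboring bins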
Example #11
def test_magnitude_to_decibel(dynamic_range, dtype: str):
    """test for backend_keras.magnitude_to_decibel"""

    x = np.array(
        [[1e-20, 1e-5, 1e-3, 5e-2], [0.3, 1.0, 20.5, 9999]], dtype=dtype
    )  # random positive numbers

    amin = 1e-5
    x_decibel_ref = np.stack(
        (
            librosa.power_to_db(x[0], amin=amin, ref=1.0, top_db=dynamic_range),
            librosa.power_to_db(x[1], amin=amin, ref=1.0, top_db=dynamic_range),
        ),
        axis=0,
    )

    x_var = K.variable(x)
    x_decibel_kapre = magnitude_to_decibel(
        x_var, ref_value=1.0, amin=amin, dynamic_range=dynamic_range
    )
    if dtype == 'float16':
        np.testing.assert_allclose(K.eval(x_decibel_kapre), x_decibel_ref, rtol=1e-3, atol=TOL)
    else:
        np.testing.assert_allclose(K.eval(x_decibel_kapre), x_decibel_ref, atol=TOL)
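Since the test takes its settings as arguments, it is presumably driven by a parametrized runner; a plausible pytest wiring (the parameter values are guesses):

import pytest

@pytest.mark.parametrize('dynamic_range', [80.0, 120.0])
@pytest.mark.parametrize('dtype', ['float16', 'float32', 'float64'])
def test_magnitude_to_decibel_param(dynamic_range, dtype):
    test_magnitude_to_decibel(dynamic_range, dtype)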
Example #12
 def __init__(self,
              axis=-1,
              momentum=0.99,
              epsilon=1e-3,
              center=True,
              scale=True,
              beta_initializer='zeros',
              gamma_initializer='ones',
              moving_mean_initializer='zeros',
              moving_variance_initializer='ones',
              beta_regularizer=None,
              gamma_regularizer=None,
              beta_constraint=None,
              gamma_constraint=None,
              **kwargs):
     self._trainable = True
     self._trainable_tensor = K.variable(1,
                                         dtype='float32',
                                         name='trainable')
     super(BatchNormalization, self).__init__(**kwargs)
     self.supports_masking = True
     self.axis = axis
     self.momentum = momentum
     self.epsilon = epsilon
     self.center = center
     self.scale = scale
     self.beta_initializer = initializers.get(beta_initializer)
     self.gamma_initializer = initializers.get(gamma_initializer)
     self.moving_mean_initializer = initializers.get(
         moving_mean_initializer)
     self.moving_variance_initializer = initializers.get(
         moving_variance_initializer)
     self.beta_regularizer = regularizers.get(beta_regularizer)
     self.gamma_regularizer = regularizers.get(gamma_regularizer)
     self.beta_constraint = constraints.get(beta_constraint)
     self.gamma_constraint = constraints.get(gamma_constraint)
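The extra _trainable_tensor is a float flag meant to be flipped at run time, presumably kept in sync by a trainable property elsewhere in the class. A hedged sketch of the idea:

from keras import backend as K

bn = BatchNormalization()
# Freeze the layer without recompiling: downstream code can K.switch()
# on this tensor to choose training vs. inference behavior.
K.set_value(bn._trainable_tensor, 0.0)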
Example #13
def weighted_loss(y_true, y_pred):
    """
    Will be used as the metric in model.compile()
    ---------------------------------------------
    
    Similar to the custom loss function 'weighted_log_loss()' above
    but with normalized weights, which should be very similar 
    to the official competition metric:
        https://www.kaggle.com/kambarakun/lb-probe-weights-n-of-positives-scoring
    and hence:
        sklearn.metrics.log_loss with sample weights
    """

    class_weights = K.variable([2., 1., 1., 1., 1., 1.])

    eps = K.epsilon()

    y_pred = K.clip(y_pred, eps, 1.0 - eps)

    loss = -(y_true * K.log(y_pred) + (1.0 - y_true) * K.log(1.0 - y_pred))

    loss_samples = _normalized_weighted_average(loss, class_weights)

    return K.mean(loss_samples)
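Per the docstring, this is meant to be passed as a compile-time metric; model below stands for the competition's six-output classifier, which is not part of the snippet (_normalized_weighted_average is also assumed from the surrounding code):

model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=[weighted_loss])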
Example #14
    def __init__(self,
                 lr=0.001,
                 beta_1=0.9,
                 beta_2=0.999,
                 epsilon=1e-6,
                 weight_decay=0.,
                 exclude_from_weight_decay=None,
                 **kwargs):
        super().__init__(name='LambOptimizer', **kwargs)
        with K.name_scope(self.__class__.__name__):
            self.iterations = K.variable(0, dtype='int64', name='iterations')
            self.lr = K.variable(lr, dtype='float32', name='lr')
            self.beta_1 = K.variable(beta_1, dtype='float32', name='beta_1')
            self.beta_2 = K.variable(beta_2, dtype='float32', name='beta_2')
            self.epsilon = K.variable(epsilon, dtype='float32', name='epsilon')
            self.weight_decay = K.variable(weight_decay,
                                           dtype='float32',
                                           name='weight_decay')

        self.exclude_from_weight_decay = exclude_from_weight_decay
Example #15
 def build(self, input_shape):
     params = dict()
     mu = K.variable(0)
     sigma = K.variable(0.1, constraint=keras.constraints.non_neg()
                        )  # , constraint=lambda x: max(0, x))
     x_up = K.variable(0.1, constraint=keras.constraints.non_neg()
                       )  # , constraint=lambda x: max(0, x))
     x_down = K.variable(0.1, constraint=keras.constraints.non_neg()
                         )  # , constraint=lambda x: max(0, x))
     alpha_up = K.variable(0.5, constraint=keras.constraints.non_neg()
                           )  # , constraint=lambda x: max(0, x))
     alpha_down = K.variable(0.5, constraint=keras.constraints.non_neg()
                             )  # , constraint=lambda x: max(0, x))
     self.params = dict(mu=mu,
                        sigma=sigma,
                        x_up=x_up,
                        x_down=x_down,
                        alpha_up=alpha_up,
                        alpha_down=alpha_down)
     super().build(input_shape)
Example #16
 def __init__(self,
              lr=0.001,
              beta_1=0.9,
              beta_2=0.999,
              weight_decay=1e-4,
              epsilon=1e-8,
              decay=0.,
              **kwargs):
     # decoupled weight decay (1/6)
     super(AdamW, self).__init__(**kwargs)
     with K.name_scope(self.__class__.__name__):
         self.iterations = K.variable(0, dtype='int64', name='iterations')
         self.lr = K.variable(lr, name='lr')
         # decoupled weight decay (2/6)
         self.init_lr = lr
         self.beta_1 = K.variable(beta_1, name='beta_1')
         self.beta_2 = K.variable(beta_2, name='beta_2')
         self.decay = K.variable(decay, name='decay')
         # decoupled weight decay (3/6)
         self.wd = K.variable(weight_decay, name='weight_decay')
     self.epsilon = epsilon
     self.initial_decay = decay
Example #17
 def __init__(self,
              lr=0.001,
              beta_1=0.9,
              beta_2=0.999,
              beta_3=0.999,
              epsilon=None,
              decay=0.,
              #amsgrad=False,
              **kwargs):
     super(AdaMod, self).__init__(name='AdaMod', **kwargs)
     with K.name_scope(self.__class__.__name__):
         self.iterations = K.variable(0, dtype='int64', name='iterations')
         self.lr = K.variable(lr, name='lr')
         self.beta_1 = K.variable(beta_1, name='beta_1')
         self.beta_2 = K.variable(beta_2, name='beta_2')
         self.beta_3 = K.variable(beta_3, name='beta_3')
         self.decay = K.variable(decay, name='decay')
         #self.amsgrad = K.variable(amsgrad, name='amsgrad')
     if epsilon is None:
         epsilon = K.epsilon()
     self.epsilon = epsilon
     self.initial_decay = decay
Example #18
 def __init__(
         self,
         learning_rate=0.001,
         beta_1=0.9,
         beta_2=0.999,
         weight_decay=1e-4,  # decoupled weight decay (1/4)
         epsilon=1e-8,
         decay=0.,
         name='adamw',
         **kwargs):
     super(AdamW, self).__init__(name, **kwargs)
     with K.name_scope(self.__class__.__name__):
         self.iterations = K.variable(0, dtype='int64', name='iterations')
         self.learning_rate = K.variable(learning_rate,
                                         name='learning_rate')
         self.beta_1 = K.variable(beta_1, name='beta_1')
         self.beta_2 = K.variable(beta_2, name='beta_2')
         self.decay = K.variable(decay, name='decay')
         self.wd = K.variable(
             weight_decay,
             name='weight_decay')  # decoupled weight decay (2/4)
     self.epsilon = epsilon
     self.initial_decay = decay
Example #19
    get_style_fun = K.function([model.input], style_features)

    content_targets = get_content_fun([content_image])
    # List of list of features
    style_targets_list = [get_style_fun([img]) for img in style_images]

    # List of batched features
    style_targets = []
    for l in range(len(args.style_layers)):
        batched_features = []
        for i in range(nb_styles):
            batched_features.append(style_targets_list[i][l][None])
        style_targets.append(np.concatenate(batched_features))

    if args.init == 'content':
        pastiche_image = K.variable(np.repeat(content_image, nb_styles,
                                              axis=0))
    else:
        if args.init != 'random':
            print(
                'Could not recognize init arg \'%s\'. Falling back to random.'
                % args.init)
        pastiche_image = K.variable(
            args.std_init *
            np.random.randn(nb_styles, *content_image.shape[1:]))

    # Store targets as variables
    content_targets_dict = {
        k: K.variable(v)
        for k, v in zip(args.content_layers, content_targets)
    }
    style_targets_dict = {
Example #20
 def loss(self, y_true, y_pred):
     total_loss = K.variable(0)
     for idx, loss in enumerate(self.losses):
         total_loss += self.loss_wts[idx] * loss(y_true, y_pred)
     return total_loss
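The method assumes self.losses and self.loss_wts were stored in __init__ (not shown). A self-contained functional equivalent, as a sketch:

from keras import backend as K

def make_combined_loss(losses, loss_wts):
    # Weighted sum of several Keras-style loss functions.
    def loss(y_true, y_pred):
        total = 0.
        for wt, fn in zip(loss_wts, losses):
            total = total + wt * fn(y_true, y_pred)
        return total
    return loss

# e.g. blend crossentropy with a squared-error term (weights made up):
mse = lambda t, p: K.mean(K.square(t - p), axis=-1)
combined = make_combined_loss([K.binary_crossentropy, mse], [1.0, 0.5])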
Example #21
    def build(self, input_shape):
        self.input_spec = [InputSpec(shape=input_shape)]
        self.conv_layers = {c: [] for c in ['i', 'f', 'c', 'o', 'a', 'ahat']}

        for l in range(self.nb_layers):
            for c in ['i', 'f', 'c', 'o']:
                act = self.LSTM_activation if c == 'c' else self.LSTM_inner_activation
                self.conv_layers[c].append(
                    Conv2D(self.R_stack_sizes[l],
                           self.R_filt_sizes[l],
                           padding='same',
                           activation=act,
                           data_format=self.data_format))

            act = 'relu' if l == 0 else self.A_activation
            self.conv_layers['ahat'].append(
                Conv2D(self.stack_sizes[l],
                       self.Ahat_filt_sizes[l],
                       padding='same',
                       activation=act,
                       data_format=self.data_format))

            if l < self.nb_layers - 1:
                self.conv_layers['a'].append(
                    Conv2D(self.stack_sizes[l + 1],
                           self.A_filt_sizes[l],
                           padding='same',
                           activation=self.A_activation,
                           data_format=self.data_format))

        self.upsample = UpSampling2D(data_format=self.data_format)
        self.pool = MaxPooling2D(data_format=self.data_format)

        self._trainable_weights = []
        nb_row, nb_col = (
            input_shape[-2],
            input_shape[-1]) if self.data_format == 'channels_first' else (
                input_shape[-3], input_shape[-2])
        for c in sorted(self.conv_layers.keys()):
            for l in range(len(self.conv_layers[c])):
                ds_factor = 2**l
                if c == 'ahat':
                    nb_channels = self.R_stack_sizes[l]
                elif c == 'a':
                    nb_channels = 2 * self.R_stack_sizes[l]
                else:
                    nb_channels = self.stack_sizes[l] * 2 + self.R_stack_sizes[
                        l]
                    if l < self.nb_layers - 1:
                        nb_channels += self.R_stack_sizes[l + 1]
                in_shape = (input_shape[0], nb_channels, nb_row // ds_factor,
                            nb_col // ds_factor)
                if self.data_format == 'channels_last':
                    in_shape = (in_shape[0], in_shape[2], in_shape[3],
                                in_shape[1])
                with K.name_scope('layer_' + c + '_' + str(l)):
                    self.conv_layers[c][l].build(in_shape)
                self._trainable_weights += self.conv_layers[c][
                    l].trainable_weights

        self.states = [None] * self.nb_layers * 3

        if self.extrap_start_time is not None:
            self.t_extrap = K.variable(
                self.extrap_start_time,
                int if K.backend() != 'tensorflow' else 'int32')
            self.states += [None] * 2  # [previous frame prediction, timestep]

        self.built = True
Example #22
    def call(self, x, mask=None):
        # TODO: validate input shape

        assert (len(x) == 3)
        L_flat = x[0]
        mu = x[1]
        a = x[2]

        if self.mode == 'full':
            # Create L and L^T matrix, which we use to construct the positive-definite matrix P.
            L = None
            LT = None
            if K.backend() == 'theano':
                import theano.tensor as T
                import theano

                def fn(x, L_acc, LT_acc):
                    x_ = K.zeros((self.nb_actions, self.nb_actions))
                    x_ = T.set_subtensor(x_[np.tril_indices(self.nb_actions)], x)
                    diag = K.exp(T.diag(x_)) + K.epsilon()
                    x_ = T.set_subtensor(x_[np.diag_indices(self.nb_actions)], diag)
                    return x_, x_.T

                outputs_info = [
                    K.zeros((self.nb_actions, self.nb_actions)),
                    K.zeros((self.nb_actions, self.nb_actions)),
                ]
                results, _ = theano.scan(fn=fn, sequences=L_flat, outputs_info=outputs_info)
                L, LT = results
            elif K.backend() == 'tensorflow':
                import tensorflow as tf

                # Number of elements in a triangular matrix.
                nb_elems = (self.nb_actions * self.nb_actions + self.nb_actions) // 2

                # Create mask for the diagonal elements in L_flat. This is used to exponentiate
                # only the diagonal elements, which is done before gathering.
                diag_indices = [0]
                for row in range(1, self.nb_actions):
                    diag_indices.append(diag_indices[-1] + (row + 1))
                diag_mask = np.zeros(1 + nb_elems)  # +1 for the leading zero
                diag_mask[np.array(diag_indices) + 1] = 1
                diag_mask = K.variable(diag_mask)

                # Add a leading zero element to each row of L_flat. We use this zero
                # element when gathering L_flat into a lower triangular matrix L.
                nb_rows = tf.shape(L_flat)[0]
                zeros = tf.expand_dims(tf.tile(K.zeros((1,)), [nb_rows]), 1)
                try:
                    # Old TF behavior.
                    L_flat = tf.concat(1, [zeros, L_flat])
                except (TypeError, ValueError):
                    # New TF behavior
                    L_flat = tf.concat([zeros, L_flat], 1)

                # Create mask that can be used to gather elements from L_flat and put them
                # into a lower triangular matrix.
                tril_mask = np.zeros((self.nb_actions, self.nb_actions), dtype='int32')
                tril_mask[np.tril_indices(self.nb_actions)] = range(1, nb_elems + 1)

                # Finally, process each element of the batch.
                init = [
                    K.zeros((self.nb_actions, self.nb_actions)),
                    K.zeros((self.nb_actions, self.nb_actions)),
                ]

                def fn(a, x):
                    # Exponentiate everything. This is much easier than only exponentiating
                    # the diagonal elements, and, usually, the action space is relatively low.
                    x_ = K.exp(x) + K.epsilon()
                    # Only keep the diagonal elements.
                    x_ *= diag_mask
                    # Add the original, non-diagonal elements.
                    x_ += x * (1. - diag_mask)
                    # Finally, gather everything into a lower triangular matrix.
                    L_ = tf.gather(x_, tril_mask)
                    return [L_, tf.transpose(L_)]

                tmp = tf.scan(fn, L_flat, initializer=init)
                if isinstance(tmp, (list, tuple)):
                    # TensorFlow 0.10 now returns a tuple of tensors.
                    L, LT = tmp
                else:
                    # Old TensorFlow < 0.10 returns a shared tensor.
                    L = tmp[:, 0, :, :]
                    LT = tmp[:, 1, :, :]
            else:
                raise RuntimeError('Unknown Keras backend "{}".'.format(K.backend()))
            assert L is not None
            assert LT is not None
            P = K.batch_dot(L, LT)
        elif self.mode == 'diag':
            if K.backend() == 'theano':
                import theano.tensor as T
                import theano

                def fn(x, P_acc):
                    x_ = K.zeros((self.nb_actions, self.nb_actions))
                    x_ = T.set_subtensor(x_[np.diag_indices(self.nb_actions)], x)
                    return x_

                outputs_info = [
                    K.zeros((self.nb_actions, self.nb_actions)),
                ]
                P, _ = theano.scan(fn=fn, sequences=L_flat, outputs_info=outputs_info)
            elif K.backend() == 'tensorflow':
                import tensorflow as tf

                # Create mask that can be used to gather elements from L_flat and put them
                # into a diagonal matrix.
                diag_mask = np.zeros((self.nb_actions, self.nb_actions), dtype='int32')
                diag_mask[np.diag_indices(self.nb_actions)] = range(1, self.nb_actions + 1)

                # Add a leading zero element to each row of L_flat. We use this zero
                # element when gathering L_flat into a diagonal matrix.
                nb_rows = tf.shape(L_flat)[0]
                zeros = tf.expand_dims(tf.tile(K.zeros((1,)), [nb_rows]), 1)
                try:
                    # Old TF behavior.
                    L_flat = tf.concat(1, [zeros, L_flat])
                except (TypeError, ValueError):
                    # New TF behavior
                    L_flat = tf.concat([zeros, L_flat], 1)

                # Finally, process each element of the batch.
                def fn(a, x):
                    x_ = tf.gather(x, diag_mask)
                    return x_

                P = tf.scan(fn, L_flat, initializer=K.zeros((self.nb_actions, self.nb_actions)))
            else:
                raise RuntimeError('Unknown Keras backend "{}".'.format(K.backend()))
        assert P is not None
        assert K.ndim(P) == 3

        # Combine a, mu and P into a scalar (over the batches). What we compute here is
        # -.5 * (a - mu)^T * P * (a - mu), where * denotes the dot-product. Unfortunately
        # TensorFlow handles vector * P slightly suboptimally, hence we convert the vectors to
        # 1xd/dx1 matrices and finally flatten the resulting 1x1 matrix into a scalar. All
        # operations happen over the batch size, which is dimension 0.
        prod = K.batch_dot(K.expand_dims(a - mu, 1), P)
        prod = K.batch_dot(prod, K.expand_dims(a - mu, -1))
        A = -.5 * K.batch_flatten(prod)
        assert K.ndim(A) == 2
        return A
Example #23
def styleTransfer(cData, sData, tData):
    print("   Building transfer model.")
    contentTensor = K.variable(cData)
    styleTensor = K.variable(sData)
    genTensor = K.placeholder((1, CONTENT_IMG_H, CONTENT_IMG_W, 3))
    inputTensor = K.concatenate([contentTensor, styleTensor, genTensor],
                                axis=0)
    model = vgg19.VGG19(include_top=False,
                        weights="imagenet",
                        input_tensor=inputTensor)  ######
    outputDict = dict([(layer.name, layer.output) for layer in model.layers])
    print("   VGG19 model loaded.")
    loss = 0.0
    styleLayerNames = [
        "block1_conv1", "block2_conv1", "block3_conv1", "block4_conv1",
        "block5_conv1"
    ]
    contentLayerName = "block5_conv2"
    print("   Calculating content loss.")
    contentLayer = outputDict[contentLayerName]
    contentOutput = contentLayer[0, :, :, :]
    genOutput = contentLayer[2, :, :, :]

    loss += CONTENT_WEIGHT * contentLoss(contentOutput, genOutput)

    print("   Calculating style loss.")

    for layerName in styleLayerNames:
        layer_features = outputDict[layerName]
        style_reference_features = layer_features[1, :, :, :]
        gen_features = layer_features[2, :, :, :]
        loss += (STYLE_WEIGHT / len(styleLayerNames)) * styleLoss(
            style_reference_features, gen_features)

    loss += TOTAL_WEIGHT * totalLoss(genTensor)
    # TODO: Setup gradients or use K.gradients().###########################
    grads = K.gradients(loss, genTensor)

    outputs = [loss]
    if isinstance(grads, (list, tuple)):
        outputs += grads
    else:
        outputs.append(grads)

    f_outputs = K.function([genTensor], outputs)

    def eval_loss(x):
        if K.image_data_format() == "channels_first":
            x = x.reshape((1, 3, CONTENT_IMG_H, CONTENT_IMG_W))
        else:
            x = x.reshape((1, CONTENT_IMG_H, CONTENT_IMG_W, 3))
        outs = f_outputs([x])
        loss_val = outs[0]
        return loss_val

    def eval_grads(x):
        if K.image_data_format() == "channels_first":
            x = x.reshape((1, 3, CONTENT_IMG_H, CONTENT_IMG_W))
        else:
            x = x.reshape((1, CONTENT_IMG_H, CONTENT_IMG_W, 3))
        outs = f_outputs([x])
        if len(outs[1:]) == 1:
            grad_vals = outs[1].flatten().astype("float64")
        else:
            grad_vals = np.array(outs[1:]).flatten().astype("float64")
        return grad_vals

    loadedImg = load_img(CONTENT_IMG_PATH)
    x = preprocessData((loadedImg, CONTENT_IMG_H, CONTENT_IMG_W))

    print("   Beginning transfer.")
    for i in range(TRANSFER_ROUNDS):
        print("   Step %d." % i)
        #TODO: perform gradient descent using fmin_l_bfgs_b.#######################
        x, min_val, info = fmin_l_bfgs_b(eval_loss,
                                         x.flatten(),
                                         fprime=eval_grads,
                                         maxfun=20)
        print("      Loss: %f." % min_val)
        img = deprocessImage(x.copy())
        saveFile = "/Users/alex_p/Desktop/CS390NIP/Lab2/" + "transfer_round_%d.png" % i
        imsave(saveFile, img)
        print("      Image saved to \"%s\"." % saveFile)
    print("   Transfer complete.")
Example #24
def yolo_head(feats, anchors, num_classes):
    """Convert final layer features to bounding box parameters.
    Parameters
    ----------
    feats : tensor
        Final convolutional layer features.
    anchors : array-like
        Anchor box widths and heights.
    num_classes : int
        Number of target classes.
    Returns
    -------
    box_conf : tensor
        Probability estimate for whether each box contains any object.
    box_xy : tensor
        x, y box predictions adjusted by spatial location in conv layer.
    box_wh : tensor
        w, h box predictions adjusted by anchors and conv spatial resolution.
    box_class_pred : tensor
        Probability distribution estimate for each box over class labels.
    """
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.variable(anchors), [1, 1, 1, num_anchors, 2])

    # Static implementation for fixed models.
    # TODO: Remove or add option for static implementation.
    # _, conv_height, conv_width, _ = K.int_shape(feats)
    # conv_dims = K.variable([conv_width, conv_height])

    # Dynamic implementation of conv dims for fully convolutional model.
    conv_dims = K.shape(feats)[1:3]  # assuming channels last
    # In YOLO the height index is the innermost iteration.
    conv_height_index = K.arange(0, stop=conv_dims[0])
    conv_width_index = K.arange(0, stop=conv_dims[1])
    conv_height_index = K.tile(conv_height_index, [conv_dims[1]])

    # TODO: Repeat_elements and tf.split don't support dynamic splits.
    # conv_width_index = K.repeat_elements(conv_width_index, conv_dims[1], axis=0)
    conv_width_index = K.tile(K.expand_dims(conv_width_index, 0),
                              [conv_dims[0], 1])
    conv_width_index = K.flatten(K.transpose(conv_width_index))
    conv_index = K.transpose(K.stack([conv_height_index, conv_width_index]))
    conv_index = K.reshape(conv_index, [1, conv_dims[0], conv_dims[1], 1, 2])
    # print(feats.dtype)
    conv_index = K.cast(conv_index,
                        feats.dtype)  # originally K.dtype(feats), but that raised an error for unknown reasons

    feats = K.reshape(
        feats, [-1, conv_dims[0], conv_dims[1], num_anchors, num_classes + 5])
    conv_dims = K.cast(K.reshape(conv_dims, [1, 1, 1, 1, 2]), K.dtype(feats))

    # Static generation of conv_index:
    # conv_index = np.array([_ for _ in np.ndindex(conv_width, conv_height)])
    # conv_index = conv_index[:, [1, 0]]  # swap columns for YOLO ordering.
    # conv_index = K.variable(
    #     conv_index.reshape(1, conv_height, conv_width, 1, 2))
    # feats = Reshape(
    #     (conv_dims[0], conv_dims[1], num_anchors, num_classes + 5))(feats)

    box_xy = K.sigmoid(feats[..., :2])
    box_wh = K.exp(feats[..., 2:4])
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.softmax(feats[..., 5:])

    # Adjust predictions to each spatial grid point and anchor size.
    # Note: YOLO iterates over height index before width index.
    box_xy = (box_xy + conv_index) / conv_dims
    box_wh = box_wh * anchors_tensor / conv_dims

    return box_confidence, box_xy, box_wh, box_class_probs
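A hedged call sketch: feats would be the final conv-layer output of a YOLOv2-style model (model here is assumed), and the anchor values are placeholders, not the real YOLOv2 priors:

import numpy as np

# Five (width, height) anchors in grid-cell units.
anchors = np.array([[1.0, 1.0], [2.0, 2.0], [3.5, 5.5],
                    [8.0, 3.5], [10.0, 10.0]])
box_conf, box_xy, box_wh, box_cls = yolo_head(model.output, anchors,
                                              num_classes=20)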
Example #25
 def build(self, input_shape):
     self.input_spec = [InputSpec(shape=input_shape)]
     gamma = self.gamma_init * np.ones((input_shape[self.axis], ))
     self.gamma = K.variable(gamma, name='{}_gamma'.format(self.name))
      self.trainable_weights.append(self.gamma)
     super(L2Normalization, self).build(input_shape)
Example #26
# Load the style image and resize it to the content image's dimensions.
h, w = content_img.shape[1:3]
style_img = load_img_and_preprocess('pintura.jpg', (h, w))

batch_shape = content_img.shape
shape = content_img.shape[1:]

# Create the VGG16 model with its max-pooling layers replaced by average pooling.
vgg = vgg16_avgpool(shape)

# Create the model that will handle the content image and pick the layer
# we want to use: the deeper the layer, the blurrier the result; the
# shallower, the sharper. We also define the target.
content_model = Model(vgg.input, vgg.layers[10].get_output_at(0))
content_target = K.variable(content_model.predict(content_img))

# Create the model for the style image. Unlike the content model,
# which has a single output, this one has several outputs.
symbolic_conv_outputs = [
    layer.get_output_at(1) for layer in vgg.layers
    if layer.name.endswith('conv1')
]

# Create the model that will have multiple outputs.
style_model = Model(vgg.input, symbolic_conv_outputs)

# Compute the targets, which are the outputs of each layer.
style_layers_output = [K.variable(y) for y in style_model.predict(style_img)]
Example #27
 def __init__(self):
     self.gamma = K.variable(2.)
Example #28
    x = preprocess_input(x)

    # we'll use this throughout the rest of the script
    batch_shape = x.shape
    shape = x.shape[1:]

    # see the image
    plt.imshow(img)
    plt.show()

    # make a content model
    # try different cutoffs to see the images that result
    content_model = VGG16_AvgPool_CutOff(shape, 11)

    # make the target
    target = K.variable(content_model.predict(x))

    # try to match the image

    # define our loss in keras
    loss = K.mean(K.square(target - content_model.output))

    # gradients which are needed by the optimizer
    grads = K.gradients(loss, content_model.input)

    # just like theano.function
    get_loss_and_grads = K.function(inputs=[content_model.input],
                                    outputs=[loss] + grads)

    def get_loss_and_grads_wrapper(x_vec):
        # scipy's minimizer allows us to pass back
Example #29
def get_suggested_scheduler(init_lr=5e-5, total_steps=10000, warmup_ratio=0.1):
	opt_lr = K.variable(init_lr)
	warmup_steps = int(warmup_ratio * total_steps)
	warmup = WarmupCallback(opt_lr, init_lr, total_steps, warmup_steps)
	return warmup, opt_lr
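A hedged wiring sketch: WarmupCallback (from the surrounding code) is assumed to call K.set_value(opt_lr, ...) every batch, and model, x_train, y_train are hypothetical:

warmup_cb, opt_lr = get_suggested_scheduler(init_lr=5e-5, total_steps=10000)
optimizer = keras.optimizers.Adam()   # hypothetical base optimizer
optimizer.lr = opt_lr                 # let the callback drive this variable
model.compile(optimizer=optimizer, loss='mse')
model.fit(x_train, y_train, callbacks=[warmup_cb])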
Example #30
def main():
    '''pydot.Dot.create(pydot.Dot())'''
    print(tf.__version__)
    samples = list()
    for i in range(0,2000):
        samples.append((random_process(1.0,0.0),0))
    for i in range(0,2000):
        adjustCoeff = 0.88+random.random()/10.0
        samples.append((random_process(adjustCoeff,0.0),1))
    for i in range(0,2000):
        drift = 0.0001+(random.random()/10000.0)
        samples.append((random_process(1.0,drift),2))

    random.shuffle(samples)
    test = list()
    for i in range(0, 100):
        test.append((random_process(1.0, 0.0), 0))
    for i in range(0, 100):
        adjustCoeff = 0.88 + random.random() / 10.0
        test.append((random_process(adjustCoeff, 0.0), 1))
    for i in range(0, 100):
        drift = 0.0001 + (random.random() / 10000.0)
        test.append((random_process(1.0, drift), 2))

    j = 0
    classes=["RANDOM_WALK","MEAN_REVERTING","DRIFT"]

    training = [s[0] for s in samples]
    labels = [s[1] for s in samples]
    t = K.variable(training)
    l = K.variable(labels)

    tst = K.variable([ts[0] for ts in test])
    tstLabel = K.variable([ts[1] for ts in test])

    print(t)
    print(t.shape)

    plt.figure(figsize=(10, 10))
    for i in range(25):
        j = int(random.random()*3000)
        plt.subplot(5, 5, i + 1)
        plt.xticks([])
        plt.yticks([])
        plt.grid(False)
        plt.plot(training[j])
        plt.xlabel(classes[labels[j]])
    plt.show()

    model = keras.Sequential([
        keras.layers.Dropout(0.2, input_shape=(500,)),
        keras.layers.Dense(128, activation='relu'),
        keras.layers.Dropout(0.2),
        keras.layers.Dense(10, activation='relu'),
        keras.layers.Dense(3, activation='softmax')
    ])

    '''keras.utils.plot_model(model, 'model.png')'''

    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    history = model.fit(t, l, epochs=10,steps_per_epoch=100)

    # Plot training loss and accuracy values

    plt.plot(history.history['loss'])
    plt.plot(history.history['accuracy'])
    plt.title('Model accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Loss', 'Accuracy'], loc='upper left')
    plt.show()

    test_loss, test_acc = model.evaluate(tst, tstLabel,steps=10)



    print('\nTest accuracy:', test_acc)
Example #31
 def build(self, input_shape):
     self.input_spec = [InputSpec(shape=input_shape)]
     shape = (input_shape[self.axis],)
     init_gamma = self.scale * np.ones(shape)
     self.gamma = K.variable(init_gamma, name='{}_gamma'.format(self.name))