Example No. 1
 def __init__(self, axis=1, normalize=True,
              l1=0., l2=0., **kwargs):
     self.axis = axis
     self.normalize = normalize
     self.l1 = K.cast_to_floatx(l1)
     self.l2 = K.cast_to_floatx(l2)
     self.uses_learning_phase = normalize
     super(BatchLoss, self).__init__(**kwargs)
Example No. 2
  def get_updates(self, loss, params):
    grads = self.get_gradients(loss, params)
    self.updates = []

    with tf.control_dependencies([tf.compat.v1.assign_add(self.iterations, 1)]):
      t = tf.cast(self.iterations, backend.floatx())

    # Due to the recommendations in [2], i.e. warming momentum schedule
    momentum_cache_t = self.beta_1 * (
        1. - 0.5 *
        (tf.pow(backend.cast_to_floatx(0.96), t * self.schedule_decay)))
    momentum_cache_t_1 = self.beta_1 * (
        1. - 0.5 *
        (tf.pow(backend.cast_to_floatx(0.96),
                      (t + 1) * self.schedule_decay)))
    m_schedule_new = self.m_schedule * momentum_cache_t
    m_schedule_next = self.m_schedule * momentum_cache_t * momentum_cache_t_1
    self.updates.append((self.m_schedule, m_schedule_new))

    ms, vs = self._create_all_weights(params)

    for p, g, m, v in zip(params, grads, ms, vs):
      # the following equations given in [1]
      g_prime = g / (1. - m_schedule_new)
      m_t = self.beta_1 * m + (1. - self.beta_1) * g
      m_t_prime = m_t / (1. - m_schedule_next)
      v_t = self.beta_2 * v + (1. - self.beta_2) * tf.square(g)
      v_t_prime = v_t / (1. - tf.pow(self.beta_2, t))
      m_t_bar = (1. -
                 momentum_cache_t) * g_prime + momentum_cache_t_1 * m_t_prime

      self.updates.append(tf.compat.v1.assign(m, m_t))
      self.updates.append(tf.compat.v1.assign(v, v_t))

      p_t = p - self.lr * m_t_bar / (backend.sqrt(v_t_prime) + self.epsilon)
      new_p = p_t

      # Apply constraints.
      if getattr(p, 'constraint', None) is not None:
        new_p = p.constraint(new_p)

      self.updates.append(tf.compat.v1.assign(p, new_p))
    return self.updates
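The warming momentum schedule referenced in the comment can be sanity-checked with plain floats; beta_1 = 0.9 and schedule_decay = 0.004 below are illustrative assumptions, not values taken from the snippet:

# illustrative check of mu_t = beta_1 * (1 - 0.5 * 0.96**(t * schedule_decay))
beta_1, schedule_decay = 0.9, 0.004
for t in (1.0, 1000.0, 100000.0):
    mu_t = beta_1 * (1.0 - 0.5 * 0.96 ** (t * schedule_decay))
    print(t, mu_t)  # ramps from about 0.5 * beta_1 toward beta_1 as t grows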
Example No. 3
    def __init__(self, c1, c2, sigma, **kwargs):
        super(Brish, self).__init__(**kwargs)
        self.c1 = K.cast_to_floatx(c1)
        self.c2 = K.cast_to_floatx(c2)
        self.sigma = K.cast_to_floatx(sigma)

        self.cp = K.cast_to_floatx((self.c1 + self.c2) / 2)
        self.cm = K.cast_to_floatx((self.c1 - self.c2) / 2)
        self.cms = K.cast_to_floatx(
            (self.c1 - self.c2) / np.sqrt(2 * np.pi) * self.sigma)
        self.s1 = K.cast_to_floatx(np.sqrt(2) * self.sigma)
        self.s2 = K.cast_to_floatx(2 * self.sigma**2)
Example No. 4
def Model(X, y, inputSize):
    X = K.cast_to_floatx(X)
    y = K.cast_to_floatx(y)
    # define the keras model
    model = Sequential()
    model.add(Dense(12, input_dim=inputSize, activation='relu'))
    model.add(Dense(8, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    # compile the keras model
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    # fit the keras model on the dataset
    model.fit(X, y, epochs=1, batch_size=10)
    # evaluate the keras model
    _, accuracy = model.evaluate(X, y)

    print('Accuracy: %.2f' % (accuracy * 100))
    return model
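A minimal way to exercise this helper on synthetic data (shapes and values are made up; the snippet's own imports of K, Sequential and Dense are assumed to be in scope in its module). Note that the function name shadows keras.models.Model, so the two should not share a namespace:

import numpy as np

# toy binary-classification data: 100 samples, 8 features
rng = np.random.RandomState(0)
X = rng.rand(100, 8)
y = (X.sum(axis=1) > 4.0).astype('float32')

trained = Model(X, y, inputSize=8)  # fits for one epoch and prints the accuracy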
Example No. 5
    def get_constants(self, inputs, training=None):
        constants = []
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])

        if 0. < self.recurrent_dropout < 1:
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.units))

            def dropped_inputs():
                return K.dropout(ones, self.recurrent_dropout)

            rec_dp_mask = [
                K.in_train_phase(dropped_inputs, ones, training=training)
                for _ in range(3)
            ]
            constants.append(rec_dp_mask)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(3)])
        return constants
Example No. 6
    def get_updates(self, loss, params):
        grads = self.get_gradients(loss, params)
        self.updates = [K.update_add(self.iterations, 1)]

        t = K.cast(self.iterations, K.floatx()) + 1

        # Due to the recommendations in [2], i.e. warming momentum schedule
        momentum_cache_t = self.beta_1 * (1. - 0.5 * (K.pow(K.cast_to_floatx(0.96), t * self.schedule_decay)))
        momentum_cache_t_1 = self.beta_1 * (1. - 0.5 * (K.pow(K.cast_to_floatx(0.96), (t + 1) * self.schedule_decay)))
        m_schedule_new = self.m_schedule * momentum_cache_t
        m_schedule_next = self.m_schedule * momentum_cache_t * momentum_cache_t_1
        self.updates.append((self.m_schedule, m_schedule_new))

        shapes = [K.int_shape(p) for p in params]
        ms = [K.zeros(shape) for shape in shapes]
        vs = [K.zeros(shape) for shape in shapes]

        self.weights = [self.iterations] + ms + vs

        for p, g, m, v in zip(params, grads, ms, vs):
            # the following equations given in [1]
            g_prime = g / (1. - m_schedule_new)
            m_t = self.beta_1 * m + (1. - self.beta_1) * g
            m_t_prime = m_t / (1. - m_schedule_next)
            if np.random.choice([1, -1]) == 1:
                v_t = self.beta_2 * v + (1. - self.beta_2) * K.square(g)
            else:
                v_t = K.maximum(self.beta_2 * v, K.abs(g))
            v_t_prime = v_t / (1. - K.pow(self.beta_2, t))
            m_t_bar = (1. - momentum_cache_t) * g_prime + momentum_cache_t_1 * m_t_prime

            self.updates.append(K.update(m, m_t))
            self.updates.append(K.update(v, v_t))

            p_t = p - self.lr * m_t_bar / (K.sqrt(v_t_prime) + self.epsilon)
            new_p = p_t

            # Apply constraints.
            if getattr(p, 'constraint', None) is not None:
                new_p = p.constraint(new_p)

            self.updates.append(K.update(p, new_p))
        return self.updates
Example No. 7
 def __init__(self,
              beta_init=0.1,
              weights=None,
              activity_regularizer=None,
              **kwargs):
     self.supports_masking = True
     self.beta_init = K.cast_to_floatx(beta_init)
     self.initial_weights = weights
     self.activity_regularizer = regularizers.get(activity_regularizer)
     super(ParametricSigmoid, self).__init__(**kwargs)
Example No. 8
 def __init__(self, theta=1.0, **kwargs):
     super(ThresholdedReLU, self).__init__(**kwargs)
     if theta is None:
         raise ValueError('Theta of a Thresholded ReLU layer cannot be '
                          'None, requires a float. Got %s' % theta)
     if theta < 0:
         raise ValueError('The theta value of a Thresholded ReLU layer '
                          'should be >=0, got %s' % theta)
     self.supports_masking = True
     self.theta = backend.cast_to_floatx(theta)
Example No. 9
def load_tinyimagenet(tinyimagenet_path='./'):
    images = [plt.imread(fp) for fp in glob(os.path.join(tinyimagenet_path, '*.jpg'))]

    for i in range(len(images)):
        if len(images[i].shape) != 3:
            images[i] = np.stack([images[i], images[i], images[i]], axis=-1)

    images = np.stack(images)
    images = normalize_minus1_1(K.cast_to_floatx(images))
    return images
Example No. 10
 def get_constants(self, x):
     constants = []
     if 0 < self.dropout_U < 1:
         ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
         ones = K.tile(ones, (1, self.output_dim))
         B_U = K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
         constants.append(B_U)
     else:
         constants.append(K.cast_to_floatx(1.))
     if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
         input_shape = self.input_spec[0].shape
         input_dim = input_shape[-1]
         ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
         ones = K.tile(ones, (1, int(input_dim)))
         B_W = K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
         constants.append(B_W)
     else:
         constants.append(K.cast_to_floatx(1.))
     return constants
Example No. 11
 def get_constants(self, x):
     constants = []
     if 0 < self.dropout_U < 1:
         ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
         ones = K.concatenate([ones] * self.output_dim, 1)
         B_U = K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
         constants.append(B_U)
     else:
         constants.append(K.cast_to_floatx(1.))
     if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
         input_shape = self.input_spec[0].shape
         input_dim = input_shape[-1]
         ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
         ones = K.concatenate([ones] * input_dim, 1)
         B_W = K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
         constants.append(B_W)
     else:
         constants.append(K.cast_to_floatx(1.))
     return constants
Example No. 12
    def get_constants(self, inputs, training=None):
        constants = []
        input_dim = K.int_shape(inputs)[-1]

        if 0.0 < self.dropout < 1.0:
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, input_dim))

            def dropped_inputs():
                return K.dropout(ones, self.dropout)

            dp_mask = K.in_train_phase(dropped_inputs, ones, training=training)
            constants.append(dp_mask)
        else:
            dp_mask = K.cast_to_floatx(1.)
            constants.append(dp_mask)

        if 0 < self.recurrent_dropout < 1:
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.units))

            def dropped_inputs():
                return K.dropout(ones, self.recurrent_dropout)

            rec_dp_mask = [K.in_train_phase(dropped_inputs, ones, training=training)
                           for _ in range(self.depth - 1)]
            constants.append(rec_dp_mask)
        else:
            rec_dp_mask = [K.cast_to_floatx(1.) for _ in range(self.depth - 1)]
            constants.append(rec_dp_mask)

        return constants
Example No. 13
    def get_constants(self, x):
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.concatenate([ones] * self.output_dim, 1)
            B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
            constants.append(B_U)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(3)])

        if 0 < self.dropout_W < 1:
            input_shape = self.input_spec[0].shape
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.concatenate([ones] * input_dim, 1)
            B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
            constants.append(B_W)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(3)])
        return constants
Example No. 14
def bbalpha_softmax_cross_entropy_with_mc_logits(alpha):
    alpha = K.cast_to_floatx(alpha)
    def loss(y_true, mc_logits):
        # log(p_ij), p_ij = softmax(logit_ij)
        #assert mc_logits.ndim == 3
        mc_log_softmax = mc_logits - K.max(mc_logits, axis=2, keepdims=True)
        mc_log_softmax = mc_log_softmax - K.log(K.sum(K.exp(mc_log_softmax), axis=2, keepdims=True))
        mc_ll = K.sum(y_true * mc_log_softmax, -1)  # N x K
        K_mc = mc_ll.get_shape().as_list()[1]	# only for tensorflow
        return - 1. / alpha * (logsumexp(alpha * mc_ll, 1) + K.log(1.0 / K_mc))
    return loss
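A quick numeric check of the loss on toy tensors; `logsumexp` is not shown in the snippet, so a standard backend-based version is sketched here as an assumption:

import numpy as np
from keras import backend as K

def logsumexp(x, axis):
    # assumed helper: numerically stable log-sum-exp over `axis`
    x_max = K.max(x, axis=axis, keepdims=True)
    return K.log(K.sum(K.exp(x - x_max), axis=axis)) + K.squeeze(x_max, axis)

loss_fn = bbalpha_softmax_cross_entropy_with_mc_logits(alpha=0.5)
# toy batch: 2 examples, K_mc = 3 MC samples, 4 classes
mc_logits = K.constant(np.random.randn(2, 3, 4))
y_true = K.constant(np.tile(np.eye(4)[[0, 2]][:, None, :], (1, 3, 1)))
print(K.eval(loss_fn(y_true, mc_logits)))  # one scalar per example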
Example No. 15
 def get_constants(self, x, training=None):
     constants = []
     if 0 < self.recurrent_dropout < 1:
         ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
         ones = K.concatenate([ones] * self.units, 1)
         B_U = K.in_train_phase(K.dropout(ones, self.recurrent_dropout),
                                ones)
         constants.append(B_U)
     else:
         constants.append(K.cast_to_floatx(1.))
     return constants
Example No. 16
    def get_constants(self, x):
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.output_dim))
            B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
            constants.append(B_U)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(3)])

        if 0 < self.dropout_W < 1:
            input_shape = K.int_shape(x)
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))
            B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
            constants.append(B_W)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(3)])
        return constants
Example No. 17
def binary_crossentropy(y_true, y_pred, from_logits=False, label_smoothing=0):
    y_pred = K.constant(y_pred) if not K.is_tensor(y_pred) else y_pred
    y_true = K.cast(y_true, y_pred.dtype)
    if label_smoothing != 0:
        smoothing = K.cast_to_floatx(label_smoothing)
        y_true = K.switch(K.greater(smoothing, 0), lambda: y_true *
                          (1.0 - smoothing) + 0.5 * smoothing, lambda: y_true)
    return K.mean(K.binary_crossentropy(y_true,
                                        y_pred,
                                        from_logits=from_logits),
                  axis=-1)
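A small check of the label-smoothing branch (illustrative arrays; assumes a TensorFlow-backed K where K.eval can evaluate the result):

import numpy as np
from keras import backend as K

y_true = np.array([[1., 0.], [0., 1.]])
y_pred = np.array([[0.9, 0.1], [0.2, 0.8]])
# with label_smoothing=0.2 the effective targets become y_true * 0.8 + 0.1, i.e. 0.9 and 0.1
loss = binary_crossentropy(y_true, y_pred, label_smoothing=0.2)
print(K.eval(loss))  # per-sample mean binary cross-entropy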
Example No. 18
    def __init__(self, k, b, wtype='random', randseed='none'):
        self.k = K.cast_to_floatx(k)
        self.uses_learning_phase = True
        self.wtype = wtype
        self.w = None
        self.p = None
        self.b = b

        if randseed == 'time':
            import time
            np.random.seed(int(time.time()))
Example No. 19
    def __init__(self, n_bases=10, spline_order=3, l2_smooth=0., l2=0.):
        """Regularizer for GAM's

        # Arguments
            n_bases: number of b-spline bases
            order: spline order (2 for quadratic, 3 for qubic splines)
            l2_smooth: float; Smoothness penalty (penalize w' * S * w)
            l2: float; L2 regularization factor - overall weights regularizer
        """
        # convert S to numpy-array if it's a list

        self.n_bases = n_bases
        self.spline_order = spline_order
        self.l2_smooth = K.cast_to_floatx(l2_smooth)
        self.l2 = K.cast_to_floatx(l2)

        # convert to K.constant
        self.S = K.constant(
            K.cast_to_floatx(get_S(n_bases, spline_order,
                                   add_intercept=False)))
Example No. 20
    def get_constants(self, x):
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.concatenate([ones] * self.output_dim, 1)
            B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(4)]
            constants.append(B_U)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])

        if 0 < self.dropout_W < 1:
            input_shape = self.input_spec[0].shape
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.concatenate([ones] * input_dim, 1)
            B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(4)]
            constants.append(B_W)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])
        return constants
Example No. 21
 def call(self, inputs, memory=None):
     if memory is None:
         mem = K.zeros(
             (K.shape(inputs)[0], self.mem_size, K.shape(inputs)[-1]))
     else:
         mem = K.variable(K.cast_to_floatx(memory))
     inputs = K.concatenate([mem, inputs], axis=1)
     ret = super(CausalConv, self).call(inputs)
     if self.return_memory:
         ret = ret, inputs[:, :self.mem_size, :]
     return ret
Example No. 22
 def __init__(self, theta=1.0, **kwargs):
     super(ThresholdedReLU, self).__init__(**kwargs)
     if theta is None:
         raise ValueError(
             'Theta of a Thresholded ReLU layer cannot be None, expecting a float.'
             f' Received: {theta}')
     if theta < 0:
         raise ValueError('The theta value of a Thresholded ReLU layer '
                          f'should be >=0. Received: {theta}')
     self.supports_masking = True
     self.theta = backend.cast_to_floatx(theta)
Example No. 23
def get_data_gen():
    """
    Return train and val generators that give data in batches,
    and data label names.
    """

    # load data
    (X_train, y_train), (X_val, y_val) = cifar10.load_data()

    if K.image_data_format() == 'channels_first':
        X_train = X_train.transpose(0, 2, 3, 1)
        X_val = X_val.transpose(0, 2, 3, 1)

    X_train = K.cast_to_floatx(X_train)
    X_val = K.cast_to_floatx(X_val)

    y_train = to_categorical(y_train)
    y_val = to_categorical(y_val)

    X_train -= mean
    X_val -= mean

    X_train /= std
    X_val /= std

    # train generator
    train_transforms = [
        partial(random_crop, new_size=32, padding=4),
        partial(horizontal_flip, f=0.5)
    ]

    train_generator = ArrayDataGenerator(X_train,
                                         y_train,
                                         transforms=train_transforms,
                                         shuffle=True,
                                         seed=28)

    # validation generator
    val_generator = ArrayDataGenerator(X_val, y_val, shuffle=False)

    return train_generator, val_generator
Example No. 24
 def reg_insidebatch(lastoutput, current):
     # called by tf.scan, which loops over the batch dimension
     regLoss = K.cast_to_floatx(0.)
     [height, width, channels] = K.int_shape(current)
     for i in range(channels):
         Wi = K.flatten(current[:, :, i])
         for j in range(channels):
             Wj = K.flatten(current[:, :, j])
             if j != i:
                 tempLoss = K.sqrt(K.square(K.sum(Wi * Wj, axis=-1, keepdims=False)))
                 regLoss += tempLoss
     regLoss += lastoutput  # accumulate onto the running loss from the previous scan step
     return regLoss
Example No. 25
    def concrete_dropout(self, x):
        eps = K.cast_to_floatx(K.epsilon())
        temp = 1.0 / 10.0
        unif_noise = K.random_uniform(shape=K.shape(x))
        drop_prob = (K.log(self.p + eps) - K.log(1. - self.p + eps) +
                     K.log(unif_noise + eps) - K.log(1. - unif_noise + eps))
        drop_prob = K.sigmoid(drop_prob / temp)
        random_tensor = 1. - drop_prob

        retain_prob = 1. - self.p
        x *= random_tensor
        x /= retain_prob
        return x
Example No. 26
def weight_norm_regularizer(layer, weight):
    """Splits weight direction and norm to optimize them separately
    # Arguments
        layer: Layer to which weight normalization is applied
        weight: Initial weight tensor
    """
    w_norm = K.cast_to_floatx(np.linalg.norm(K.get_value(weight), axis=0))
    g = layer.add_weight(name="{}_{}_g".format(layer.name,
                                               weight.name.split(':')[-1]),
                         shape=w_norm.shape,
                         initializer=keras.initializers.Constant(w_norm))
    normed_weight = weight * (g / l2norm(weight))
    return normed_weight
Example No. 27
	def lossfn(self,y_true, y_pred):
		K.set_floatx('float32')
		Ploc_true = (y_true[0])
		mu_true = (y_true[1])
		Ploc_pred = (y_pred[0])
		mu_pred = (y_pred[1])
		print(y_pred[1])
		sigma = K.cast_to_floatx(np.diag(np.full((2*self.F,), 0.005)))
		cost = (-tf.log(Ploc_pred) + 
				(0.5) * tf.log(tf.matrix_determinant(sigma)) + 
				(0.5) * K.dot(tf.transpose(mu_true - mu_pred), K.dot(tf.matrix_inverse(sigma), (mu_true - mu_pred))) + 
				tf.log(2 * m.pi))
		return cost
Example No. 28
    def get_constants(self, x):
        print("begin get_constants(self, x)")
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.controller_output_dim))
            B_U = [
                K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
                for _ in range(4)
            ]
            constants.append(B_U)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])

        if 0 < self.dropout_W < 1:
            input_shape = self.input_spec[0].shape
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))
            B_W = [
                K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
                for _ in range(4)
            ]
            constants.append(B_W)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])

        # if 0 < self.dropout_R < 1:
        #     input_shape = self.input_spec[0].shape
        #     input_dim = input_shape[-1]
        #     ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        #     ones = K.tile(ones, (1, int(input_dim)))
        #     B_R = [K.in_train_phase(K.dropout(ones, self.dropout_R), ones) for _ in range(4)]
        #     constants.append(B_R)
        # else:
        #     constants.append([K.cast_to_floatx(1.) for _ in range(4)])

        print("end get_constants(self, x)")
        return constants
Example No. 29
def Emu(para_array):
    if len(para_array.shape) == 1:
        para_array_rescaled = scale01(lhdmin, lhdmax, para_array)
        W_predArray, _ = GPy_predict(
            np.expand_dims(para_array_rescaled, axis=0))
        W_predArray = W_predArray.reshape(latent_dim, -1).T
        #         W_predArray = K.cast_to_floatx(W_predArray)
        #         W_predArray = K.cast(W_predArray,K.floatx())
        #         W_predArray = tf.convert_to_tensor(W_predArray, dtype=tf.float32)
        W_predArray = K.cast_to_floatx(W_predArray)
        x_decoded = decoder.predict(W_predArray)
        x_decoded_scaled = (normFactor * x_decoded) + meanFactor
        return np.squeeze(x_decoded_scaled)  #[0]
Example No. 30
def categorical_crossentropy(y_true, y_pred, from_logits=False, label_smoothing=0):
    y_pred = K.constant(y_pred) if not K.is_tensor(y_pred) else y_pred
    y_true = K.cast(y_true, y_pred.dtype)

    if label_smoothing != 0:
        smoothing = K.cast_to_floatx(label_smoothing)

        def _smooth_labels():
            num_classes = K.cast(K.shape(y_true)[1], y_pred.dtype)
            return y_true * (1.0 - smoothing) + (smoothing / num_classes)

        y_true = K.switch(K.greater(smoothing, 0), _smooth_labels, lambda: y_true)
    return K.categorical_crossentropy(y_true, y_pred, from_logits=from_logits)
Example No. 31
def set_layer_sharpness(model, values):
    """Sets the sharpness values of all spiking layers.

    # Arguments
        model: Keras model with one or more Spiking layers.
        values: A list of sharpness values (between 0.0 and 1.0 inclusive) for each
            spiking layer in the same order as their indices.
    """
    assert type(values) is list and all(
        [type(i) is float and i >= 0.0 and i <= 1.0 for i in values])
    for i, v in enumerate(values):
        layer = model.layers[get_spiking_layer_indices(model=model)[i]]
        K.set_value(layer.sharpness, K.cast_to_floatx(v))
Example No. 32
def get_data_gen():
    """
    Return train and val generators that give data in batches,
    and data label names.
    """

    # load data
    (X_train, y_train), (X_val, y_val) = cifar10.load_data()

    if K.image_data_format() == 'channels_first':
        X_train = X_train.transpose(0, 2, 3, 1)
        X_val = X_val.transpose(0, 2, 3, 1)

    X_train = K.cast_to_floatx(X_train)
    X_val = K.cast_to_floatx(X_val)

    y_train = to_categorical(y_train)
    y_val = to_categorical(y_val)

    X_train -= mean
    X_val -= mean

    X_train /= std
    X_val /= std

    # train generator
    train_transforms = [
        partial(random_crop, new_size=32, padding=4),
        partial(horizontal_flip, f=0.5)
    ]

    train_generator = ArrayDataGenerator(X_train, y_train,
                                         transforms=train_transforms,
                                         shuffle=True, seed=28)

    # validation generator
    val_generator = ArrayDataGenerator(X_val, y_val, shuffle=False)

    return train_generator, val_generator
Example No. 33
 def __init__(self, l2_reg=0., s_ll_reg=0., S_ll=None, orth_reg=0., embedding_dim=0, reshape=None, mask_value=None):
     """
     Custom regularizer used for the last layer of a SimEc
     s_ll_reg enforces that W^TW approximates S,
     orth_reg enforces that WW^T approximates lambda*I, i.e. that the vectors are orthogonal (but not necessarily length 1)
     """
     self.l2_reg = K.cast_to_floatx(l2_reg)
     self.s_ll_reg = K.cast_to_floatx(s_ll_reg)
     if s_ll_reg > 0.:
         assert (S_ll is not None), "need to give S_ll"
         self.S_ll = S_ll
     else:
         self.S_ll = None
     self.orth_reg = K.cast_to_floatx(orth_reg)
     if orth_reg > 0.:
         assert (embedding_dim > 0), "need to give shape of embedding layer, i.e. x.shape[0]"
         self.embedding_dim = embedding_dim
     self.reshape = reshape
     if mask_value is None:
         self.errfun = mean_squared_error
     else:
         self.errfun = masked_mse(mask_value)
Example No. 34
def load_g10():
    X_train, y_train = galaxy10.load_data()

    actual_X_train = np.zeros((X_train.shape[0], X_train.shape[1] + 1,
                               X_train.shape[2] + 1, X_train.shape[3]))

    for idx, image in enumerate(X_train):
        padded = np.pad(image, 1, mode='edge')
        actual_X_train[idx] = padded[1:, 1:, 1:-1]

    X_train = normalize_minus1_1(cast_to_floatx(actual_X_train))
    # X_test = normalize_minus1_1(cast_to_floatx(X_test))
    return (X_train[:1000], y_train[:1000]), (X_train[:1000], y_train[:1000])
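A quick shape check of the edge-padding step above, on a toy RGB image:

import numpy as np

image = np.zeros((4, 4, 3))
padded = np.pad(image, 1, mode='edge')   # pads every axis, channels included: (6, 6, 5)
print(padded[1:, 1:, 1:-1].shape)        # (5, 5, 3): keeps one extra row/column, drops the channel padding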
Example No. 35
    def get_constants(self, inputs, training=None):
        constants = []
        if 0. < self.recurrent_dropout < 1.:
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.units))

            def dropped_inputs():
                return K.dropout(ones, self.recurrent_dropout)

            rec_dp_mask = [K.in_train_phase(dropped_inputs,
                                            ones,
                                            training=training) for _ in range(3)]
            constants.append(rec_dp_mask)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(3)])
        return constants
Example No. 36
def get_test_gen(dtype='val'):

    if dtype != 'val':
        raise ValueError("CIFAR10 only has validation data.")

    _, (X_val, y_val) = cifar10.load_data()

    if K.image_data_format() == 'channels_first':
        X_val = X_val.transpose(0, 2, 3, 1)

    X_val = K.cast_to_floatx(X_val)
    y_val = to_categorical(y_val)

    X_val -= mean
    X_val /= std

    val_generator = ArrayDataGenerator(X_val, y_val, shuffle=False)

    return val_generator
Example No. 37
    def concrete_dropout(self, x):
        '''
        Concrete dropout - used at training time (gradients can be propagated)
        :param x: input
        :return:  approx. dropped out input
        '''
        eps = K.cast_to_floatx(K.epsilon())
        temp = 0.1

        unif_noise = K.random_uniform(shape=K.shape(x))
        drop_prob = (
            K.log(self.p + eps)
            - K.log(1. - self.p + eps)
            + K.log(unif_noise + eps)
            - K.log(1. - unif_noise + eps)
        )
        drop_prob = K.sigmoid(drop_prob / temp)
        random_tensor = 1. - drop_prob

        retain_prob = 1. - self.p
        x *= random_tensor
        x /= retain_prob
        return x
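A standalone NumPy check of the Concrete relaxation used above; the drop probability p and temperature are illustrative:

import numpy as np

p, temp, eps = 0.1, 0.1, 1e-7
unif_noise = np.random.rand(100000)
logit = (np.log(p + eps) - np.log(1. - p + eps)
         + np.log(unif_noise + eps) - np.log(1. - unif_noise + eps))
drop_prob = 1. / (1. + np.exp(-logit / temp))
print(drop_prob.mean())  # roughly p: the soft mask drops about a fraction p of the units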
Example No. 38
 def __init__(self, gamma=1., lam=10., axis='last'):
     self.gamma = K.cast_to_floatx(gamma)
     self.lam = K.cast_to_floatx(lam)
     self.axis = axis
Example No. 39
    def __init__(self, TV=0., TV2=0., axes=[0, 1]):

        self.TV = K.cast_to_floatx(TV)
        self.TV2 = K.cast_to_floatx(TV2)
        self.axes = list(axes)
Example No. 40
 def __init__(self, gamma=0., axis=1, division_idx=None):
     self.gamma = K.cast_to_floatx(gamma)
     self.axis = []
     self.axis.append(axis)
     self.division_idx = division_idx
Example No. 41
 def __init__(self, l1=0., l2=0., axis=0):
     self.l1 = K.cast_to_floatx(l1)
     self.l2 = K.cast_to_floatx(l2)
     self.axis = []
     self.axis.append(axis)
Example No. 42
 def __init__(self, l1=0., l2=0., axis=0):
     self.l1 = K.cast_to_floatx(l1)
     self.l2 = K.cast_to_floatx(l2)
     self.axis = axis
Example No. 43
	def __init__(self, l=0., p=0.1):
		self.l = K.cast_to_floatx(l)
		self.p = K.cast_to_floatx(p)
		self.uses_learning_phase = True
Example No. 44
 def __init__(self, l1=0., l2=0.):
     self.l1 = K.cast_to_floatx(l1)
     self.l2 = K.cast_to_floatx(l2)
     self.uses_learning_phase = False
     self.layer = None
Example No. 45
 def __init__(self, gamma=1., lam=10., division_idx=None):
     self.gamma = K.cast_to_floatx(gamma)
     self.lam = K.cast_to_floatx(lam)
     self.division_idx = division_idx
Example No. 46
	def __init__(self, beta_init=0.1, weights=None, activity_regularizer=None, **kwargs):
		self.supports_masking = True
		self.beta_init = K.cast_to_floatx(beta_init)
		self.initial_weights = weights
		self.activity_regularizer = regularizers.get(activity_regularizer)
		super(ParametricSigmoid, self).__init__(**kwargs)
Example No. 47
 def __init__(self, m=0., **kwargs):
     self.m = K.cast_to_floatx(m)
     super(ManifoldWeightRegularizer, self).__init__(**kwargs)
Example No. 48
 def __init__(self, l1=0., l2=0.,p_mask=None):
     self.l1 = K.cast_to_floatx(l1)
     self.l2 = K.cast_to_floatx(l2)
     self.p_mask = K.cast_to_floatx(p_mask)
     self.uses_learning_phase = True
Example No. 49
 def get_constants(self, x):
     constants = []
     constants.append([K.cast_to_floatx(1.) for _ in range(4)])
     constants.append([K.cast_to_floatx(1.) for _ in range(4)])
     return constants