Example #1
def compute_im2(x, x_cf, y, y_t, ae_models):
    '''
    IM2 = || AE_t(x_cf) - AE(x_cf) || / | x_cf |
    '''
    x = x.detach().cpu().numpy()
    x_cf = x_cf.detach().cpu().numpy()
    x = np.reshape(x, (x.shape[0], 28, 28, 1))
    x_cf = np.reshape(x_cf, (x_cf.shape[0], 28, 28, 1))

    cf_score = np.zeros((x_cf.shape[0]))
    cf_score_num = np.zeros((x_cf.shape[0]))
    cf_score_denom = np.zeros((x_cf.shape[0]))
    for idx in range(x_cf.shape[0]):
        x_i = K.constant(np.reshape(x_cf[idx, :], (1, 28, 28, 1)))

        # Compute the score for the counterfactual with the target-class autoencoder
        y_i = int(y_t[idx])
        model = ae_models[y_i]
        # The last model in the list is the autoencoder trained on all classes
        model_all = ae_models[-1]
        cf_score[idx], cf_score_num[idx], cf_score_denom[idx] = K.eval(ae_reconstruct_loss_im2(model, model_all, x_i))

    return np.mean(cf_score), np.mean(cf_score_num), np.mean(cf_score_denom)
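`ae_reconstruct_loss_im2` is defined elsewhere in the project; the following is only a NumPy sketch of the ratio described in the docstring, assuming squared-error reconstruction terms and an L1 norm in the denominator (hypothetical helper, not the original):

import numpy as np

def im2_sketch(ae_t, ae_all, x_cf):
    # || AE_t(x_cf) - AE(x_cf) ||^2 / | x_cf |, with a small stabiliser
    num = np.sum((ae_t.predict(x_cf) - ae_all.predict(x_cf)) ** 2)
    denom = np.sum(np.abs(x_cf)) + 1e-10
    return num / denom, num, denom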
Example #2
def correlation_gm(y_true, y_pred, sample_weight=None):

    sz = K.ndim(y_true)
    gm = nibabel.load(
        '/data/Templates/Yeo2011_17Networks_2mm_LiberalMask_64.nii.gz'
    ).get_fdata()

    if K.eval(sz) == 5:
        gm = np.expand_dims(gm, axis=[0, -1])

    gm = tf.cast(gm, tf.bool)

    #### GM Mask ####
    y_true = tf.boolean_mask(y_true, gm)
    y_pred = tf.boolean_mask(y_pred, gm)

    mean_ytrue = tf.reduce_mean(y_true, keepdims=True)
    mean_ypred = tf.reduce_mean(y_pred, keepdims=True)

    demean_ytrue = y_true - mean_ytrue
    demean_ypred = y_pred - mean_ypred

    if sample_weight is not None:
        sample_weight = tf.broadcast_weights(sample_weight, y_true)
        std_y = tf.sqrt(
            tf.reduce_sum(sample_weight * tf.square(demean_ytrue)) *
            tf.reduce_sum(sample_weight * tf.square(demean_ypred)))
        correlation = tf.reduce_sum(
            sample_weight * demean_ytrue * demean_ypred) / std_y
    else:
        std_y = tf.sqrt(
            tf.reduce_sum(tf.square(demean_ytrue)) *
            tf.reduce_sum(tf.square(demean_ypred)))
        correlation = tf.reduce_sum(demean_ytrue * demean_ypred) / std_y

    return tf.maximum(tf.minimum(correlation, 1.0), -1.0)
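For reference, the value returned above is the Pearson correlation over the masked voxels; a small NumPy cross-check of the same formula (illustrative only, not part of the original function):

import numpy as np

yt = np.random.random(1000)
yp = 0.5 * yt + 0.1 * np.random.random(1000)
dt, dp = yt - yt.mean(), yp - yp.mean()
r_manual = np.sum(dt * dp) / np.sqrt(np.sum(dt ** 2) * np.sum(dp ** 2))
print(r_manual, np.corrcoef(yt, yp)[0, 1])  # the two values agree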
Example #3
    def test_geometric_alignment(self):
        # Create test samples
        y_true, y_pred = create_y_true_y_pred()

        loss = losses.geometric_alignment(shape_points=2)(K.variable(y_true),
                                                          K.variable(y_pred))

        loss_tf = K.eval(loss)

        x = np.reshape(y_pred[0, 7:7 + 3 * 2], (2, 3)).T
        x_gt = np.reshape(y_true[0, 7:7 + 3 * 2], (2, 3)).T

        t = spatial_geometry.quaternion_translation_to_pose(
            y_pred[0, :4], y_pred[0, 4:7])
        t_gt = spatial_geometry.quaternion_translation_to_pose(
            y_true[0, :4], y_true[0, 4:7])

        xt = np.dot(t[:3, :3], x) + np.expand_dims(t[:3, 3], axis=1)
        xt_gt = np.dot(t_gt[:3, :3], x_gt) + np.expand_dims(t_gt[:3, 3],
                                                            axis=1)

        loss_np = np.mean(np.mean(np.abs(xt_gt - xt), axis=0), axis=-1)

        self.assertAlmostEqual(loss_tf[0], loss_np, places=4)
Example #4
    def on_epoch_end(self, epoch, logs):
        
        self.train_data.shuffle()
        if epoch % self.val_step != 0:
            return

        # validate
        psnr = 0.0
        pbar = ProgressBar(len(self.val_data))
        for i, (lr, hr) in enumerate(self.val_data):
            sr = self.model(lr)
            sr_numpy = K.eval(sr)
            psnr += self.calc_psnr(sr_numpy.squeeze(), hr.squeeze())
            pbar.update('')
        psnr = round(psnr / len(self.val_data), 4)
        loss = round(logs['loss'], 4)

        # save best status
        if psnr >= self.best_psnr:
            self.best_psnr = psnr
            self.best_epoch = epoch
            self.model.save(self.ckp_path, overwrite=True, include_optimizer=True, save_format='tf')
            state = {
                'current_epoch': epoch,
                'best_epoch': self.best_epoch,
                'best_psnr': self.best_psnr
            }

            with open(self.state_path, 'wb') as f:
                pickle.dump(state, f)
            
        self.lg.info('Epoch: {:4} | PSNR: {:.2f} | Loss: {:.4f} | lr: {:.2e} | Best_PSNR: {:.2f} in Epoch [{}]'.format(epoch, psnr, loss, K.get_value(self.model.optimizer.lr), self.best_psnr, self.best_epoch))

        # record tensorboard
        self.writer.add_scalar('train_loss', loss, epoch)
        self.writer.add_scalar('val_psnr', psnr, epoch)
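`self.calc_psnr` is not shown in this snippet; a common definition it may correspond to (an assumption, written here for images in the [0, 255] range):

import numpy as np

def calc_psnr_sketch(sr, hr, max_val=255.0):
    # peak signal-to-noise ratio between the super-resolved and reference images
    mse = np.mean((sr.astype(np.float64) - hr.astype(np.float64)) ** 2)
    return 10.0 * np.log10(max_val ** 2 / mse)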
Example #5
    def recognize(self, img, outputs, class_names, vgg_face):
        person_rep = dict()
        person_names = ["angelamerkel", "jinping", "trump"]
        for person in person_names:
            embed = np.loadtxt(person + ".txt")
            person_rep[person] = embed
        boxes, objectness, classes, nums = outputs
        boxes, objectness, classes, nums = boxes[0], objectness[0], classes[
            0], nums[0]
        wh = np.flip(img.shape[0:2])
        for i in range(nums):
            x1y1 = tuple((np.array(boxes[i][0:2]) * wh).astype(np.int32))
            x2y2 = tuple((np.array(boxes[i][2:4]) * wh).astype(np.int32))
            if class_names[int(classes[i])] == "face":

                img_crop = img[x1y1[1]:x2y2[1], x1y1[0]:x2y2[0]]

                crop_img = img_to_array(img_crop)
                crop_img = np.expand_dims(crop_img, axis=0)
                crop_img = preprocess_input(crop_img)

                img_encode = vgg_face(transform_images(crop_img, 224))
                embed = K.eval(img_encode)
                name, score = self.get_match(person_rep, embed, 0.30)

                img = cv2.rectangle(img, x1y1, x2y2, (205, 0, 0), 2)
                img = cv2.putText(img, '{} {:.4f}'.format(name, score), x1y1,
                                  cv2.FONT_HERSHEY_COMPLEX_SMALL, 1,
                                  (255, 0, 255), 2)
            else:
                img = cv2.rectangle(img, x1y1, x2y2, (255, 0, 0), 2)
                img = cv2.putText(
                    img, '{} {:.4f}'.format(class_names[int(classes[i])],
                                            objectness[i]), x1y1,
                    cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
        return img
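`self.get_match` is defined elsewhere; a plausible sketch of what it could do (an assumption, not the project's implementation): pick the stored identity whose embedding is closest to `embed` and treat distances above the threshold as no match.

import numpy as np

def get_match_sketch(person_rep, embed, threshold):
    best_name, best_dist = 'unknown', np.inf
    for name, ref_embed in person_rep.items():
        dist = np.linalg.norm(np.asarray(ref_embed) - np.squeeze(embed))
        if dist < best_dist:
            best_name, best_dist = name, dist
    if best_dist > threshold:
        best_name = 'unknown'
    return best_name, best_dist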
Example #6
    def loss(self,
             x: np.ndarray,
             y: np.ndarray,
             reduction: str = "none",
             **kwargs) -> np.ndarray:
        """
        Compute the loss of the neural network for samples `x`.

        :param x: Samples of shape (nb_samples, nb_features) or (nb_samples, nb_pixels_1, nb_pixels_2,
                  nb_channels) or (nb_samples, nb_channels, nb_pixels_1, nb_pixels_2).
        :param y: Target values (class labels) one-hot-encoded of shape `(nb_samples, nb_classes)` or indices
                  of shape `(nb_samples,)`.
        :param reduction: Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'.
                   'none': no reduction will be applied
                   'mean': the sum of the output will be divided by the number of elements in the output,
                   'sum': the output will be summed.
        :return: Loss values.
        :rtype: Format as expected by the `model`
        """
        if not self._losses:
            raise NotImplementedError(
                "loss method is only supported for keras versions >= 2.3.1")

        if self.is_tensorflow:
            import tensorflow.keras.backend as k
        else:
            import keras.backend as k

        x_preprocessed, y_preprocessed = self._apply_preprocessing(x,
                                                                   y,
                                                                   fit=False)
        shape_match = [
            i is None or i == j
            for i, j in zip(self._input_shape, x_preprocessed.shape[1:])
        ]
        if not all(shape_match):
            raise ValueError(
                "Error when checking x: expected preprocessed x to have shape {} but got array with "
                "shape {}.".format(self._input_shape,
                                   x_preprocessed.shape[1:]))

        # Adjust the shape of y for loss functions that do not take labels in one-hot encoding
        if self._reduce_labels:
            y_preprocessed = np.argmax(y_preprocessed, axis=1)

        predictions = self._model.predict(x_preprocessed)

        if self._orig_loss and hasattr(self._orig_loss, "reduction"):
            prev_reduction = self._orig_loss.reduction
            self._orig_loss.reduction = self._losses.Reduction.NONE
            loss = self._orig_loss(y_preprocessed, predictions)
            self._orig_loss.reduction = prev_reduction
        else:
            prev_reduction = []
            predictions = k.constant(predictions)
            y_preprocessed = k.constant(y_preprocessed)
            for loss_function in self._model.loss_functions:
                prev_reduction.append(loss_function.reduction)
                loss_function.reduction = self._losses.Reduction.NONE
            loss = self._loss_function(y_preprocessed, predictions)
            for i, loss_function in enumerate(self._model.loss_functions):
                loss_function.reduction = prev_reduction[i]

        loss_value = k.eval(loss)

        if reduction == "none":
            pass
        elif reduction == "mean":
            loss_value = np.mean(loss_value, axis=0)
        elif reduction == "sum":
            loss_value = np.sum(loss_value, axis=0)

        return loss_value
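The three reduction modes handled above mirror the standard Keras loss reductions; a minimal stand-alone illustration (independent of the classifier wrapper above):

import tensorflow as tf

y_true = tf.constant([[0., 1.], [1., 0.], [0., 1.]])
y_pred = tf.constant([[0.1, 0.9], [0.8, 0.2], [0.4, 0.6]])
cce = tf.keras.losses.CategoricalCrossentropy(
    reduction=tf.keras.losses.Reduction.NONE)
per_sample = cce(y_true, y_pred).numpy()   # 'none': one loss value per sample
print(per_sample, per_sample.mean(), per_sample.sum())  # 'mean' and 'sum'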
Example #7
    def get_kernel(self):
        return K.eval(self.kernel)
Example #8
def main():

    dataset = load_dataset()

    train_data = np.asarray(dataset['train']['data'])
    train_labels = dataset['train']['label']
    num_classes = len(np.unique(train_labels))

    test_data = np.asarray(dataset['test']['data'])
    test_labels = dataset['test']['label']

    train_labels = to_categorical(train_labels, num_classes=num_classes)
    test_labels = to_categorical(test_labels, num_classes=num_classes)

    generator = dataset['generator']
    fs_generator = dataset['fs_generator']
    generator_kwargs = {
        'batch_size': batch_size
    }

    print('reps : ', reps)
    name = 'mnist_' + fs_network + '_r_' + str(regularization)
    print(name)
    model_kwargs = {
        'nclasses': num_classes,
        'regularization': regularization
    }

    total_features = int(np.prod(train_data.shape[1:]))

    fs_filename = directory + fs_network + '_trained_model.h5'
    classifier_filename = directory + classifier_network + '_trained_model.h5'
    if not os.path.isdir(directory):
        os.makedirs(directory)
    if not os.path.exists(fs_filename) and warming_up:
        np.random.seed(1001)
        tf.set_random_seed(1001)
        model = getattr(network_models, fs_network)(input_shape=train_data.shape[1:], **model_kwargs)
        print('training_model')
        model.fit_generator(
            generator.flow(train_data, train_labels, **generator_kwargs),
            steps_per_epoch=train_data.shape[0] // batch_size, epochs=110,
            callbacks=[
                callbacks.LearningRateScheduler(scheduler())
            ],
            validation_data=(test_data, test_labels),
            validation_steps=test_data.shape[0] // batch_size,
            verbose=verbose
        )

        model.save(fs_filename)
        del model
        K.clear_session()

    for e2efs_class in e2efs_classes:
        nfeats = []
        accuracies = []
        times = []

        cont_seed = 0

        for factor in [.05, .1, .25, .5]:
            n_features = int(total_features * factor)
            n_accuracies = []
            n_times = []

            for r in range(reps):
                print('factor : ', factor, ' , rep : ', r)
                np.random.seed(cont_seed)
                tf.set_random_seed(cont_seed)
                cont_seed += 1
                mask = (np.std(train_data, axis=0) > 1e-3).astype(int).flatten()
                classifier = load_model(fs_filename) if warming_up else getattr(network_models, fs_network)(input_shape=train_data.shape[1:], **model_kwargs)
                e2efs_layer = e2efs_class(n_features, input_shape=train_data.shape[1:], kernel_initializer=initializers.constant(mask))
                model = e2efs_layer.add_to_model(classifier, input_shape=train_data.shape[1:])

                optimizer = custom_optimizers.E2EFS_Adam(e2efs_layer=e2efs_layer, lr=1e-3)  # optimizers.adam(lr=1e-2)
                model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['acc'])
                model.fs_layer = e2efs_layer
                model.classifier = classifier
                model.summary()
                start_time = time.time()
                model.fit_generator(
                    fs_generator.flow(train_data, train_labels, **generator_kwargs),
                    steps_per_epoch=train_data.shape[0] // batch_size, epochs=20000,
                    callbacks=[
                        E2EFSCallback(verbose=verbose)
                    ],
                    validation_data=(test_data, test_labels),
                    validation_steps=test_data.shape[0] // batch_size,
                    verbose=verbose
                )
                fs_rank = np.argsort(K.eval(model.heatmap))[::-1]
                mask = np.zeros(train_data.shape[1:])
                mask.flat[fs_rank[:n_features]] = 1.
                # mask = K.eval(model.fs_kernel).reshape(train_data.shape[1:])
                n_times.append(time.time() - start_time)
                print('nnz : ', np.count_nonzero(mask))
                del model
                K.clear_session()
                model = load_model(classifier_filename) if warming_up else getattr(network_models, classifier_network)(
                    input_shape=train_data.shape[1:], **model_kwargs)
                optimizer = optimizers.Adam(lr=1e-2)  # optimizers.adam(lr=1e-2)
                model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['acc'])
                model.fit_generator(
                    generator.flow(mask * train_data, train_labels, **generator_kwargs),
                    steps_per_epoch=train_data.shape[0] // batch_size, epochs=80,
                    callbacks=[
                        callbacks.LearningRateScheduler(scheduler()),
                    ],
                    validation_data=(mask * test_data, test_labels),
                    validation_steps=test_data.shape[0] // batch_size,
                    verbose=verbose
                )
                n_accuracies.append(model.evaluate(mask * test_data, test_labels, verbose=0)[-1])
                del model
                K.clear_session()
            print(
                'n_features : ', n_features, ', acc : ', n_accuracies, ', time : ', n_times
            )
            accuracies.append(n_accuracies)
            nfeats.append(n_features)
            times.append(n_times)

        output_filename = directory + fs_network + '_' + classifier_network + '_' + e2efs_class.__name__ + \
                          '_results_warming_' + str(warming_up) + '.json'

        try:
            with open(output_filename) as outfile:
                info_data = json.load(outfile)
        except (OSError, ValueError):
            info_data = {}

        if name not in info_data:
            info_data[name] = []

        info_data[name].append(
            {
                'regularization': regularization,
                'reps': reps,
                'classification': {
                    'n_features': nfeats,
                    'accuracy': accuracies,
                    'times': times
                }
            }
        )

        with open(output_filename, 'w') as outfile:
            json.dump(info_data, outfile)
Example #9
    def on_epoch_end(self, epoch, logs={}):
        print(K.eval(self.model.get_layer('intaracting_caps').output[1]))

        with open(self.save_path, 'wb') as handle:
            pickle.dump(self.routing_list, handle)
        self.routing_list = []
Example #10
    def get_config(self):
        config = {
            "transitions": K.eval(self.trans),
        }
        base_config = super(CRF, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
Example #11
    """
    const = tf.constant(1.0, dtype=tf.float32)
    nse_alpha = K.std(predicted) / K.std(true)
    return tf.subtract(const, nse_alpha, name=name + '_LOSS')


if __name__ == "__main__":
    reset_graph()
    _true = np.random.random(10)
    pred = np.random.random(10)

    t = tf.convert_to_tensor(_true, dtype=tf.float32)
    p = tf.convert_to_tensor(pred, dtype=tf.float32)

    np_errors = FindErrors(_true, pred)

    print('corr_coeff {:<10.5f} {:<10.5f}'.format(np_errors.corr_coeff(),
                                                  K.eval(corr_coeff(t, p))))
    print('r2 {:<10.5f} {:<10.5f}'.format(np_errors.r2(),
                                          1.0 - K.eval(tf_r2(t, p))))
    print('nse {:<10.5f} {:<10.5f}'.format(np_errors.nse(),
                                           1.0 - K.eval(tf_nse(t, p))))
    print('kge {:<10.5f} {:<10.5f}'.format(np_errors.kge(),
                                           1.0 - K.eval(tf_kge(t, p))))
    print('r2_mod {:<10.5f} {:<10.5f}'.format(np_errors.r2_mod(),
                                              1.0 - K.eval(tf_r2_mod(t, p))))
    print('nse_beta {:<10.5f} {:<10.5f}'.format(
        np_errors.nse_beta(), 1.0 - K.eval(tf_nse_beta(t, p))))
    print('nse_alpha {:<10.5f} {:<10.5f}'.format(
        np_errors.nse_alpha(), 1.0 - K.eval(tf_nse_alpha(t, p))))
Example #12
    def on_epoch_end(self, epoch, logs={}):
        """At the end of each epoch, save the value of beta in the _beta list."""
        tmp = K.eval(self.beta)
        self._beta.append(tmp)
Example #13
    def train(self,
              train_data_provider,
              val_data_provider,
              learning_rate,
              decay_steps,
              epochs,
              batch_size,
              augment=None,
              custom_callbacks=None):
        '''
        Start training the model on the given data providers.
        :param train_data_provider:
        :param val_data_provider:
        :param learning_rate:
        :param decay_steps:
        :param epochs:
        :param batch_size:
        :param augment:
        :param custom_callbacks:
        :return:
        '''
        assert self._is_training, 'not in training mode'

        if not osp.exists(self.log_dir):
            os.mkdir(self.log_dir)

        lr_schedule = keras.optimizers.schedules.ExponentialDecay(
            learning_rate, decay_steps, decay_rate=0.95, staircase=True)
        optimizer = keras.optimizers.Adam(learning_rate=lr_schedule)
        #optimizer = keras.optimizers.SGD(lr=learning_rate, decay= learning_rate/decay_steps, momentum=0.92, nesterov=True)#
        self.summary_writer = tf.summary.create_file_writer(self.log_dir)
        with self.summary_writer.as_default():
            max_accuracy = 0.0
            for self.epoch in range(epochs):
                print('# epoch:' + str(self.epoch + 1) + '/' + str(epochs))
                losses = []
                for step in range(self.config.STEPS_PER_EPOCH):
                    ims, label_gt = train_data_provider.next_batch(batch_size)

                    with tf.GradientTape(persistent=False) as tape:
                        label_preds = self.model(ims)
                        loss = (1.0 - self.get_dice_coefficient(
                            label_gt, label_preds, self.num_classes)
                                ) + self.__compute_loss(label_gt, label_preds)

                        losses.append(loss)
                        grad = tape.gradient(loss,
                                             self.model.trainable_variables)
                        optimizer.apply_gradients(grads_and_vars=zip(
                            grad, self.model.trainable_variables))
                        self.__draw_progress_bar(step + 1,
                                                 self.config.STEPS_PER_EPOCH)

                tst_ims, tst_y_gt = val_data_provider.next_batch(8)

                predictions = self.predict(tst_ims)

                label_preds = tf.reshape(predictions, [-1, self.num_classes])
                gt_labels = tf.reshape(tst_y_gt, [-1])

                pred_labels = tf.argmax(label_preds, axis=-1)

                keep = tf.where(tf.not_equal(gt_labels, 0))
                gt_labels = tf.gather(gt_labels, keep)

                pred_labels = tf.gather(pred_labels, keep)

                m = keras.metrics.Accuracy()
                m.update_state(gt_labels, pred_labels)
                accuracy = m.result().numpy()
                mean_loss = tf.reduce_mean(losses)
                print('\nLoss:%f; Accuracy:%f; Lr: %f' %
                      (mean_loss, accuracy,
                       KB.eval(optimizer._decayed_lr('float32'))))
                tf.summary.scalar('train_loss',
                                  mean_loss,
                                  step=(self.epoch + 1))
                tf.summary.scalar('eval_accuracy',
                                  float(accuracy),
                                  step=(self.epoch + 1))

                m.reset_states()
                if accuracy >= max_accuracy or accuracy > 0.98:
                    max_accuracy = accuracy
                    self.checkpoint_path = osp.join(
                        self.log_dir,
                        self.NET_NAME.lower() +
                        "_epoch{0}.h5".format(self.epoch + 1))
                    print('Saving weights to %s' % (self.checkpoint_path))
                    self.model.save_weights(self.checkpoint_path)
                    self.__delete_old_weights(
                        self.config.MAX_KEEPS_CHECKPOINTS)
Example #14
def main():
  # check the mean value of samples from stochastic_rounding for po2
  np.random.seed(42)
  count = 100000
  val = 42
  a = K.constant([val] * count)
  b = quantized_po2(use_stochastic_rounding=True)(a)
  res = np.sum(K.eval(b)) / count
  print(res, "should be close to ", val)
  b = quantized_relu_po2(use_stochastic_rounding=True)(a)
  res = np.sum(K.eval(b)) / count
  print(res, "should be close to ", val)
  a = K.constant([-1] * count)
  b = quantized_relu_po2(use_stochastic_rounding=True)(a)
  res = np.sum(K.eval(b)) / count
  print(res, "should be all ", 0)

  # non-stochastic rounding quantizer.
  a = K.constant([-3.0, -2.0, -1.0, -0.5, 0.0, 0.5, 1.0, 2.0, 3.0])
  a = K.constant([0.194336])
  print(" a =", K.eval(a).astype(np.float16))
  print("qa =", K.eval(quantized_relu(6,2)(a)).astype(np.float16))
  print("ss =", K.eval(smooth_sigmoid(a)).astype(np.float16))
  print("hs =", K.eval(hard_sigmoid(a)).astype(np.float16))
  print("ht =", K.eval(hard_tanh(a)).astype(np.float16))
  print("st =", K.eval(smooth_tanh(a)).astype(np.float16))
  c = K.constant(np.arange(-1.5, 1.51, 0.3))
  print(" c =", K.eval(c).astype(np.float16))
  print("qb_111 =", K.eval(quantized_bits(1,1,1)(c)).astype(np.float16))
  print("qb_210 =", K.eval(quantized_bits(2,1,0)(c)).astype(np.float16))
  print("qb_211 =", K.eval(quantized_bits(2,1,1)(c)).astype(np.float16))
  print("qb_300 =", K.eval(quantized_bits(3,0,0)(c)).astype(np.float16))
  print("qb_301 =", K.eval(quantized_bits(3,0,1)(c)).astype(np.float16))
  c_1000 = K.constant(np.array([list(K.eval(c))] * 1000))
  b = np.sum(K.eval(bernoulli()(c_1000)).astype(np.int32), axis=0) / 1000.0
  print("       hs =", K.eval(hard_sigmoid(c)).astype(np.float16))
  print("    b_all =", b.astype(np.float16))
  T = 0.0
  t = K.eval(stochastic_ternary(alpha="auto")(c_1000))
  for i in range(10):
    print("stochastic_ternary({}) =".format(i), t[i])
  print("   st_all =", np.round(
      np.sum(t.astype(np.float32), axis=0).astype(np.float16) /
      1000.0, 2).astype(np.float16))
  print("  ternary =", K.eval(ternary(threshold=0.5)(c)).astype(np.int32))
  c = K.constant(np.arange(-1.5, 1.51, 0.3))
  print(" c =", K.eval(c).astype(np.float16))
  print(" b_10 =", K.eval(binary(1)(c)).astype(np.float16))
  print("qr_10 =", K.eval(quantized_relu(1,0)(c)).astype(np.float16))
  print("qr_11 =", K.eval(quantized_relu(1,1)(c)).astype(np.float16))
  print("qr_20 =", K.eval(quantized_relu(2,0)(c)).astype(np.float16))
  print("qr_21 =", K.eval(quantized_relu(2,1)(c)).astype(np.float16))
  print("qr_101 =", K.eval(quantized_relu(1,0,1)(c)).astype(np.float16))
  print("qr_111 =", K.eval(quantized_relu(1,1,1)(c)).astype(np.float16))
  print("qr_201 =", K.eval(quantized_relu(2,0,1)(c)).astype(np.float16))
  print("qr_211 =", K.eval(quantized_relu(2,1,1)(c)).astype(np.float16))
  print("qt_200 =", K.eval(quantized_tanh(2,0)(c)).astype(np.float16))
  print("qt_210 =", K.eval(quantized_tanh(2,1)(c)).astype(np.float16))
  print("qt_201 =", K.eval(quantized_tanh(2,0,1)(c)).astype(np.float16))
  print("qt_211 =", K.eval(quantized_tanh(2,1,1)(c)).astype(np.float16))
  set_internal_sigmoid("smooth"); print("with smooth sigmoid")
  print("qr_101 =", K.eval(quantized_relu(1,0,1)(c)).astype(np.float16))
  print("qr_111 =", K.eval(quantized_relu(1,1,1)(c)).astype(np.float16))
  print("qr_201 =", K.eval(quantized_relu(2,0,1)(c)).astype(np.float16))
  print("qr_211 =", K.eval(quantized_relu(2,1,1)(c)).astype(np.float16))
  print("qt_200 =", K.eval(quantized_tanh(2,0)(c)).astype(np.float16))
  print("qt_210 =", K.eval(quantized_tanh(2,1)(c)).astype(np.float16))
  print("qt_201 =", K.eval(quantized_tanh(2,0,1)(c)).astype(np.float16))
  print("qt_211 =", K.eval(quantized_tanh(2,1,1)(c)).astype(np.float16))
  set_internal_sigmoid("real"); print("with real sigmoid")
  print("qr_101 =", K.eval(quantized_relu(1,0,1)(c)).astype(np.float16))
  print("qr_111 =", K.eval(quantized_relu(1,1,1)(c)).astype(np.float16))
  print("qr_201 =", K.eval(quantized_relu(2,0,1)(c)).astype(np.float16))
  print("qr_211 =", K.eval(quantized_relu(2,1,1)(c)).astype(np.float16))
  print("qt_200 =", K.eval(quantized_tanh(2,0)(c)).astype(np.float16))
  print("qt_210 =", K.eval(quantized_tanh(2,1)(c)).astype(np.float16))
  print("qt_201 =", K.eval(quantized_tanh(2,0,1)(c)).astype(np.float16))
  print("qt_211 =", K.eval(quantized_tanh(2,1,1)(c)).astype(np.float16))
  set_internal_sigmoid("hard")
  print(" c =", K.eval(c).astype(np.float16))
  print("q2_31 =", K.eval(quantized_po2(3,1)(c)).astype(np.float16))
  print("q2_32 =", K.eval(quantized_po2(3,2)(c)).astype(np.float16))
  print("qr2_21 =", K.eval(quantized_relu_po2(2,1)(c)).astype(np.float16))
  print("qr2_22 =", K.eval(quantized_relu_po2(2,2)(c)).astype(np.float16))
  print("qr2_44 =", K.eval(quantized_relu_po2(4,1)(c)).astype(np.float16))

  # stochastic rounding
  c = K.constant(np.arange(-1.5, 1.51, 0.3))
  print("q2_32_2 =", K.eval(quantized_relu_po2(32,2)(c)).astype(np.float16))
  b = K.eval(stochastic_binary()(c_1000)).astype(np.int32)
  for i in range(5):
    print("sbinary({}) =".format(i), b[i])
  print("sbinary =", np.round(np.sum(b, axis=0) / 1000.0, 2).astype(np.float16))
  print(" binary =", K.eval(binary()(c)).astype(np.int32))
  print(" c      =", K.eval(c).astype(np.float16))
  for i in range(10):
    print(" s_bin({}) =".format(i),
          K.eval(binary(use_stochastic_rounding=1)(c)).astype(np.int32))
  for i in range(10):
    print(" s_po2({}) =".format(i),
          K.eval(quantized_po2(use_stochastic_rounding=1)(c)).astype(np.int32))
  for i in range(10):
    print(
        " s_relu_po2({}) =".format(i),
        K.eval(quantized_relu_po2(use_stochastic_rounding=1)(c)).astype(
            np.int32))
Example #15
def test_sub_pix_translation(translat):
    """
    To define the tests run by this function, write P for the prediction function,
    G for the ground-truth image, and I for the image downsampled from G.
    Write G_t for the image G translated by t along the horizontal axis, and I_t for
    the image downsampled from G_t.

    sc       = || G - P(I) ||
    sc_tr    = || G_t - P(I_t) ||
    sc_mu    = || P(I_t) - P(I) ||
    sc_tau   = || P(I_t)_(-t) - P(I) ||
    best_tau = min over t' of || P(I_t)_(t') - P(I) ||

    Args:
        translat (float): value of t

    Returns:
        the averages of the scores above (and the standard deviation of best_tau)
    """
    eval_model = load_pre_trained_model()
    _, test_ground_truth = dataset.images_paths()

    score = []
    score_tr = []
    score_tr_tr = []
    score_mutuel = []
    score_mutuel2 = []
    score_tau = []
    best_tau = []

    for ind in range(len(test_ground_truth)):
        img = np.array(Image.open(test_ground_truth[ind])).astype(float)
        h, w, _ = img.shape
        img_ground_truth = img
        img_test = down_sampling(img_ground_truth)

        prediction = resolve_single(eval_model, img_test)
        prediction = K.eval(prediction).astype(float)
        score.append(np.sqrt(np.mean((prediction - img_ground_truth)**2)))

        img_ground_truth_t = ffttranslate(img_ground_truth, translat)
        img_test_t = down_sampling(img_ground_truth_t)

        prediction2 = resolve_single(eval_model, img_test_t)
        prediction2 = K.eval(prediction2).astype(float)
        score_tr.append(np.sqrt(np.mean(
            (prediction2 - img_ground_truth_t)**2)))

        score_mutuel.append(np.sqrt(np.mean((prediction2 - prediction)**2)))
        prediction_tau = ffttranslate(prediction2, -translat)
        score_mutuel2.append(
            np.sqrt(np.mean((prediction2 - prediction_tau)**2)))
        score_tau.append(np.sqrt(np.mean((prediction_tau - prediction)**2)))
        score_tr_tr.append(
            np.sqrt(np.mean((prediction_tau - img_ground_truth)**2)))

        func = lambda x: np.mean(
            (ffttranslate(prediction2, -x) - prediction)**2)
        tau_opt = opt.minimize(func, translat)
        best_tau.append(tau_opt.x)
        print(
            "t = %f - testing pre-trained model %i/%i : score - %f ; score tr - %f ; mutual score - %f ; translation-inv score - %f ; best tau - %f"
            % (translat, ind, len(test_ground_truth), score[-1], score_tr[-1],
               score_mutuel[-1], score_tau[-1], best_tau[-1]))
    return np.mean(score), np.mean(score_tr), np.mean(score_tr_tr), np.mean(
        score_mutuel), np.mean(score_mutuel2), np.mean(score_tau), np.mean(
            best_tau), np.std(best_tau)
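`ffttranslate` is not included in this snippet; a minimal sketch of a Fourier-domain sub-pixel horizontal translation it could correspond to (an assumption, not the original helper):

import numpy as np

def ffttranslate_sketch(img, t):
    # shift an (H, W, C) image by a fractional amount t along the horizontal
    # axis using the Fourier shift theorem, channel by channel
    h, w, c = img.shape
    phase = np.exp(-2j * np.pi * np.fft.fftfreq(w) * t)
    out = np.empty_like(img, dtype=float)
    for ch in range(c):
        spec = np.fft.fft(img[:, :, ch], axis=1)
        out[:, :, ch] = np.real(np.fft.ifft(spec * phase, axis=1))
    return out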
Example #16
x_train = []
y_train = []
x_test = []
y_test = []
person_rep = dict()
person_folders = os.listdir('Images_crop')
for i, person in enumerate(person_folders):
    person_rep[i] = person
    image_names = os.listdir('Images_crop/' + person + '/')
    for image_name in image_names:
        img = load_img('Images_crop/' + person + '/' + image_name,
                       target_size=(224, 224))
        img = img_to_array(img)
        img = np.expand_dims(img, axis=0)
        img = preprocess_input(img)
        img_encode = vgg_face(img)
        x_train.append(np.squeeze(K.eval(img_encode)).tolist())
        y_train.append(i)

    # Prepare test data (once per person, not once per training image)
    test_image_names = os.listdir('Test_Images_crop/' + person + '/')
    for image_name in test_image_names:
        img = load_img('Test_Images_crop/' + person + '/' + image_name,
                       target_size=(224, 224))
        img = img_to_array(img)
        img = np.expand_dims(img, axis=0)
        img = preprocess_input(img)
        img_encode = vgg_face(img)
        x_test.append(np.squeeze(K.eval(img_encode)).tolist())
        y_test.append(i)
Example #17
def main(dataset_name):

    dataset = load_dataset()

    raw_data = np.asarray(dataset['raw']['data'])
    raw_label = np.asarray(dataset['raw']['label'])
    num_classes = len(np.unique(raw_label))

    rskf = RepeatedStratifiedKFold(n_splits=k_folds,
                                   n_repeats=k_fold_reps,
                                   random_state=42)

    for e2efs_class in e2efs_classes:
        print('E2EFS-Method : ', e2efs_class.__name__)
        cont_seed = 0

        nfeats = []
        accuracies = []
        model_accuracies = []
        svc_accuracies = []
        fs_time = []
        BAs = []
        svc_BAs = []
        model_BAs = []
        mAPs = []
        svc_mAPs = []
        model_mAPs = []
        mus = []
        name = dataset_name + '_' + kernel + '_mu_' + str(mu)
        print(name)

        for j, (train_index,
                test_index) in enumerate(rskf.split(raw_data, raw_label)):
            print('k_fold', j, 'of', k_folds * k_fold_reps)

            train_data, train_labels = raw_data[train_index], raw_label[
                train_index]
            test_data, test_labels = raw_data[test_index], raw_label[
                test_index]

            train_labels = to_categorical(train_labels,
                                          num_classes=num_classes)
            test_labels = to_categorical(test_labels, num_classes=num_classes)

            valid_features = np.where(np.abs(train_data).sum(axis=0) > 0)[0]
            if len(valid_features) < train_data.shape[1]:
                print('Removing', train_data.shape[1] - len(valid_features),
                      'zero features')
                train_data = train_data[:, valid_features]
                test_data = test_data[:, valid_features]

            model_kwargs = {
                'mu': mu / len(train_data),
                'kernel': kernel,
                'degree': 3
            }

            svc_kwargs = {'C': 1.0, 'solver': 0.}

            for i, n_features in enumerate([10, 50, 100, 150, 200]):
                n_accuracies = []
                n_svc_accuracies = []
                n_model_accuracies = []
                n_BAs = []
                n_svc_BAs = []
                n_model_BAs = []
                n_mAPs = []
                n_svc_mAPs = []
                n_model_mAPs = []
                n_train_accuracies = []
                n_time = []
                print('n_features : ', n_features)

                heatmaps = []
                weight = train_labels[:, -1].mean()
                for r in range(reps):
                    np.random.seed(cont_seed)
                    K.tf.set_random_seed(cont_seed)
                    cont_seed += 1

                    model = train_Keras(
                        train_data,
                        train_labels,
                        test_data,
                        test_labels,
                        model_kwargs,
                        e2efs_class=e2efs_class,
                        n_features=n_features,
                    )
                    heatmaps.append(K.eval(model.heatmap))
                    n_time.append(model.fs_time)
                    test_data_norm = model.normalization.transform(test_data)
                    train_data_norm = model.normalization.transform(train_data)
                    test_pred = model.predict(test_data_norm)
                    n_model_accuracies.append(
                        model.evaluate(test_data_norm, test_labels,
                                       verbose=0)[-1])
                    n_model_BAs.append(balance_accuracy(
                        test_labels, test_pred))
                    n_model_mAPs.append(
                        average_precision_score(test_labels[:, -1], test_pred))
                    train_acc = model.evaluate(train_data_norm,
                                               train_labels,
                                               verbose=0)[-1]
                    print('n_features : ', n_features, ', accuracy : ',
                          n_model_accuracies[-1], ', BA : ', n_model_BAs[-1],
                          ', mAP : ', n_model_mAPs[-1], ', train_accuracy : ',
                          train_acc, ', time : ', n_time[-1], 's')
                    del model
                    K.clear_session()

                heatmap = np.mean(heatmaps, axis=0)
                best_features = np.argsort(heatmap)[::-1][:n_features]

                svc_train_data = train_data[:, best_features]
                svc_test_data = test_data[:, best_features]

                norm = normalization_func()
                svc_train_data_norm = norm.fit_transform(svc_train_data)
                svc_test_data_norm = norm.transform(svc_test_data)

                bestcv = -1
                bestc = None
                bestSolver = None
                for s in [0, 1, 2, 3]:
                    for my_c in [
                            0.001, 0.1, 0.5, 1.0, 1.4, 1.5, 1.6, 2.0, 2.5, 5.0,
                            100.0
                    ]:
                        cmd = '-v 5 -s ' + str(s) + ' -c ' + str(my_c) + ' -q'
                        cv = liblinearutil.train(
                            (2 * train_labels[:, -1] - 1).tolist(),
                            svc_train_data_norm.tolist(), cmd)
                        if cv > bestcv:
                            # print('Best -> C:', my_c, ', s:', s, ', acc:', cv)
                            bestcv = cv
                            bestc = my_c
                            bestSolver = s
                svc_kwargs['C'] = bestc
                svc_kwargs['solver'] = bestSolver
                print('Best -> C:', bestc, ', s:', bestSolver, ', acc:',
                      bestcv)

                for r in range(reps):
                    np.random.seed(cont_seed)
                    K.tf.set_random_seed(cont_seed)
                    cont_seed += 1

                    model = train_SVC(svc_train_data_norm, train_labels,
                                      svc_kwargs)
                    _, accuracy, test_pred = liblinearutil.predict(
                        (2 * test_labels[:, -1] - 1).tolist(),
                        svc_test_data_norm.tolist(), model, '-q')
                    test_pred = np.asarray(test_pred)
                    n_svc_accuracies.append(accuracy[0])
                    n_svc_BAs.append(balance_accuracy(test_labels, test_pred))
                    n_svc_mAPs.append(
                        average_precision_score(test_labels[:, -1], test_pred))
                    del model
                    model = train_Keras(svc_train_data, train_labels,
                                        svc_test_data, test_labels,
                                        model_kwargs)
                    train_data_norm = model.normalization.transform(
                        svc_train_data)
                    test_data_norm = model.normalization.transform(
                        svc_test_data)
                    test_pred = model.predict(test_data_norm)
                    n_BAs.append(balance_accuracy(test_labels, test_pred))
                    n_mAPs.append(
                        average_precision_score(test_labels[:, -1], test_pred))
                    n_accuracies.append(
                        model.evaluate(test_data_norm, test_labels,
                                       verbose=0)[-1])
                    n_train_accuracies.append(
                        model.evaluate(train_data_norm,
                                       train_labels,
                                       verbose=0)[-1])
                    del model
                    K.clear_session()
                    print(
                        'n_features : ',
                        n_features,
                        ', acc : ',
                        n_accuracies[-1],
                        ', BA : ',
                        n_BAs[-1],
                        ', mAP : ',
                        n_mAPs[-1],
                        ', train_acc : ',
                        n_train_accuracies[-1],
                        ', svc_acc : ',
                        n_svc_accuracies[-1],
                        ', svc_BA : ',
                        n_svc_BAs[-1],
                        ', svc_mAP : ',
                        n_svc_mAPs[-1],
                    )
                if i >= len(accuracies):
                    accuracies.append(n_accuracies)
                    svc_accuracies.append(n_svc_accuracies)
                    model_accuracies.append(n_model_accuracies)
                    BAs.append(n_BAs)
                    mAPs.append(n_mAPs)
                    fs_time.append(n_time)
                    svc_BAs.append(n_svc_BAs)
                    svc_mAPs.append(n_svc_mAPs)
                    model_BAs.append(n_model_BAs)
                    model_mAPs.append(n_model_mAPs)
                    nfeats.append(n_features)
                    mus.append(model_kwargs['mu'])
                else:
                    accuracies[i] += n_accuracies
                    svc_accuracies[i] += n_svc_accuracies
                    model_accuracies[i] += n_model_accuracies
                    fs_time[i] += n_time
                    BAs[i] += n_BAs
                    mAPs[i] += n_mAPs
                    svc_BAs[i] += n_svc_BAs
                    svc_mAPs[i] += n_svc_mAPs
                    model_BAs[i] += n_model_BAs
                    model_mAPs[i] += n_model_mAPs

        output_filename = directory + 'LinearSVC_' + kernel + '_' + e2efs_class.__name__ + '.json'

        if not os.path.isdir(directory):
            os.makedirs(directory)

        info_data = {
            'kernel': kernel,
            'reps': reps,
            'classification': {
                'mus': mus,
                'n_features': nfeats,
                'accuracy': accuracies,
                'mean_accuracy': np.array(accuracies).mean(axis=1).tolist(),
                'svc_accuracy': svc_accuracies,
                'mean_svc_accuracy': np.array(svc_accuracies).mean(axis=1).tolist(),
                'model_accuracy': model_accuracies,
                'mean_model_accuracy': np.array(model_accuracies).mean(axis=1).tolist(),
                'BA': BAs,
                'mean_BA': np.array(BAs).mean(axis=1).tolist(),
                'mAP': mAPs,
                'mean_mAP': np.array(mAPs).mean(axis=1).tolist(),
                'svc_BA': svc_BAs,
                'svc_mean_BA': np.array(svc_BAs).mean(axis=1).tolist(),
                'svc_mAP': svc_mAPs,
                'svc_mean_mAP': np.array(svc_mAPs).mean(axis=1).tolist(),
                'model_BA': model_BAs,
                'model_mean_BA': np.array(model_BAs).mean(axis=1).tolist(),
                'model_mAP': model_mAPs,
                'model_mean_mAP': np.array(model_mAPs).mean(axis=1).tolist(),
                'fs_time': fs_time
            }
        }

        for k, v in info_data['classification'].items():
            if 'mean' in k:
                print(k, v)

        with open(output_filename, 'w') as outfile:
            json.dump(info_data, outfile)
Example #18
File: utils.py  Project: rohanshenoy/qkeras
def quantized_model_debug(model, X_test, plot=False):
    """Debugs and plots model weights and activations."""

    outputs = []
    output_names = []

    for layer in model.layers:
        if layer.__class__.__name__ in REGISTERED_LAYERS:
            output_names.append(layer.name)
            outputs.append(layer.output)

    model_debug = Model(inputs=model.inputs, outputs=outputs)

    y_pred = model_debug.predict(X_test)

    print("{:30} {: 8.4f} {: 8.4f}".format("input", np.min(X_test),
                                           np.max(X_test)))

    for n, p in zip(output_names, y_pred):
        layer = model.get_layer(n)
        if (layer.__class__.__name__ in "QActivation"
                or layer.__class__.__name__ in "QAdaptiveActivation"):
            alpha = get_weight_scale(layer.activation, p)
        else:
            alpha = 1.0
        print("{:30} {: 8.4f} {: 8.4f}".format(n, np.min(p / alpha),
                                               np.max(p / alpha)),
              end="")
        if alpha != 1.0:
            print(" a[{: 8.4f} {:8.4f}]".format(np.min(alpha), np.max(alpha)))
        if plot and layer.__class__.__name__ in [
                "QConv1D", "QConv2D", "QConv2DTranspose", "QDense",
                "QActivation", "QAdaptiveActivation", "QSimpleRNN", "QLSTM",
                "QGRU", "QBidirectional", "QSeparableConv1D",
                "QSeparableConv2D"
        ]:
            plt.hist(p.flatten(), bins=25)
            plt.title(layer.name + "(output)")
            plt.show()
        alpha = None

        if layer.__class__.__name__ not in [
                "QConv2DBatchnorm", "QDepthwiseConv2DBatchnorm"
        ]:
            weights_to_examine = layer.get_weights()
        else:
            weights_to_examine = layer.get_folded_weights()

        for i, weights in enumerate(weights_to_examine):
            if hasattr(layer, "get_quantizers") and layer.get_quantizers()[i]:
                weights = K.eval(layer.get_quantizers()[i](
                    K.constant(weights)))
                if i == 0 and layer.__class__.__name__ in [
                        "QConv1D", "QConv2D", "QConv2DTranspose", "QDense",
                        "QSimpleRNN", "QLSTM", "QGRU", "QSeparableConv1D",
                        "QSeparableConv2D", "QConv2DBatchnorm",
                        "QDepthwiseConv2DBatchnorm"
                ]:
                    alpha = get_weight_scale(layer.get_quantizers()[i],
                                             weights)
                    # if alpha is 0, let's remove all weights.
                    alpha_mask = (alpha == 0.0)
                    weights = np.where(alpha_mask, weights * alpha,
                                       weights / alpha)
                    if plot:
                        plt.hist(weights.flatten(), bins=25)
                        plt.title(layer.name + "(weights)")
                        plt.show()
            print(" ({: 8.4f} {: 8.4f})".format(np.min(weights),
                                                np.max(weights)),
                  end="")
        if alpha is not None and isinstance(alpha, np.ndarray):
            print(" a({: 10.6f} {: 10.6f})".format(np.min(alpha),
                                                   np.max(alpha)),
                  end="")
        print("")
Example #19
    def on_batch_end(self, batch):
        # evaluate the variables and save them into lists
        # self.targets.append(K.eval(self.var_y_true))
        self.model.update_prior(K.eval(self.var_y_true))
Example #20
f = K.function([A, X], [Y])

Aval = np.arange(1, 10).reshape((3, 3))
Xval = np.arange(1, 4).reshape((3, 1))

Yval = f([Aval, Xval])

print("Matrix-vector product:\n", Yval)

# Part B - Gradient

# One variable (at a single point)
x = K.constant([3])
y = x**2
grad = K.gradients(y, x)
print("Derivative:", K.eval(grad[0]))

# One variable (at several points)
x = K.constant([1, 2, 3, 4])
y = x**2
grad = K.gradients(y, x)
print("Derivative:", K.eval(grad[0]))


# Custom function
def f(x):
    return 2 * K.log(x) + K.exp(-x)


X = K.arange(1, 4, 0.5)
Y = f(X)
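A short continuation, added for illustration (not part of the original snippet): the custom function and its gradient can be evaluated at the sampled points in the same way as above.

grad = K.gradients(Y, X)
print("f(X)  =", K.eval(Y))
print("f'(X) =", K.eval(grad[0]))  # analytically: 2/x - exp(-x)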
Example #21
print("\nEvaluation Before training - At Initialization")
autoencoder.evaluate(x=df_eval_conv,
                     y=df_eval_conv,
                     batch_size=BATCH_SIZE,
                     verbose=0,
                     callbacks=[progbar_callback])
print(
    "\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%"
)

for epoch in range(4):
    print("\nTraining Epoch: %d/4" % (epoch + 1))

    print("[*] Optimizer `learning_rate`: %f" %
          K.eval(optimizer.learning_rate))
    print("[*] Optimizer `lr`: %f" % K.eval(optimizer.lr))

    print("\n[*] Optimizer `loss_scale_value`: %f" %
          K.eval(optimizer.loss_scale_value))

    try:
        print("[*] Optimizer `loss_scale_increment_period`: %f" %
              K.eval(optimizer.loss_scale_increment_period))
        print("[*] Optimizer `loss_scale_multiplier`: %f" %
              K.eval(optimizer.loss_scale_multiplier))
        print("[*] Optimizer `num_good_steps`: %f" %
              K.eval(optimizer.num_good_steps))
    except ValueError:
        if not isinstance(loss_scaler, tf.train.experimental.DynamicLossScale):
            pass
Example #22
def create_model(network_type, dataset_name, init_modules, model_params, size,
                 num_classes, lucid, _log):

    assert network_type in ('mlp', 'mlp_regression', 'cnn', 'cnn_vgg')

    if network_type == 'mlp' or network_type == 'mlp_regression':
        layers = create_mlp_layers()

    else:
        layers = create_cnn_layers()

    model = tf.keras.Sequential(layers)

    if (network_type == 'mlp'
            or network_type == 'mlp_regression') and init_modules > 0:

        down_weight = 0.6
        up_weight = 1 + (1 - down_weight) * (init_modules - 1)

        layer_widths = [size] + model_params['widths']
        if num_classes <= init_modules:
            assignments = [np.random.randint(0, init_modules, size=layer_widths[i])
                           for i in range(len(layer_widths))] + \
                          [np.array(range(num_classes))]
        else:
            assignments = [np.random.randint(0, init_modules, size=layer_widths[i])
                           for i in range(len(layer_widths))] + \
                          [np.random.randint(0, init_modules, size=num_classes)]

        dense_i = 0
        for lyr_i, lyr in enumerate(model.layers):
            if 'dense' not in lyr.name.lower():  # skip dropout layers
                continue
            if dense_i == 0:  # skip the first layer because the pixels aren't always the same
                dense_i += 1
                continue
            weights = K.eval(lyr.weights[0][:])
            in_assign = assignments[dense_i]
            out_assign = assignments[dense_i + 1]
            for in_i in range(weights.shape[0]):
                for out_i in range(weights.shape[1]):
                    if in_assign[in_i] == out_assign[out_i]:
                        weights[in_i, out_i] *= up_weight
                    else:
                        weights[in_i, out_i] *= down_weight
            if len(lyr.weights) > 1:
                model.layers[lyr_i].set_weights(
                    (weights, K.eval(lyr.weights[1][:])))
            else:
                model.layers[lyr_i].set_weights([weights])
            dense_i += 1

    elif init_modules > 0:

        down_weight = 0.8
        up_weight = 1 + (1 - down_weight) * (init_modules - 1)

        filter_counts = [cl['filters'] for cl in model_params['conv']]
        assignments = [
            np.random.randint(0, init_modules, size=filter_counts[i])
            for i in range(len(filter_counts))
        ]

        conv_i = 0
        for lyr_i, lyr in enumerate(model.layers):
            if 'conv' not in lyr.name.lower():  # skip dropout and pooling layers
                continue
            if conv_i == 0:  # skip the first layer because the pixels aren't always the same
                conv_i += 1
                continue
            # conv layer weights have shape (conv_height, conv_width, in_channels, out_channels)
            weights = K.eval(lyr.weights[0][:])
            in_assign = assignments[conv_i - 1]
            out_assign = assignments[conv_i]
            for in_i in range(weights.shape[2]):
                for out_i in range(weights.shape[3]):
                    if in_assign[in_i] == out_assign[out_i]:
                        weights[:, :, in_i, out_i] *= up_weight
                    else:
                        weights[:, :, in_i, out_i] *= down_weight
            if len(lyr.weights) > 1:
                model.layers[lyr_i].set_weights(
                    (weights, K.eval(lyr.weights[1][:])))
            else:
                model.layers[lyr_i].set_weights([weights])
            conv_i += 1

    return model
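A note on the scaling constants used above: an in/out pair falls in the same module with probability 1/init_modules when assignments are uniform, so up_weight = 1 + (1 - down_weight) * (init_modules - 1) keeps the expected multiplier at exactly 1 and the average weight magnitude unchanged. A quick check with illustrative values:

m, down_weight = 4, 0.6
up_weight = 1 + (1 - down_weight) * (m - 1)
print((up_weight + (m - 1) * down_weight) / m)  # -> 1.0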
Example #23
    def on_epoch_end(self, epoch, logs={}):
        optimizer = self.model.optimizer
        lr = K.eval(optimizer.lr)
        epoch_count = epoch + 1
        print('\n', "Epoch:", epoch_count, ', LR: {:.2f}'.format(lr))
Example #24
    def get_weights(self):
        return K.eval(self.kernel)
Example #25
    batch_grads = []
    ctr = 0

    for x, y in train_data:
        weights = np.ones(shape=[x.shape[0]]) * (1.0 / x.shape[0])
        batch_grads.append(grad_func([x, y, weights]))
        ctr += 1

    mean_grad = (1.0 / ctr) * np.add.reduce(batch_grads)
    print(np.sqrt(np.sum(np.power(mean_grad, 2.0))))

if args.grad_norm:
    cbs.append(LambdaCallback(on_epoch_end=mean_grad_norm))

hist = model.fit(
        train_data,
        validation_data=val_data,
        epochs=int(args.epochs),
        verbose=0 if args.quiet else 1,
        callbacks=cbs)

if args.quiet and not args.norm_hist:
    print("{},{},{},{},{},{},{},{},{}".format(args.network, args.reg_method, args.reg_norm, args.reg_extractor, args.reg_classifier, hist.history["loss"][-1], hist.history["accuracy"][-1], hist.history["val_loss"][-1], hist.history["val_accuracy"][-1]))
elif args.norm_hist:
    for (l, z) in zip(model.layers, zero.layers):
        if isinstance(l, (Conv2D, Dense)):
            mars = K.eval(_linf_norm(l.weights[0] - z.weights[0]))
            frob = K.eval(_frob_norm(l.weights[0] - z.weights[0]))
            print("{},{}".format(mars, frob))
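`_linf_norm` and `_frob_norm` are not shown in this fragment; plausible Keras-backend definitions they could correspond to (assumptions, not the project's code):

def _linf_norm_sketch(w):
    return K.max(K.abs(w))             # largest absolute entry (L-infinity norm)

def _frob_norm_sketch(w):
    return K.sqrt(K.sum(K.square(w)))  # Frobenius norm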

Example #26
    def get_pruneamount(self):
        weights_mask = K.eval(self.masktype(self.score))
        nz = np.count_nonzero(weights_mask)
        total = weights_mask.size
        return nz, total, nz / total
Example #27
def main(dataset_name):

    dataset = load_dataset()

    raw_data = np.asarray(dataset['raw']['data'])
    raw_label = np.asarray(dataset['raw']['label'])
    num_classes = len(np.unique(raw_label))

    rskf = RepeatedStratifiedKFold(n_splits=k_folds,
                                   n_repeats=k_fold_reps,
                                   random_state=42)

    for e2efs_class, e2efs_kwargs, T, extra_epochs in e2efs_classes:
        print('E2EFS-Method : ', e2efs_class.__name__)

        nfeats = []
        accuracies = []
        model_accuracies = []
        BAs = []
        model_BAs = []
        mAPs = []
        model_mAPs = []
        name = dataset_name + '_three_layer_nn'
        print(name)

        for j, (train_index,
                test_index) in enumerate(rskf.split(raw_data, raw_label)):
            print('k_fold', j, 'of', k_folds * k_fold_reps)

            train_data, train_labels = raw_data[train_index], raw_label[
                train_index]
            test_data, test_labels = raw_data[test_index], raw_label[
                test_index]

            train_labels = to_categorical(train_labels,
                                          num_classes=num_classes)
            test_labels = to_categorical(test_labels, num_classes=num_classes)

            valid_features = np.where(np.abs(train_data).sum(axis=0) > 0)[0]
            if len(valid_features) < train_data.shape[1]:
                print('Removing', train_data.shape[1] - len(valid_features),
                      'zero features')
                train_data = train_data[:, valid_features]
                test_data = test_data[:, valid_features]

            model_kwargs = {'regularization': regularization}

            for i, n_features in enumerate([10, 50, 100, 150, 200]):
                n_accuracies = []
                n_model_accuracies = []
                n_BAs = []
                n_model_BAs = []
                n_mAPs = []
                n_model_mAPs = []
                n_train_accuracies = []
                print('n_features : ', n_features)

                heatmaps = []
                for r in range(reps):
                    model = train_Keras(train_data,
                                        train_labels,
                                        test_data,
                                        test_labels,
                                        model_kwargs,
                                        e2efs_class=e2efs_class,
                                        n_features=n_features,
                                        e2efs_kwargs=e2efs_kwargs)
                    heatmaps.append(K.eval(model.heatmap))
                    train_data_norm = model.normalization.transform(train_data)
                    test_data_norm = model.normalization.transform(test_data)
                    test_pred = model.predict(test_data_norm)
                    n_model_accuracies.append(
                        model.evaluate(test_data_norm, test_labels,
                                       verbose=0)[-1])
                    n_model_BAs.append(balance_accuracy(
                        test_labels, test_pred))
                    n_model_mAPs.append(
                        average_precision_score(test_labels, test_pred))
                    train_acc = model.evaluate(train_data_norm,
                                               train_labels,
                                               verbose=0)[-1]
                    print('n_features : ', n_features, ', accuracy : ',
                          n_model_accuracies[-1], ', BA : ', n_model_BAs[-1],
                          ', mAP : ', n_model_mAPs[-1], ', train_accuracy : ',
                          train_acc)
                    del model
                    K.clear_session()

                heatmap = np.mean(heatmaps, axis=0)
                best_features = np.argsort(heatmap)[::-1][:n_features]

                svc_train_data = train_data[:, best_features]
                svc_test_data = test_data[:, best_features]

                for r in range(reps):
                    model = train_Keras(svc_train_data, train_labels,
                                        svc_test_data, test_labels,
                                        model_kwargs)
                    train_data_norm = model.normalization.transform(
                        svc_train_data)
                    test_data_norm = model.normalization.transform(
                        svc_test_data)
                    test_pred = model.predict(test_data_norm)
                    n_BAs.append(balance_accuracy(test_labels, test_pred))
                    n_mAPs.append(
                        average_precision_score(test_labels, test_pred))
                    n_accuracies.append(
                        model.evaluate(test_data_norm, test_labels,
                                       verbose=0)[-1])
                    n_train_accuracies.append(
                        model.evaluate(train_data_norm,
                                       train_labels,
                                       verbose=0)[-1])
                    del model
                    K.clear_session()
                    print(
                        'n_features : ',
                        n_features,
                        ', acc : ',
                        n_accuracies[-1],
                        ', BA : ',
                        n_BAs[-1],
                        ', mAP : ',
                        n_mAPs[-1],
                        ', train_acc : ',
                        n_train_accuracies[-1],
                    )
                if i >= len(accuracies):
                    accuracies.append(n_accuracies)
                    model_accuracies.append(n_model_accuracies)
                    BAs.append(n_BAs)
                    mAPs.append(n_mAPs)
                    model_BAs.append(n_model_BAs)
                    model_mAPs.append(n_model_mAPs)
                    nfeats.append(n_features)
                else:
                    accuracies[i] += n_accuracies
                    model_accuracies[i] += n_model_accuracies
                    BAs[i] += n_BAs
                    mAPs[i] += n_mAPs
                    model_BAs[i] += n_model_BAs
                    model_mAPs[i] += n_model_mAPs

        output_filename = directory + 'three_layer_nn_' + e2efs_class.__name__ + '.json'

        if not os.path.isdir(directory):
            os.makedirs(directory)

        info_data = {
            'reps': reps,
            'classification': {
                'regularization': regularization,
                'n_features': nfeats,
                'accuracy': accuracies,
                'mean_accuracy': np.array(accuracies).mean(axis=1).tolist(),
                'model_accuracy': model_accuracies,
                'mean_model_accuracy': np.array(model_accuracies).mean(axis=1).tolist(),
                'BA': BAs,
                'mean_BA': np.array(BAs).mean(axis=1).tolist(),
                'mAP': mAPs,
                'mean_mAP': np.array(mAPs).mean(axis=1).tolist(),
                'model_BA': model_BAs,
                'model_mean_BA': np.array(model_BAs).mean(axis=1).tolist(),
                'model_mAP': model_mAPs,
                'model_mean_mAP': np.array(model_mAPs).mean(axis=1).tolist()
            }
        }

        for k, v in info_data['classification'].items():
            if 'mean' in k:
                print(k, v)

        with open(output_filename, 'w') as outfile:
            json.dump(info_data, outfile)
Example #28
    def get_score(self):
        return K.eval(self.score)
Example #29
            output_names.append(layer.name)
            outputs.append(layer.output)

    model_debug = Model(inputs=[x_in], outputs=outputs)

    outputs = model_debug.predict(x_train)

    print("{:30} {: 8.4f} {: 8.4f}".format("input", np.min(x_train),
                                           np.max(x_train)))

    for n, p in zip(output_names, outputs):
        print("{:30} {: 8.4f} {: 8.4f}".format(n, np.min(p), np.max(p)),
              end="")
        layer = model.get_layer(n)
        for i, weights in enumerate(layer.get_weights()):
            weights = K.eval(layer.get_quantizers()[i](K.constant(weights)))
            print(" ({: 8.4f} {: 8.4f})".format(np.min(weights),
                                                np.max(weights)),
                  end="")
        print("")

    p_test = mo.predict(x_test)
    p_test.tofile("p_test.bin")

    score = model.evaluate(x_test, y_test, verbose=VERBOSE)
    print("Test score:", score[0])
    print("Test accuracy:", score[1])

    all_weights = []
    model_save_quantized_weights(model)
Example #30
    def get_mask(self):
        return K.eval(self.masktype(self.score))