Code example #1
 def _exp2key(self, exp):
     if isinstance(exp, anytree.node.Node):
         return dbac_expression.exp2list_parse(exp)
     elif isinstance(exp, (list, tuple, np.ndarray)):
         return dbac_expression.exp2list_parse(dbac_expression.list2exp_parse(exp))
     else:
         raise ValueError("Unsupported expression format")
Code example #2
 def score(self, images_path, expressions, **kwargs):
     scores = np.zeros((len(expressions), images_path.shape[0]), dtype=np.float64)
     images_feat = self.feat_ext.compute(images_path)
     # Soft Boolean operators: NOT(v) = 1 - v, AND = element-wise product, OR = probabilistic sum.
     ops_dic = {dbac_expression.OPS[0]: lambda v: 1.0 - v, dbac_expression.OPS[1]: np.multiply,
                dbac_expression.OPS[2]: lambda v1, v2: (v1 + v2) - np.multiply(v1, v2)}
     # Positive-class probability of each primitive classifier for every image.
     var_dic = {str(p): self.prim_rpr.get_cls(int(p))[0].predict_proba(images_feat)[:, 1] for p in
                self.prim_rpr.get_ids()}
     for idx, exp_lst in enumerate(expressions):
         exp_tree = dbac_expression.list2exp_parse(exp_lst)
         scores[idx] = dbac_expression.eval_exp(exp_tree, var_dic, ops_dic)
         if idx % 100 == 0:
             _logger.info("Tested for {}/{} expressions.".format(idx, len(expressions)))
     return scores
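
The operator table in score implements a soft (probabilistic) Boolean algebra over the per-primitive classifier probabilities: NOT(v) = 1 - v, AND is the element-wise product, and OR is the probabilistic sum v1 + v2 - v1*v2. Below is a minimal, self-contained sketch of that evaluation; the tuple-based tree and the eval_soft helper are illustrative stand-ins for the dbac_expression parsing and eval_exp utilities, not part of the repository.

import numpy as np

# Soft Boolean operators over probability vectors with values in [0, 1].
SOFT_OPS = {
    'NOT': lambda v: 1.0 - v,
    'AND': lambda a, b: a * b,
    'OR': lambda a, b: a + b - a * b,
}

def eval_soft(node, var_dic):
    # Leaves are primitive ids; internal nodes are ('OP', child, ...) tuples.
    if isinstance(node, str):
        return var_dic[node]
    op, *children = node
    return SOFT_OPS[op](*[eval_soft(c, var_dic) for c in children])

# Toy per-primitive probabilities for four images.
var_dic = {'3': np.array([0.9, 0.2, 0.8, 0.1]),
           '7': np.array([0.6, 0.9, 0.1, 0.3])}
print(eval_soft(('AND', '3', ('NOT', '7')), var_dic))  # approximately [0.36, 0.02, 0.72, 0.07]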
Code example #3
def _compute_svm_params(img_feats, prim_labels, expressions, exp_idx):
    # setup svm
    exp_lst = expressions[exp_idx]
    _logger.info("{}/{} - Training svm  ...".format(exp_idx, len(expressions)))
    exp_tree = dbac_expression.list2exp_parse(exp_lst)
    var_dic = {p: prim_labels[:, int(p)] for p in dbac_expression.get_vars(exp_tree)}
    exp_labels = dbac_expression.eval_exp(exp_tree, var_dic)
    svm_object = sklearn_svm.LinearSVC(C=1e-5, class_weight={1: 2.0, 0: 1.0}, verbose=0, penalty='l2',
                                       loss='hinge', dual=True)
    svm_object.fit(img_feats, exp_labels)
    train_acc = svm_object.score(img_feats, exp_labels)
    _logger.info("{}/{} - Finalized svm. Positives {}, Negatives {}, Accuracy {}."
                 .format(exp_idx, len(expressions), np.sum(exp_labels), np.sum(np.logical_not(exp_labels)), train_acc))
    # Pack the bias (intercept) followed by the flattened weight vector into a single array.
    svm_params = np.hstack((svm_object.intercept_.ravel(), svm_object.coef_.ravel()))
    return svm_params
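
The returned vector packs the SVM bias first, followed by the weight vector. A small sketch of how such a packed vector can be applied to new features; the linear_score helper below is illustrative and not part of the repository.

import numpy as np

def linear_score(img_feats, svm_params):
    # svm_params layout: [intercept, w_1, ..., w_d], matching the hstack in _compute_svm_params.
    bias, weights = svm_params[0], svm_params[1:]
    return img_feats @ weights + bias  # decision values; > 0 means predicted positive

feats = np.array([[1.0, 2.0, 0.0],
                  [0.0, 1.0, 2.0]])
params = np.array([0.25, 0.5, -0.25, 1.0])  # bias 0.25, weights [0.5, -0.25, 1.0]
print(linear_score(feats, params))  # decision values: 0.25 and 2.0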
Code example #4
def _sample_pos_neg(result_file, samp_size=10):
    # read file
    res = np.load(result_file, allow_pickle=True).item()  # the report dict is stored as a pickled 0-d object array
    pred = res['test_pred']
    gt = res['test_gt']
    imgs = res['test_imgs']
    exps = res['test_exps']

    for i in range(gt.shape[0]):
        print("Expression: {}".format(
            RenderTree(dbac_expression.list2exp_parse(exps[i]))))

        # find the equal error rate and the score threshold at 20% FPR
        fpr, tpr, ths = skmetrics.roc_curve(gt[i], pred[i])
        eer = dbac_compute_metrics._compute_eer(gt[i].reshape(-1, 1),
                                                pred[i].reshape(-1, 1))[0]
        ths = interp1d(fpr, ths)(0.20)
        ths_pred = pred[i] >= ths

        # compute metrics
        tn, fp, fn, tp = skmetrics.confusion_matrix(gt[i], ths_pred).ravel()
        print("EER={}, THS={}, TPR={}, FPR={}, tn={}, fp={}, fn={}, tp={}".
              format(eer, ths, tp / (tp + fn), fp / (tn + fp), tn, fp, fn, tp))

        # sampling images for tp, fp, fn and tn
        tp_img_ids = np.where(ths_pred * gt[i])[0]
        fp_img_ids = np.where(ths_pred * np.logical_not(gt[i]))[0]
        fn_img_ids = np.where(np.logical_not(ths_pred) * gt[i])[0]
        tn_img_ids = np.where(
            np.logical_not(ths_pred) * np.logical_not(gt[i]))[0]
        assert (len(tp_img_ids), len(fp_img_ids), len(fn_img_ids),
                len(tn_img_ids)) == (tp, fp, fn, tn)
        img_samples = [
            [] if len(img_ids) == 0 else np.random.choice(
                imgs[img_ids], samp_size, replace=True)
            for img_ids in [tp_img_ids, fp_img_ids, fn_img_ids, tn_img_ids]
        ]
        print("TP imgs: {}, FP imgs: {}, FN imgs: {}, Tn imgs:{}".format(
            *img_samples))

        # plot images
        f, grid = plt.subplots(4, samp_size)
        for p, samps in enumerate(img_samples):
            for q, samp in enumerate(samps):
                grid[p, q].imshow(mpimg.imread(samp), interpolation="bicubic")
                grid[p, q].xaxis.set_ticks([])
                grid[p, q].yaxis.set_ticks([])
        plt.show()
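
Two quantities drive the analysis above: the equal error rate reported by dbac_compute_metrics._compute_eer, and a score threshold interpolated from the ROC curve at a fixed 20% false-positive rate. The sketch below is a self-contained version of both computations; compute_eer is a common approximation and may differ in detail from the project's implementation.

import numpy as np
from scipy.interpolate import interp1d
from sklearn import metrics as skmetrics

def compute_eer(gt, pred):
    # Equal error rate: operating point where the false-positive rate equals the miss rate.
    fpr, tpr, _ = skmetrics.roc_curve(gt, pred)
    fnr = 1.0 - tpr
    idx = np.argmin(np.abs(fnr - fpr))
    return (fpr[idx] + fnr[idx]) / 2.0

def threshold_at_fpr(gt, pred, target_fpr=0.20):
    # Score threshold that yields the requested false-positive rate, as in _sample_pos_neg.
    fpr, _, ths = skmetrics.roc_curve(gt, pred)
    return float(interp1d(fpr, ths)(target_fpr))

gt = np.array([0, 0, 0, 0, 1, 1, 1, 1])
pred = np.array([0.1, 0.3, 0.35, 0.8, 0.4, 0.6, 0.7, 0.9])
print(compute_eer(gt, pred), threshold_at_fpr(gt, pred))  # about 0.125 and 0.82 on this toy data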
Code example #5
 def inference(self, expressions, **kwargs):
     ops_dic = dict()
     ops_dic[dbac_expression.OPS[0]] = lambda v1: -1 * v1
     ops_dic[dbac_expression.OPS[1]] = lambda v1, v2: np.ravel(self.tf_session.run(
         self.output_tn, feed_dict={self._prims_rpr_ph: np.expand_dims(np.hstack([v1, v2]), 0),
                                    self._switch_ph: 1 * np.ones(1, dtype=np.int64)}))
     ops_dic[dbac_expression.OPS[2]] = lambda v1, v2: np.ravel(self.tf_session.run(
         self.output_tn, feed_dict={self._prims_rpr_ph: np.expand_dims(np.hstack([v1, v2]), 0),
                                    self._switch_ph: 2 * np.ones(1, dtype=np.int64)}))
     var_dic = {str(p): self.prim_rpr.get_rpr(int(p))[0] for p in self.prim_rpr.get_ids()}
     exp_rpr = np.zeros((len(expressions), self.dim), dtype=np.float64)
     for idx, exp_lst in enumerate(expressions):
         exp_tree = dbac_expression.list2exp_parse(exp_lst)
         exp_rpr[idx] = dbac_expression.eval_exp(exp_tree, var_dic, ops_dic)
         if (idx + 1) % 250 == 0:
             _logger.info("Inference expression classifiers {}/{}.".format(idx + 1, len(expressions)))
     return exp_rpr
Code example #6
def _compute_statistics(db_name,
                        db_path,
                        split_file,
                        comb_file=None,
                        plot=False):
    # Read data
    logger.info("Loading dataset and split...")
    dataset = dbac_data.IDataset.factory(db_name, db_path)
    dataset.load_split(split_file, comb_file)

    # extract information
    train_labels = dataset.labels[dataset.images_split ==
                                  dbac_data.DB_IMAGE_SPLITS.index('train')]
    val_labels = dataset.labels[dataset.images_split ==
                                dbac_data.DB_IMAGE_SPLITS.index('val')]
    test_labels = dataset.labels[dataset.images_split ==
                                 dbac_data.DB_IMAGE_SPLITS.index('test')]
    logger.info("Number of images (train, val, test): {}, {}, {}".format(
        len(train_labels), len(val_labels), len(test_labels)))

    train_exps = dataset.expressions[dataset.expressions_split ==
                                     dbac_data.DB_EXP_SPLITS.index('train')]
    test_exps = dataset.expressions[dataset.expressions_split ==
                                    dbac_data.DB_EXP_SPLITS.index('test')]
    logger.info("Number of expressions (train, test): {}, {}".format(
        len(train_exps), len(test_exps)))

    valid_prims = np.where(dataset.valid_primitives)[0]
    logger.info("Number of primitives: {}".format(len(valid_prims)))

    if comb_file:
        train_combs = dataset.combinations[dataset.combinations_split ==
                                           dbac_data.DB_COMB_SPLITS.index(
                                               'train')]
        test_combs = dataset.combinations[dataset.combinations_split ==
                                          dbac_data.DB_COMB_SPLITS.index(
                                              'test')]
        logger.info("Number of combinations (train, test): {}, {}".format(
            len(train_combs), len(test_combs)))
    else:
        train_combs, test_combs = None, None

    # compute detailed statistics
    info_exp, info_prim, info_comb = [], [], []
    prim_box, exp_box, comb_box = [], [], []
    for labels, combs, exps, prims in zip(
        [train_labels, val_labels, test_labels],
        [train_combs, train_combs, test_combs],
        [train_exps, train_exps, test_exps], [valid_prims] * 3):
        count_prim = np.zeros(len(prims))
        for p, prim in enumerate(prims):
            count_prim[p] = np.sum(labels[:, prim])
        mean, std, min, max = np.mean(count_prim), np.std(count_prim), np.min(
            count_prim), np.max(count_prim)
        info_prim.append((mean, std, min, max))
        prim_box.append(count_prim)

        count_exp = np.zeros(len(exps))
        for e, exp in enumerate(exps):
            op, v_a = exp[0], labels[:, int(exp[1])]
            v_b = labels[:, int(exp[2])] if exp[2] is not None else None
            count_exp[e] = np.sum(dbac_expression.eval_op(op, v_a, v_b))
        mean, std, min, max = np.mean(count_exp), np.std(count_exp), np.min(
            count_exp), np.max(count_exp)
        info_exp.append((mean, std, min, max))
        exp_box.append(count_exp)

        if comb_file:
            count_comb = np.zeros(len(combs))
            for c, comb in enumerate(combs):
                comb_tree = dbac_expression.list2exp_parse(comb)
                var_dic = {
                    p: labels[:, int(p)]
                    for p in dbac_expression.get_vars(comb_tree)
                }
                count_comb[c] = dbac_expression.eval_exp(comb_tree,
                                                         var_dic).sum()
            mean, std, min, max = np.mean(count_comb), np.std(
                count_comb), np.min(count_comb), np.max(count_comb)
            info_comb.append((mean, std, min, max))
            comb_box.append(count_comb)

    logger.info(
        "Primitives sample density (mean, std, min, max) train, val, test: {}".
        format(info_prim))
    logger.info(
        "Expression sample density (mean, std, min, max) train, val, test: {}".
        format(info_exp))
    logger.info(
        "Compositions sample density (mean, std, min, max) train, val, test: {}"
        .format(info_comb))
    if plot:
        import matplotlib.pyplot as plt
        f, (ax1, ax2, ax3) = plt.subplots(1, 3)
        ax1.boxplot(prim_box, labels=['Train', 'Val', 'Test'], showmeans=True)
        ax1.set_title('Primitives', fontsize=16)
        ax1.set_ylabel('# Positive Images', fontsize=12)
        ax1.tick_params(axis='x', labelsize=12)
        ax2.boxplot(exp_box, labels=['Train', 'Val', 'Test'], showmeans=True)
        ax2.set_title('Expressions', fontsize=16)
        ax2.tick_params(axis='x', labelsize=12)
        if comb_file:
            ax3.boxplot(comb_box,
                        labels=['Train', 'Val', 'Test'],
                        showmeans=True)
            ax3.set_title('Compositions', fontsize=16)
            ax3.tick_params(axis='x', labelsize=12)
        f.tight_layout()
        plt.show()
Code example #7
    def learning(self, images_path, labels, expressions, **kwargs):

        # training parameters
        batch_size, num_epochs = int(kwargs.get('batch_size', 32)), int(kwargs.get('num_epochs', 1e3))
        snap_int, snap_dir, log_dir = int(kwargs.get('snap_int', 250)), kwargs.get('snap_dir', None), kwargs.get(
            'log_dir', None)
        init_weights = kwargs.get('init_weights', None)
        snapshot = kwargs.get('snapshot', None)
        learning_rate = float(kwargs.get('learning_rate', 1e-5))
        alphas = [float(p) for p in kwargs.get('alphas', '10.0 1.0 0.1 1.0').split()]
        _logger.info("Training parameters: batch_size={}, num_epochs={}, snap_int={}, snap_dir={}, log_dir={}, "
                     "learning_rate={}, alphas={}, learn_feats={}, init_weights={}, norm_in={}, norm_out={},"
                     " snapshot={}, demorgan_reg={}".format(batch_size, num_epochs, snap_int, snap_dir, log_dir, learning_rate, alphas,
                                                            self.learn_feats, init_weights, self.norm_in, self.norm_out,
                                                            snapshot, self.demorgan_reg))

        # setup training network
        with self.graph.as_default() as graph:
            # Loss
            reg_loss = tf.reduce_mean(tf.losses.get_regularization_loss())
            norm_loss = tf.reduce_mean(0.5 * tf.pow(tf.norm(self.output_tn, axis=-1), 2.0))
            cls_loss = tf.losses.hinge_loss(self._ground_truth_ph, self.scores_tn, reduction=tf.losses.Reduction.MEAN)
            loss_tn = alphas[0] * norm_loss + alphas[1] * cls_loss + alphas[2] * reg_loss
            if self.demorgan_reg:
                dem_loss = tf.reduce_mean(0.5 * tf.pow(tf.norm(self.output_tn - self.dm_output_tn, axis=-1), 2.0))
                loss_tn = loss_tn + (alphas[3] * dem_loss)
                dem_loss_val, dem_loss_up, dem_loss_reset = _create_reset_metric(
                    tf.metrics.mean, 'epoch_dem_loss', values=dem_loss)
                tf.summary.scalar('dem_loss', dem_loss_val)

            pred = tf.greater(self.scores_tn, 0.0)
            # Metrics
            reg_loss_val, reg_loss_up, reg_loss_reset = _create_reset_metric(
                tf.metrics.mean, 'epoch_reg_loss', values=reg_loss)
            tf.summary.scalar('reg_loss', reg_loss_val)
            cls_loss_val, cls_loss_up, cls_loss_reset = _create_reset_metric(
                tf.metrics.mean, 'epoch_cls_loss', values=cls_loss)
            tf.summary.scalar('cls_loss', cls_loss_val)
            norm_loss_val, norm_loss_up, norm_loss_reset = _create_reset_metric(
                tf.metrics.mean, 'epoch_norm_loss', values=norm_loss)
            tf.summary.scalar('norm_loss', norm_loss_val)
            loss_val, loss_up, loss_reset = _create_reset_metric(
                tf.metrics.mean, 'epoch_loss', values=loss_tn)
            tf.summary.scalar('total_loss', loss_val)
            prec_val, prec_up, prec_reset = _create_reset_metric(
                tf.metrics.precision, 'epoch_prec', predictions=pred, labels=self._ground_truth_ph)
            tf.summary.scalar('Precision', prec_val)
            rec_val, rec_up, rec_reset = _create_reset_metric(
                tf.metrics.recall, 'epoch_rec', predictions=pred, labels=self._ground_truth_ph)
            tf.summary.scalar('Recall', rec_val)
            tf.summary.scalar('Fscore', (2 * prec_val * rec_val) / (prec_val + rec_val + 1e-6))
            summ_ops = tf.summary.merge_all()
            summ_writer = tf.summary.FileWriter(log_dir) if log_dir else None
            metrics_ops_reset = [reg_loss_reset, cls_loss_reset, norm_loss_reset, loss_reset, prec_reset, rec_reset]
            metrics_ops_update = [reg_loss_up, cls_loss_up, norm_loss_up, loss_up, prec_up, rec_up]
            if self.demorgan_reg:
                metrics_ops_reset += [dem_loss_reset]
                metrics_ops_update += [dem_loss_up]
            # Optimizer
            global_step_tn = tf.train.get_or_create_global_step(graph)
            optimizer = tf.train.AdamOptimizer(learning_rate)
            train_op = optimizer.minimize(loss_tn, global_step=global_step_tn, colocate_gradients_with_ops=True)
            init = tf.global_variables_initializer()
            tf_snap_tr = tf.train.Saver(max_to_keep=1)

        # Decompose expressions and compute labels
        _logger.info("Decomposing expressions...")
        valid_exps, pos_img_ites, neg_img_ites, exp_labels = [], [], [], []
        for exp_lst in expressions:
            for exp_term in dbac_expression.get_terms(dbac_expression.list2exp_parse(exp_lst)):
                term_lst = dbac_expression.exp2list_parse(exp_term)
                var_dic = {p: labels[:, int(p)] for p in dbac_expression.get_vars(exp_term)}
                term_labels = dbac_expression.eval_exp(exp_term, var_dic)
                if (term_lst not in valid_exps) and (exp_term.name != dbac_expression.OPS[0]) \
                        and (term_labels.sum() > 0) and (np.logical_not(term_labels).sum() > 0):
                    valid_exps.append(term_lst)
                    exp_labels.append(term_labels)
                    pos_img_ites.append(CycleIterator(list(np.where(term_labels)[0])))
                    neg_img_ites.append(CycleIterator(list(np.where(np.logical_not(term_labels))[0])))
        expressions = valid_exps
        exp_ite = CycleIterator(np.arange(len(expressions)).tolist())
        exp_labels = np.vstack(exp_labels).astype(np.float32)
        _logger.info("Total of expressions decomposed: {}".format(len(expressions)))

        # Initialization
        _logger.info("Initializing model...")
        self.tf_session.run(init)
        if self.init_vgg is not None:
            _logger.info("Loading features pre-trained weights")
            self.init_vgg(self.tf_session)
        if init_weights:
            _logger.info("Loading model pre-trained weights")
            self.load(init_weights)
        init_epoch = 0
        if snapshot:
            _logger.info("Loading from training snapshot")
            tf_snap_tr.restore(self.tf_session, snapshot)
            init_epoch = int((self.tf_session.run(global_step_tn) * batch_size) / len(expressions))

        # training loop
        _logger.info("Training...")
        for epoch in range(init_epoch, num_epochs):
            self.tf_session.run(metrics_ops_reset)
            for b in range(int(np.ceil(len(expressions) / batch_size))):
                # batch sampling
                b_exp_ids = [next(exp_ite) for _ in range(batch_size)]
                b_img_ids = [next(pos_img_ites[exp_id]) for _ in range(5) for exp_id in b_exp_ids]
                b_img_ids += [next(neg_img_ites[exp_id]) for _ in range(5) for exp_id in b_exp_ids]

                # compute image features
                if self.learn_feats:
                    b_img_feats = images_path[b_img_ids]
                else:
                    b_img_feats = self.feat_ext.compute(images_path[b_img_ids])

                # compute operations
                b_prims_rpr, b_op_switch = [], []
                for exp_id in b_exp_ids:
                    exp_tree = dbac_expression.list2exp_parse(expressions[exp_id])
                    b_op_switch.append({'NOT': 0, 'AND': 1, 'OR': 2}[exp_tree.name])
                    operand_a, operand_b = exp_tree.children if np.random.rand() > 0.5 else exp_tree.children[::-1]
                    b_prims_rpr.append(dbac_expression.exp2list_parse(operand_a))
                    b_prims_rpr.append(dbac_expression.exp2list_parse(operand_b))
                b_op_switch = np.array(b_op_switch)
                b_prims_rpr = self.inference(b_prims_rpr).reshape((len(b_exp_ids), 2 * self.dim))

                # compute labels
                b_exp_labels = exp_labels[b_exp_ids, :]
                b_exp_labels = b_exp_labels[:, b_img_ids]

                # run model
                self.tf_session.run(
                    [train_op, loss_tn] + metrics_ops_update,
                    feed_dict={self.is_training_ph: True,
                               self._images_ph: b_img_feats,
                               self._prims_rpr_ph: b_prims_rpr,
                               self._switch_ph: b_op_switch,
                               self._ground_truth_ph: b_exp_labels})

            if (epoch + 1) % 2 == 0:
                loss, prec, rec, summary = self.tf_session.run([loss_val, prec_val, rec_val, summ_ops])
                _logger.info("Epoch {}: Loss={:.4f}, Prec={:.2f}, Rec={:.2f}, Fsc={:.2f}"
                             .format((epoch + 1), loss, prec, rec, (2 * prec * rec) / (prec + rec + 1e-6)))
                if summ_writer:
                    summ_writer.add_summary(summary, global_step=epoch + 1)

            if snap_dir and (epoch + 1) % snap_int == 0:
                snap_file = os.path.join(snap_dir, 'nba_mlp_snap_E{}.npz'.format((epoch + 1)))
                self.save(snap_file)
                tf_snap_tr.save(self.tf_session, os.path.join(snap_dir, 'train.chk'), latest_filename='checkpoint.TRAIN')
                _logger.info("Model epoch {} snapshoted to {}".format(epoch + 1, snap_file))
Code example #8
def _test(db_name, db_dir, db_split_file, db_comb_file, primitives_file, model_name, model_file, output_dir, kwargs_str=None):
    # processing kwargs
    kwargs_dic = dbac_util.get_kwargs_dic(kwargs_str)
    logger.info("Kwargs dictionary: {}".format(kwargs_dic))

    # read dataset and partitions
    logger.info("Reading dataset and split")
    db = dbac_data.IDataset.factory(db_name, db_dir)
    db.load_split(db_split_file, db_comb_file)
    train_imgs_path = db.images_path[db.images_split == dbac_data.DB_IMAGE_SPLITS.index('train')]
    train_labels = db.labels[db.images_split == dbac_data.DB_IMAGE_SPLITS.index('train')]
    val_imgs_path = db.images_path[db.images_split == dbac_data.DB_IMAGE_SPLITS.index('val')]
    val_labels = db.labels[db.images_split == dbac_data.DB_IMAGE_SPLITS.index('val')]
    test_imgs_path = db.images_path[db.images_split == dbac_data.DB_IMAGE_SPLITS.index('test')]
    test_labels = db.labels[db.images_split == dbac_data.DB_IMAGE_SPLITS.index('test')]

    if db_comb_file:
        logger.info("Loading compositions...")
        train_exps = db.combinations[db.combinations_split == dbac_data.DB_COMB_SPLITS.index('train')]
        test_exps = db.combinations[db.combinations_split == dbac_data.DB_COMB_SPLITS.index('test')]
    else:
        logger.info("Loading single expressions...")
        train_exps = db.expressions[db.expressions_split == dbac_data.DB_EXP_SPLITS.index('train')]
        test_exps = db.expressions[db.expressions_split == dbac_data.DB_EXP_SPLITS.index('test')]

    # Set up feature extractor
    logger.info("Configuring Features Extractor")
    feat_extractor = dbac_feature_ext.IFeatureExtractor.factory(dbac_feature_ext.FEAT_TYPE[1], **kwargs_dic)
    feat_extractor.load()

    # set up primitive collection
    logger.info("Configuring Primitive Collection")
    prim_collection = dbac_primitives.IPrimitiveCollection.factory(dbac_primitives.PRIMITIVE_TYPES[0], **kwargs_dic)
    prim_collection.load(primitives_file)

    # setup model
    logger.info("Configuring Model")
    model = dbac_model.IModel.factory(model_name, feat_extractor, prim_collection, **kwargs_dic, is_train=False)
    model.load(model_file)

    # test model
    logger.info("Testing on seen expressions on training images...")
    train_scores = model.score(train_imgs_path, train_exps, **kwargs_dic)
    logger.info("Testing on seen expressions on validation images...")
    val_scores = model.score(val_imgs_path, train_exps, **kwargs_dic)
    logger.info("Testing on unseen expressions on test images...")
    test_scores = model.score(test_imgs_path, test_exps, **kwargs_dic)

    # save results
    logger.info("Computing results.")
    report_dic = dict()
    results_iter = zip(['train', 'val', 'test'], [train_exps, train_exps, test_exps],
                       [train_labels, val_labels, test_labels], [train_scores, val_scores, test_scores],
                       [train_imgs_path, val_imgs_path, test_imgs_path])
    for key, exps, labels, scores, images in results_iter:
        # compute ground truth labels
        ground_truth = np.zeros_like(scores)
        for idx, exp_lst in enumerate(exps):
            exp_tree = dbac_expression.list2exp_parse(exp_lst)
            var_dic = {p: labels[:, int(p)] for p in dbac_expression.get_vars(exp_tree)}
            ground_truth[idx] = dbac_expression.eval_exp(exp_tree, var_dic)
        # fill report dictionary
        report_dic['_'.join([key, 'exps'])] = exps
        report_dic['_'.join([key, 'imgs'])] = images
        report_dic['_'.join([key, 'gt'])] = ground_truth
        report_dic['_'.join([key, 'pred'])] = scores
    result_file = os.path.join(output_dir, 'results.npy')
    np.save(result_file, report_dic)
    logger.info("Results file saved to {}.".format(result_file))