Example #1
def _trex_method(args, tree, X_test, X_train, y_train, seed, logger):

    global trex_explainer

    # train TREX
    if trex_explainer is None:
        trex_explainer = trex.TreeExplainer(tree, X_train, y_train,
                                            tree_kernel=args.tree_kernel,
                                            random_state=seed,
                                            true_label=args.true_label,
                                            kernel_model=args.kernel_model,
                                            verbose=args.verbose,
                                            val_frac=args.val_frac,
                                            logger=logger)

    # aggregate each train instance's contribution over all test instances
    contributions_sum = np.zeros(X_train.shape[0])

    train_weight = trex_explainer.get_weight()[0]
    for i in tqdm.tqdm(range(X_test.shape[0])):
        train_sim = trex_explainer.similarity(X_test[[i]])[0]
        contributions = train_weight * train_sim
        contributions_sum += contributions

    # sort train instances with the highest positive influence first
    train_order = np.argsort(contributions_sum)[::-1]
    return train_order
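
A minimal NumPy-only sketch of the aggregation used above; the random `train_weight` and `train_sim` arrays are hypothetical stand-ins for the TREX explainer's `get_weight()[0]` and `similarity(X_test)` outputs. Contributions are weight times similarity, summed over the test set, and the ordering is the descending argsort.

import numpy as np

rng = np.random.default_rng(0)
n_train, n_test = 100, 10

# hypothetical stand-ins for the explainer outputs
train_weight = rng.normal(size=n_train)      # per-train-instance weight
train_sim = rng.random((n_test, n_train))    # similarity of each test instance to each train instance

# contribution of each train instance, summed over all test instances
contributions_sum = (train_weight * train_sim).sum(axis=0)

# highest positive influence first
train_order = np.argsort(contributions_sum)[::-1]
print(train_order[:5])
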
Example #2
def _trex_method(args, model, test_ndx, X_test, X_train, y_train,
                 seed, logger=None):
    """
    Explains the predictions of each test instance.
    """
    start = time.time()
    explainer = trex.TreeExplainer(model, X_train, y_train,
                                   tree_kernel=args.tree_kernel,
                                   random_state=seed,
                                   true_label=args.true_label,
                                   kernel_model=args.kernel_model,
                                   verbose=args.verbose,
                                   val_frac=args.val_frac,
                                   logger=logger)
    fine_tune = time.time() - start

    start = time.time()
    explainer.explain(X_test[test_ndx].reshape(1, -1))
    test_time = time.time() - start

    return fine_tune, test_time
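
The same two-phase timing pattern, sketched generically with `time.perf_counter()`; the `fit_fn` and `explain_fn` callables are hypothetical placeholders for constructing a TreeExplainer and calling `explain`. `perf_counter` is somewhat more reliable than `time.time()` for short intervals.

import time

def time_phases(fit_fn, explain_fn):
    """Return (fit_seconds, explain_seconds) for two callables."""
    start = time.perf_counter()
    fit_fn()
    fit_time = time.perf_counter() - start

    start = time.perf_counter()
    explain_fn()
    explain_time = time.perf_counter() - start

    return fit_time, explain_time

# usage with trivial stand-in workloads
print(time_phases(lambda: sum(range(10 ** 6)), lambda: sorted(range(10 ** 5))))
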
Example #3
def experiment(args, logger, out_dir, seed):

    # get model and data
    clf = model_util.get_classifier(args.tree_type,
                                    n_estimators=args.n_estimators,
                                    max_depth=args.max_depth,
                                    random_state=args.rs)

    X_train, X_test, y_train, y_test, label = data_util.get_data(
        args.dataset, random_state=args.rs, data_dir=args.data_dir)

    # reduce train size
    if 0.0 < args.train_frac < 1.0:
        n_train = int(X_train.shape[0] * args.train_frac)
        X_train, y_train = X_train[:n_train], y_train[:n_train]
    data = X_train, y_train, X_test, y_test

    logger.info('train instances: {}'.format(len(X_train)))
    logger.info('test instances: {}'.format(len(X_test)))
    logger.info('no. features: {}'.format(X_train.shape[1]))

    logger.info('no. trees: {:,}'.format(args.n_estimators))
    logger.info('max depth: {}'.format(args.max_depth))

    # train a tree ensemble
    logger.info('fitting tree ensemble...')
    tree = clf.fit(X_train, y_train)

    if args.teknn:

        # transform data
        extractor = trex.TreeExtractor(tree, tree_kernel=args.tree_kernel)

        logger.info('transforming training data...')
        X_train_alt = extractor.fit_transform(X_train)

        logger.info('transforming test data...')
        X_test_alt = extractor.transform(X_test)

        train_label = y_train if args.true_label else tree.predict(X_train)

        # tune and train teknn
        start = time.time()
        logger.info('TE-KNN...')
        if args.k:
            knn_clf = KNeighborsClassifier(n_neighbors=args.k,
                                           weights='uniform')
            knn_clf = knn_clf.fit(X_train_alt, train_label)
        else:
            knn_clf = exp_util.tune_knn(tree,
                                        X_train,
                                        X_train_alt,
                                        train_label,
                                        args.val_frac,
                                        seed=seed,
                                        logger=logger)

        logger.info('tune + train time: {:.3f}s'.format(time.time() - start))

        start = time.time()
        logger.info('generating predictions...')
        results = _get_knn_predictions(tree,
                                       knn_clf,
                                       X_test,
                                       X_test_alt,
                                       y_train,
                                       pred_size=args.pred_size,
                                       out_dir=out_dir,
                                       logger=logger)
        logger.info('time: {:.3f}s'.format(time.time() - start))

        # save results
        if results:
            results['n_neighbors'] = knn_clf.get_params()['n_neighbors']
            np.save(os.path.join(out_dir, 'tree.npy'), results['tree'])
            np.save(os.path.join(out_dir, 'surrogate.npy'), results['teknn'])

    if args.trex:

        start = time.time()
        explainer = trex.TreeExplainer(tree,
                                       X_train,
                                       y_train,
                                       tree_kernel=args.tree_kernel,
                                       kernel_model=args.kernel_model,
                                       random_state=args.rs,
                                       logger=logger,
                                       true_label=not args.true_label,
                                       val_frac=args.val_frac)

        logger.info('fit time: {:.3f}s'.format(time.time() - start))

        start = time.time()
        logger.info('generating predictions...')
        results = _get_trex_predictions(tree, explainer, data)
        logger.info('time: {:.3f}s'.format(time.time() - start))

        results['C'] = explainer.C

        # save data
        np.save(os.path.join(out_dir, 'tree.npy'), results['tree'])
        np.save(os.path.join(out_dir, 'surrogate.npy'), results['trex'])
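
A scikit-learn-only sketch of the TE-KNN surrogate idea above (the real code uses `trex.TreeExtractor` and `exp_util.tune_knn`): encode each instance by the leaves it reaches in the ensemble, fit a KNN classifier on that encoding against the ensemble's predictions, and measure how often the surrogate agrees with the tree on the test set.

import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import OneHotEncoder

X, y = make_classification(n_samples=600, n_features=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)

# tree ensemble to be approximated
tree = RandomForestClassifier(n_estimators=50, random_state=1).fit(X_train, y_train)

# leaf-index encoding: one column per tree, one-hot encoded (a "leaf_output"-style kernel)
encoder = OneHotEncoder(handle_unknown='ignore')
X_train_alt = encoder.fit_transform(tree.apply(X_train))
X_test_alt = encoder.transform(tree.apply(X_test))

# KNN surrogate trained on the ensemble's own predictions
knn_clf = KNeighborsClassifier(n_neighbors=5).fit(X_train_alt, tree.predict(X_train))

fidelity = np.mean(knn_clf.predict(X_test_alt) == tree.predict(X_test))
print('surrogate fidelity: {:.3f}'.format(fidelity))
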
Example #4
def experiment(args, logger, out_dir, seed):
    """
    Main method that trains a tree ensemble, flips a percentage of train labels, prioritizes train
    instances using various methods, and computes how effective each method is at cleaning the data.
    """

    # get model and data
    clf = model_util.get_classifier(args.tree_type,
                                    n_estimators=args.n_estimators,
                                    max_depth=args.max_depth,
                                    random_state=seed)

    X_train, X_test, y_train, y_test, label = data_util.get_data(args.dataset,
                                                                 random_state=seed,
                                                                 data_dir=args.data_dir)

    # reduce train size
    if 0.0 < args.train_frac < 1.0:
        n_train = int(X_train.shape[0] * args.train_frac)
        X_train, y_train = X_train[:n_train], y_train[:n_train]
    data = X_train, y_train, X_test, y_test

    logger.info('no. train instances: {:,}'.format(len(X_train)))
    logger.info('no. test instances: {:,}'.format(len(X_test)))
    logger.info('no. features: {:,}'.format(X_train.shape[1]))

    # add noise
    y_train_noisy, noisy_ndx = data_util.flip_labels(y_train, k=args.flip_frac, random_state=seed)
    noisy_ndx = np.array(sorted(noisy_ndx))
    logger.info('no. noisy labels: {:,}'.format(len(noisy_ndx)))

    # train a tree ensemble on the clean and noisy labels
    model = clone(clf).fit(X_train, y_train)
    model_noisy = clone(clf).fit(X_train, y_train_noisy)

    # show model performance before and after noise
    logger.info('\nBefore noise:')
    model_util.performance(model, X_train, y_train, X_test=X_test, y_test=y_test, logger=logger)
    logger.info('\nAfter noise:')
    model_util.performance(model_noisy, X_train, y_train_noisy, X_test=X_test, y_test=y_test, logger=logger)

    # check accuracy before and after noise
    acc_test_clean = accuracy_score(y_test, model.predict(X_test))
    acc_test_noisy = accuracy_score(y_test, model_noisy.predict(X_test))

    # find how many corrupted/non-corrupted labels were incorrectly predicted
    if not args.true_label:
        logger.info('\nUsing predicted labels:')
        predicted_labels = model_noisy.predict(X_train).flatten()
        incorrect_ndx = np.where(y_train_noisy != predicted_labels)[0]
        incorrect_corrupted_ndx = np.intersect1d(noisy_ndx, incorrect_ndx)
        logger.info('incorrectly predicted corrupted labels: {:,}'.format(incorrect_corrupted_ndx.shape[0]))
        logger.info('total number of incorrectly predicted labels: {:,}'.format(incorrect_ndx.shape[0]))

    # no. of train instances to check, and the checkpoint interval as a fraction of the train set
    n_check = int(len(y_train) * args.check_pct)
    interval = (n_check / len(y_train)) / args.n_plot_points

    # random method
    logger.info('\nordering by random...')
    start = time.time()
    ckpt_ndx, fix_ndx = _random_method(noisy_ndx, y_train, interval,
                                       to_check=n_check,
                                       random_state=seed)
    check_pct, random_res = _interval_performance(ckpt_ndx, fix_ndx, noisy_ndx, clf, data, acc_test_noisy)
    logger.info('time: {:.3f}s'.format(time.time() - start))
    np.save(os.path.join(out_dir, 'random.npy'), random_res)

    # save global lines
    np.save(os.path.join(out_dir, 'test_clean.npy'), acc_test_clean)
    np.save(os.path.join(out_dir, 'check_pct.npy'), check_pct)

    # tree loss method
    logger.info('\nordering by tree loss...')
    start = time.time()

    y_train_proba = model_noisy.predict_proba(X_train)
    ckpt_ndx, fix_ndx, _, _ = _loss_method(noisy_ndx, y_train_proba, y_train_noisy, interval, to_check=n_check)
    _, tree_loss_res = _interval_performance(ckpt_ndx, fix_ndx, noisy_ndx, clf, data, acc_test_noisy)

    logger.info('time: {:.3f}s'.format(time.time() - start))
    np.save(os.path.join(out_dir, 'tree.npy'), tree_loss_res)

    # trex method
    if args.trex:
        logger.info('\nordering by TREX...')
        start = time.time()
        explainer = trex.TreeExplainer(model_noisy, X_train, y_train_noisy,
                                       tree_kernel=args.tree_kernel,
                                       random_state=seed,
                                       true_label=args.true_label,
                                       kernel_model=args.kernel_model,
                                       verbose=args.verbose,
                                       val_frac=args.val_frac,
                                       logger=logger)

        ckpt_ndx, fix_ndx, _ = _our_method(explainer, noisy_ndx, y_train, n_check, interval)
        check_pct, trex_res = _interval_performance(ckpt_ndx, fix_ndx, noisy_ndx, clf, data, acc_test_noisy)

        logger.info('time: {:.3f}s'.format(time.time() - start))
        np.save(os.path.join(out_dir, 'method.npy'), trex_res)

        # trex loss method
        logger.info('\nordering by TREX loss...')
        start = time.time()

        y_train_proba = explainer.predict_proba(X_train)
        ckpt_ndx, fix_ndx, _, _ = _loss_method(noisy_ndx, y_train_proba, y_train_noisy, interval, to_check=n_check)
        _, trex_loss_res = _interval_performance(ckpt_ndx, fix_ndx, noisy_ndx, clf, data, acc_test_noisy)

        logger.info('time: {:.3f}s'.format(time.time() - start))
        np.save(os.path.join(out_dir, 'method_loss.npy'), trex_loss_res)

    # influence method
    if args.tree_type == 'cb' and args.inf_k is not None:
        logger.info('\nordering by leafinfluence...')
        start = time.time()

        model_path = '.model.json'
        model_noisy.save_model(model_path, format='json')

        if args.inf_k == -1:
            update_set = 'AllPoints'
        elif args.inf_k == 0:
            update_set = 'SinglePoint'
        else:
            update_set = 'TopKLeaves'

        leaf_influence = CBLeafInfluenceEnsemble(model_path, X_train, y_train_noisy, k=args.inf_k,
                                                 learning_rate=model.learning_rate_, update_set=update_set)
        ckpt_ndx, fix_ndx, _, _ = _influence_method(leaf_influence, noisy_ndx, X_train, y_train, y_train_noisy,
                                                    interval, to_check=n_check)
        _, leafinfluence_res = _interval_performance(ckpt_ndx, fix_ndx, noisy_ndx, clf, data, acc_test_noisy)

        logger.info('time: {:.3f}s'.format(time.time() - start))
        np.save(os.path.join(out_dir, 'method.npy'), leafinfluence_res)

    # MAPLE method
    if args.maple:
        logger.info('\nordering by MAPLE...')
        start = time.time()

        train_label = y_train_noisy if args.true_label else model_noisy.predict(X_train)
        maple_exp = MAPLE(X_train, train_label, X_train, train_label, verbose=args.verbose, dstump=False)
        ckpt_ndx, fix_ndx, map_scores, map_order = _maple_method(maple_exp, X_train, noisy_ndx, interval,
                                                                 to_check=n_check)
        _, maple_res = _interval_performance(ckpt_ndx, fix_ndx, noisy_ndx, clf, data, acc_test_noisy)

        logger.info('time: {:.3f}s'.format(time.time() - start))
        np.save(os.path.join(out_dir, 'method.npy'), maple_res)

    # TEKNN method
    if args.teknn:
        logger.info('\nordering by teknn...')
        start = time.time()

        # transform the data
        extractor = trex.TreeExtractor(model_noisy, tree_kernel=args.tree_kernel)
        X_train_alt = extractor.fit_transform(X_train)
        train_label = y_train if args.true_label else model_noisy.predict(X_train)

        # tune and train teknn
        knn_clf = exp_util.tune_knn(model_noisy, X_train, X_train_alt, train_label, args.val_frac,
                                    seed=seed, logger=logger)

        ckpt_ndx, fix_ndx, _ = _knn_method(knn_clf, X_train_alt, noisy_ndx, interval, to_check=n_check)
        _, teknn_res = _interval_performance(ckpt_ndx, fix_ndx, noisy_ndx, clf, data, acc_test_noisy)

        logger.info('time: {:.3f}s'.format(time.time() - start))
        np.save(os.path.join(out_dir, 'method.npy'), teknn_res)

        # TEKNN loss method
        logger.info('\nordering by teknn loss...')
        start = time.time()
        y_train_proba = knn_clf.predict_proba(X_train_alt)

        ckpt_ndx, fix_ndx, _, _ = _loss_method(noisy_ndx, y_train_proba, y_train_noisy, interval, to_check=n_check)
        _, teknn_loss_res = _interval_performance(ckpt_ndx, fix_ndx, noisy_ndx, clf, data, acc_test_noisy)

        logger.info('time: {:.3f}s'.format(time.time() - start))
        np.save(os.path.join(out_dir, 'method_loss.npy'), teknn_loss_res)

    # MMD-Critic method
    if args.mmd:
        logger.info('\nordering by mmd-critic...')
        start = time.time()
        ckpt_ndx, fix_ndx = _mmd_method(model_noisy, X_train, y_train_noisy, noisy_ndx, interval, n_check)
        _, mmd_res = _interval_performance(ckpt_ndx, fix_ndx, noisy_ndx, clf, data, acc_test_noisy)

        logger.info('time: {:.3f}s'.format(time.time() - start))
        np.save(os.path.join(out_dir, 'method.npy'), mmd_res)

    # Prototype method
    if args.proto:
        logger.info('\nordering by proto...')
        start = time.time()
        ckpt_ndx, fix_ndx = _proto_method(model_noisy, X_train, y_train_noisy, noisy_ndx, interval, n_check)
        _, proto_res = _interval_performance(ckpt_ndx, fix_ndx, noisy_ndx, clf, data, acc_test_noisy)

        logger.info('time: {:.3f}s'.format(time.time() - start))
        np.save(os.path.join(out_dir, 'method.npy'), proto_res)
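
A self-contained sketch of the cleaning protocol in the experiment above, with scikit-learn stand-ins for the project helpers (`data_util.flip_labels`, `_loss_method`, `_interval_performance`): flip a fraction of train labels, rank training instances by the noisy model's loss on its own labels, restore the true label for the top-ranked instances, retrain, and compare test accuracy.

import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split

rng = np.random.default_rng(1)
X, y = make_classification(n_samples=1000, n_features=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)

# flip 20% of the train labels
noisy_ndx = rng.choice(len(y_train), size=int(0.2 * len(y_train)), replace=False)
y_noisy = y_train.copy()
y_noisy[noisy_ndx] = 1 - y_noisy[noisy_ndx]

model_noisy = RandomForestClassifier(n_estimators=50, max_depth=5, random_state=1).fit(X_train, y_noisy)

# rank train instances by the noisy model's loss on its own (possibly corrupted) labels
proba_own_label = model_noisy.predict_proba(X_train)[np.arange(len(y_noisy)), y_noisy]
check_order = np.argsort(proba_own_label)   # lowest probability (highest loss) first

# "check" the top 30% and restore their true labels
n_check = int(0.3 * len(y_train))
fix_ndx = check_order[:n_check]
y_fixed = y_noisy.copy()
y_fixed[fix_ndx] = y_train[fix_ndx]

model_fixed = RandomForestClassifier(n_estimators=50, max_depth=5, random_state=1).fit(X_train, y_fixed)

found = np.intersect1d(fix_ndx, noisy_ndx).shape[0]
print('flips found: {}/{}'.format(found, len(noisy_ndx)))
print('test acc noisy: {:.3f}'.format(accuracy_score(y_test, model_noisy.predict(X_test))))
print('test acc fixed: {:.3f}'.format(accuracy_score(y_test, model_fixed.predict(X_test))))
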
Example #5
def experiment(args, logger, out_dir, seed):

    # get model and data
    clf = model_util.get_classifier(args.tree_type,
                                    n_estimators=args.n_estimators,
                                    max_depth=args.max_depth,
                                    random_state=seed)

    data = data_util.get_data(args.dataset,
                              random_state=seed,
                              data_dir=args.data_dir,
                              return_feature=True)
    X_train, X_test, y_train, y_test, label, feature = data

    logger.info('train instances: {:,}'.format(len(X_train)))
    logger.info('test instances: {:,}'.format(len(X_test)))
    logger.info('no. features: {:,}'.format(X_train.shape[1]))

    # train a tree ensemble and explainer
    tree = clone(clf).fit(X_train, y_train)
    model_util.performance(tree,
                           X_train,
                           y_train,
                           X_test,
                           y_test,
                           logger=logger)

    original_auc = roc_auc_score(y_test, tree.predict_proba(X_test)[:, 1])
    original_acc = accuracy_score(y_test, tree.predict(X_test))

    # train TREX
    explainer = trex.TreeExplainer(
        tree,
        X_train,
        y_train,
        tree_kernel=args.tree_kernel,
        random_state=seed,
        kernel_model=args.kernel_model,
        kernel_model_kernel=args.kernel_model_kernel,
        true_label=args.true_label)

    # get missed test instances
    missed_indices = np.where(tree.predict(X_test) != y_test)[0]

    np.random.seed(seed)
    explain_indices = np.random.choice(
        missed_indices,
        replace=False,
        size=int(len(missed_indices) * args.sample_frac))

    logger.info('no. incorrect instances: {:,}'.format(len(missed_indices)))
    logger.info('no. explain instances: {:,}'.format(len(explain_indices)))

    # compute total impact of train instances on test instances
    contributions = explainer.explain(X_test[explain_indices],
                                      y=y_test[explain_indices])
    impact_sum = np.sum(contributions, axis=0)

    # get train instances with a negative impact, ordered most negative first
    neg_contributors = np.where(impact_sum < 0)[0]
    neg_impact = impact_sum[neg_contributors]
    neg_contributors = neg_contributors[np.argsort(neg_impact)]

    # remove offending train instances in segments and measure performance
    aucs = []
    accs = []
    n_removed = []
    for i in tqdm.tqdm(range(args.n_iterations + 1)):

        # remove these instances from the train data
        delete_ndx = neg_contributors[:args.n_remove * i]
        new_X_train = np.delete(X_train, delete_ndx, axis=0)
        new_y_train = np.delete(y_train, delete_ndx)

        tree = clone(clf).fit(new_X_train, new_y_train)

        aucs.append(roc_auc_score(y_test, tree.predict_proba(X_test)[:, 1]))
        accs.append(accuracy_score(y_test, tree.predict(X_test)))

        n_removed.append(args.n_remove * i)

    # save results
    result = tree.get_params()
    result['original_auc'] = original_auc
    result['original_acc'] = original_acc
    result['auc'] = aucs
    result['acc'] = accs
    result['n_remove'] = n_removed
    np.save(os.path.join(out_dir, 'results.npy'), result)
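
A sketch of the removal-and-retrain loop above using only scikit-learn; a random `impact_sum` vector is a hypothetical stand-in for the summed TREX contributions, so this shows the bookkeeping (segmented deletion, retraining, metric tracking) rather than the explanation method itself.

import numpy as np
from sklearn.base import clone
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=800, n_features=20, random_state=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=2)
clf = RandomForestClassifier(n_estimators=50, random_state=2)

# hypothetical stand-in for the summed TREX contributions (impact_sum)
rng = np.random.default_rng(2)
impact_sum = rng.normal(size=len(X_train))

# negatively impactful train instances, most negative first
neg_contributors = np.where(impact_sum < 0)[0]
neg_contributors = neg_contributors[np.argsort(impact_sum[neg_contributors])]

n_remove, aucs, n_removed = 20, [], []
for i in range(5):
    delete_ndx = neg_contributors[:n_remove * i]
    new_X_train = np.delete(X_train, delete_ndx, axis=0)
    new_y_train = np.delete(y_train, delete_ndx)

    tree = clone(clf).fit(new_X_train, new_y_train)
    aucs.append(roc_auc_score(y_test, tree.predict_proba(X_test)[:, 1]))
    n_removed.append(n_remove * i)

print(list(zip(n_removed, np.round(aucs, 3))))
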
Example #6
def experiment(args, logger, out_dir, seed):

    # get model and data
    clf = model_util.get_classifier(args.tree_type,
                                    n_estimators=args.n_estimators,
                                    max_depth=args.max_depth,
                                    random_state=seed)

    data = data_util.get_data(args.dataset,
                              random_state=seed,
                              data_dir=args.data_dir,
                              return_image_id=True,
                              test_size=args.test_size)
    X_train, X_test, y_train, y_test, label = data

    logger.info('train instances: {}'.format(len(X_train)))
    logger.info('test instances: {}'.format(len(X_test)))
    logger.info('labels: {}'.format(label))

    # optionally reduce dimensionality with PCA; otherwise use the raw features
    if args.pca_components is not None:
        logger.info('reducing dimensionality from {} to {} using PCA...'.format(
            X_train.shape[1], args.pca_components))
        pca = PCA(args.pca_components, random_state=args.rs).fit(X_train)
        X_train_pca = pca.transform(X_train)
        X_test_pca = pca.transform(X_test)
    else:
        X_train_pca, X_test_pca = X_train, X_test

    # fit a tree ensemble and an explainer for that tree ensemble
    logger.info('fitting {}...'.format(args.tree_type))
    tree = clone(clf).fit(X_train_pca, y_train)

    # show GBDT performance
    model_util.performance(tree,
                           X_train_pca,
                           y_train,
                           X_test_pca,
                           y_test,
                           logger=logger)

    logger.info('fitting TREX...')
    explainer = trex.TreeExplainer(tree,
                                   X_train_pca,
                                   y_train,
                                   tree_kernel=args.tree_kernel,
                                   random_state=seed,
                                   kernel_model=args.kernel_model,
                                   val_frac=args.val_frac,
                                   verbose=args.verbose,
                                   true_label=args.true_label,
                                   cv=2,
                                   logger=logger)

    # pick a random test instance to explain
    if args.random_test:
        np.random.seed(seed)
        test_ndx = np.random.choice(len(y_test))

    # pick a random mispredicted test instance to explain
    else:
        # y_test_label = explainer.le_.transform(y_test)
        # test_dist = exp_util.instance_loss(tree.predict_proba(X_test_pca), y_test_label)
        test_dist = exp_util.instance_loss(tree.predict_proba(X_test_pca),
                                           y_test)
        test_dist_ndx = np.argsort(test_dist)[::-1]
        np.random.seed(seed)
        test_ndx = np.random.choice(test_dist_ndx[:50])

    x_test = X_test_pca[test_ndx].reshape(1, -1)
    test_pred = tree.predict(x_test)[0]
    test_actual = y_test[test_ndx]

    # compute the impact of each training instance
    impact = explainer.explain(x_test)[0]
    alpha = explainer.get_weight()[0]
    sim = explainer.similarity(x_test)[0]

    # sort the training instances by impact in descending order
    sort_ndx = np.argsort(impact)[::-1]

    # matplotlib settings
    plt.rc('font', family='serif')
    plt.rc('xtick', labelsize=13)
    plt.rc('ytick', labelsize=13)
    plt.rc('axes', labelsize=13)
    plt.rc('axes', titlesize=13)
    plt.rc('legend', fontsize=11)
    plt.rc('legend', title_fontsize=11)
    plt.rc('lines', linewidth=1)
    plt.rc('lines', markersize=6)

    # inches
    width = 5.5  # NeurIPS 2020 text width
    width, height = set_size(width=width * 3, fraction=1, subplots=(1, 3))
    fig, axs = plt.subplots(2,
                            1 + args.topk_train * 2,
                            figsize=(width, height))

    print(axs.shape)

    # plot the test image
    identifier = 'test_id{}'.format(test_ndx)
    _display_image(args,
                   X_test[test_ndx],
                   identifier=identifier,
                   predicted=test_pred,
                   actual=test_actual,
                   ax=axs[0][0])
    plt.setp(axs[0][0].spines.values(), color='blue')

    topk_train = args.topk_train if args.show_negatives else args.topk_train * 2

    # show positive train images
    for i, train_ndx in enumerate(sort_ndx[:topk_train]):
        i += 1
        identifier = 'train_id{}'.format(train_ndx)
        train_pred = tree.predict(X_train_pca[train_ndx].reshape(1, -1))[0]
        similarity = sim[train_ndx] if args.show_similarity else None
        weight = alpha[train_ndx] if args.show_weight else None
        plt.setp(axs[0][i].spines.values(), color='green')
        _display_image(args,
                       X_train[train_ndx],
                       ax=axs[0][i],
                       identifier=identifier,
                       predicted=train_pred,
                       actual=y_train[train_ndx],
                       similarity=similarity,
                       weight=weight)

    # show negative train images
    if args.show_negatives:
        for i, train_ndx in enumerate(sort_ndx[::-1][:topk_train]):
            i += 1 + args.topk_train
            identifier = 'train_id{}'.format(train_ndx)
            train_pred = tree.predict(X_train_pca[train_ndx].reshape(1, -1))[0]
            similarity = sim[train_ndx] if args.show_similarity else None
            weight = alpha[train_ndx] if args.show_weight else None
            plt.setp(axs[0][i].spines.values(), color='red')
            _display_image(args,
                           X_train[train_ndx],
                           ax=axs[0][i],
                           identifier=identifier,
                           predicted=train_pred,
                           actual=y_train[train_ndx],
                           similarity=similarity,
                           weight=weight)

    plt.savefig(os.path.join(out_dir, 'plot.pdf'),
                format='pdf',
                bbox_inches='tight')
    plt.show()

    # show highest weighted and lowest weighted samples for each class
    alpha_indices = np.argsort(alpha)

    print(alpha_indices)

    # plot highest negative weighted samples
    for i, train_ndx in enumerate(alpha_indices[:topk_train]):
        i += 1
        identifier = 'train_id{}'.format(train_ndx)
        train_pred = tree.predict(X_train_pca[train_ndx].reshape(1, -1))[0]
        similarity = sim[train_ndx] if args.show_similarity else None
        weight = alpha[train_ndx] if args.show_weight else None
        plt.setp(axs[1][i].spines.values(), color='red')
        _display_image(args,
                       X_train[train_ndx],
                       ax=axs[1][i],
                       identifier=identifier,
                       predicted=train_pred,
                       actual=y_train[train_ndx],
                       similarity=similarity,
                       weight=weight)

    # plot highest positive weighted samples
    for i, train_ndx in enumerate(alpha_indices[::-1][:topk_train]):
        i += 1 + args.topk_train
        identifier = 'train_id{}'.format(train_ndx)
        train_pred = tree.predict(X_train_pca[train_ndx].reshape(1, -1))[0]
        similarity = sim[train_ndx] if args.show_similarity else None
        weight = alpha[train_ndx] if args.show_weight else None
        plt.setp(axs[1][i].spines.values(), color='green')
        _display_image(args,
                       X_train[train_ndx],
                       ax=axs[1][i],
                       identifier=identifier,
                       predicted=train_pred,
                       actual=y_train[train_ndx],
                       similarity=similarity,
                       weight=weight)
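
A compact sketch of the image-grid plotting above using scikit-learn's digits data; a random `impact` vector is a hypothetical stand-in for `explainer.explain(x_test)[0]`, so only the layout (the test image followed by its top-k "influential" training images) is illustrated.

import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits

digits = load_digits()
X_train, X_test = digits.images[:1500], digits.images[1500:]
y_train, y_test = digits.target[:1500], digits.target[1500:]

rng = np.random.default_rng(3)
test_ndx = 0
impact = rng.normal(size=len(X_train))        # hypothetical stand-in for TREX impact scores
sort_ndx = np.argsort(impact)[::-1]           # most positively influential first

topk = 4
fig, axs = plt.subplots(1, 1 + topk, figsize=(2 * (1 + topk), 2))
axs[0].imshow(X_test[test_ndx], cmap='gray')
axs[0].set_title('test (label {})'.format(y_test[test_ndx]))
for i, train_ndx in enumerate(sort_ndx[:topk], start=1):
    axs[i].imshow(X_train[train_ndx], cmap='gray')
    axs[i].set_title('train (label {})'.format(y_train[train_ndx]))
for ax in axs:
    ax.axis('off')
plt.tight_layout()
plt.show()
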