Example #1
def run_main(args, datasets, model, exp_logger):
    # The CDS task additionally needs candidate utterance data attached to the model
    if args.task == 'cds':
        utt_data = load_candidates(args)
        model.add_candidate_data(*utt_data)

    # Collect the knowledge-base label names when the KB is enabled
    kb_labels = {}
    if args.use_kb:
        kb_labels['intent'] = list(model.mappings['intent'].keys())
        kb_labels['action'] = list(model.mappings['action'].keys())

    exp_logger.init_tb_writers()
    run_train(args, datasets, model, exp_logger, kb_labels)

    if args.do_eval:
        result = run_eval(args,
                          datasets,
                          model,
                          exp_logger,
                          kb_labels,
                          split='test')
        # Suffix each metric with the run's filename so results from different runs stay distinguishable
        results = {f'{k}_{args.filename}': v for k, v in result.items()}
        print('Test Results -', results)
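For orientation, run_main only reads task, use_kb, do_eval and filename from args. The throwaway sketch below builds such a namespace; the values and the bare Namespace construction are illustrative only, not the project's real argument parser.

# Illustration only: the attribute names mirror what run_main dereferences above;
# the values and the plain Namespace are stand-ins for the project's actual parser.
from argparse import Namespace

args = Namespace(
    task='cds',       # 'cds' additionally triggers load_candidates / add_candidate_data
    use_kb=True,      # exposes intent/action label names from model.mappings
    do_eval=True,     # evaluates on the test split after training
    filename='run1',  # suffix appended to every reported metric key
)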
Example #2
    # Gathers the input arguments
    args = get_arguments()

    # Gathering variables from arguments
    dataset = args.dataset
    descriptor = args.descriptor
    step = 'val'
    fold = args.fold
    type = args.type
    meta = args.mh

    # Random seed for experimental consistency
    np.random.seed(fold-1)

    # Loads the predictions and labels
    preds, y = l.load_candidates(dataset, step, fold)

    # If descriptor is global-based
    if descriptor == 'global':
        # Gets the global predictors
        preds = preds[:, :35]

    # If descriptor is cnn-based
    elif descriptor == 'cnn':
        # Gets the CNN predictors
        preds = preds[:, 35:]

    # Checks if the type of used ensemble was weight-based
    if type == 'weight':
        # Defining function to be optimized
        opt_fn = e.weighted_classifiers(preds, y)
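Example #2 breaks off right after the objective is defined. Purely to illustrate how such an objective could be driven, the sketch below uses a plain random search with made-up data and a made-up scoring rule; it is not the project's optimizer, whose saved History the last snippet below reloads.

# Illustration only: a plain random search standing in for the project's metaheuristic.
# The predictions, labels and objective below are synthetic and hedge the assumed
# contract of e.weighted_classifiers (score a weight vector, lower is better).
import numpy as np

rng = np.random.default_rng(0)
preds = rng.random((500, 35))            # e.g. 35 global predictors
y = rng.integers(0, 2, size=500)

def opt_fn(w):
    # Assumed contract: weighted vote over the candidate predictions, return an error rate
    votes = (preds @ w / w.sum()) > 0.5
    return 1.0 - np.mean(votes == y)

best_w, best_score = None, np.inf
for _ in range(1000):
    w = rng.random(preds.shape[1])
    score = opt_fn(w)
    if score < best_score:
        best_w, best_score = w, score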
Example #3
    # Gathers the input arguments
    args = get_arguments()

    # Gathering variables from arguments
    dataset = args.dataset

    # Creating empty lists to save outputs
    val_accs, test_accs = [], []
    val_time, test_time = [], []

    print(f'\nPerforming majority voting over {dataset} ...\n')

    # For each possible fold
    for k in range(c.N_FOLDS):
        print(f'Fold {k+1}/{c.N_FOLDS}:')

        # Loads the validation step predictions and labels
        val_pred, val_y = l.load_candidates(dataset, 'val', k + 1)

        # Loads the testing step predictions and labels
        test_pred, test_y = l.load_candidates(dataset, 'test', k + 1)

        # Defining the starting time of validation majority voting
        start_v = time.time()

        # Gather the majority votes between validation predictions
        val_votes = e.majority_voting(val_pred)

        # Defining the ending time of validation majority voting
        end_v = time.time()

        # Defining the starting time of testing majority voting
        start_t = time.time()
        # Gather the majority votes between testing predictions
        test_votes = e.majority_voting(test_pred)

        # Defining the ending time of testing majority voting
        end_t = time.time()
Example #4
    # Gathers the input arguments
    args = get_arguments()

    # Gathering variables from arguments
    dataset = args.dataset
    descriptor = args.descriptor
    fold = args.fold
    type = args.type
    meta = args.mh

    # Defining an input file
    input_file = f'output/{meta}_{type}_{dataset}_val_{fold}.pkl'

    # Creating a History object
    h = History()

    # Loading the input file
    h.load(input_file)

    # Loading the predictions and labels
    preds, y = l.load_candidates(dataset, 'test', fold)

    # If descriptor is global-based
    if descriptor == 'global':
        # Gets the global predictors
        preds = preds[:, :35]

    # If descriptor is cnn-based
    elif descriptor == 'cnn':
        # Gets the CNN predictors
        preds = preds[:, 35:]

    # Gathering the best weights
    best_weights = np.asarray(h.best_agent[-1][0])

    # Checks if the type of used ensemble was weight-based