def testing(trainData, testData, methodIds):
    """Evaluate one ABE configuration through a single-individual DEAP setup.

    The GA machinery is degenerate on purpose: the population holds exactly
    one individual (the given configuration) and the hall of fame therefore
    always selects it after evaluation.

    :param trainData: training data frame
    :param testData: test data frame
    :param methodIds: configuration indices wrapped as the sole individual
    :return: tuple of (MRE, SA, best individual)
    """
    def fitness_of(config):
        # Fitness is the (single-objective) value produced by transform.
        return transform(config, trainData, testData)

    toolbox = base.Toolbox()
    # Minimise the single objective, hence the negative weight.
    creator.create(
        "FitnessMin",
        base.Fitness,
        weights=[-1.0],
    )
    creator.create("Individual", array.array, typecode='d',
                   fitness=creator.FitnessMin)
    toolbox.register("evaluate", fitness_of)

    population = [creator.Individual(methodIds)]
    hall = tools.HallOfFame(1)
    population[0].fitness.values = toolbox.evaluate(population[0])
    hall.update(population)
    winner = hall[0]

    predicted, actual = abe_execute(S=get_setting_obj(winner),
                                    train=trainData, test=testData)
    return mre_calc(predicted, actual), sa_calc(predicted, actual), winner
def random_config(ft, dataset):
    """Randomly generate a valid ABE hyperparameter configuration and score it.

    Configurations are sampled from the feature tree until one passes the
    validity check, then evaluated with leave-one-out cross-validation
    (``folds=len(dataset)`` gives one fold per row — NOT 10-fold, which the
    original docstring wrongly claimed).

    :param ft: feature-tree object providing sampling and validity checking
    :param dataset: data frame with the project data
    :return: average relative error over all folds, in [0, 1]
    """
    # Rejection-sample until the feature tree accepts the configuration.
    while True:
        X = ft.top_down_random(None)
        if ft.check_fulfill_valid(X):
            break
        logging.debug('=== Invalid configuration. Regenerating...')
    settings = ft_dict_to_ABE_setting(X)

    all_error = list()
    # Leave-one-out CV: as many folds as there are rows in the dataset.
    for train, test in KFoldSplit_df(dataset, folds=len(dataset)):
        trainData = pd.DataFrame(data=train)
        testData = pd.DataFrame(data=test)
        error = abe_execute(S=settings, train=trainData, test=testData)
        all_error.append(error)
    return np.mean(all_error)
def calc_error1(bestConfigIndex, testData, allData):
    """Compute global error metrics for the chosen configuration on *testData*.

    :param bestConfigIndex: indices identifying the winning configuration
    :param testData: data frame the prediction runs on
    :param allData: full data set the global metrics are normalised against
    :return: tuple of (global MRE, global MSA, confidence interval)
    """
    predicted, actual = abe_execute(S=get_setting_obj(bestConfigIndex),
                                    data=testData)
    mre = g_mre(actual, predicted, allData)
    msa = g_msa(actual, predicted, allData)
    ci = ci_calc(predicted, actual, testData)
    return mre, msa, ci
def calc_error(bestConfigIndex, testData):
    """Compute per-dataset error metrics for the chosen configuration.

    :param bestConfigIndex: indices identifying the winning configuration
    :param testData: data frame the prediction runs on
    :return: tuple of (MRE, MSA, confidence interval)
    """
    predicted, actual = abe_execute(S=get_setting_obj(bestConfigIndex),
                                    data=testData)
    return (mre_calc(predicted, actual),
            msa(predicted, actual),
            ci_calc(predicted, actual, testData))
def transform2(configurationIndex, data):
    """Run ABE for one configuration and report MRE plus confidence interval.

    Companion to :func:`transform`, which returns the MRE only.

    :param configurationIndex: indices identifying the configuration
    :param data: data frame the prediction runs on
    :return: two-element list ``[MRE, confidence interval]``
    """
    predicted, actual = abe_execute(S=get_setting_obj(configurationIndex),
                                    data=data)
    return [mre_calc(predicted, actual), ci_calc(predicted, actual, data)]
def transform(configurationIndex, data):
    """Run ABE for one configuration and return its MRE on *data*.

    :param configurationIndex: indices identifying the configuration
    :param data: data frame the prediction runs on
    :return: one-element tuple ``(MRE,)`` — tuple form matches DEAP's
        fitness-values convention
    """
    predicted, actual = abe_execute(S=get_setting_obj(configurationIndex),
                                    data=data)
    return (mre_calc(predicted, actual),)