#test data test_holdout_dataset_id = [1134, 1495, 41147, 316, 1085, 1046, 1111, 55, 1116, 448, 1458, 162, 1101, 1561, 1061, 1506, 1235, 4135, 151, 51, 41138, 40645, 1510, 1158, 312, 38, 52, 1216, 41007, 1130] my_scorer = make_scorer(f1_score) total_search_time = 5*60#60*60#60*60#10 * 60 my_openml_datasets = [3, 4, 13, 15, 24, 25, 29, 31, 37, 38, 40, 43, 44, 49, 50, 51, 52, 53, 55, 56, 59, 151, 152, 153, 161, 162, 164, 172, 179, 310, 311, 312, 316, 333, 334, 335, 336, 337, 346, 444, 446, 448, 450, 451, 459, 461, 463, 464, 465, 466, 467, 470, 472, 476, 479, 481, 682, 683, 747, 803, 981, 993, 1037, 1038, 1039, 1040, 1042, 1045, 1046, 1048, 1049, 1050, 1053, 1054, 1055, 1056, 1059, 1060, 1061, 1062, 1063, 1064, 1065, 1066, 1067, 1068, 1069, 1071, 1073, 1075, 1085, 1101, 1104, 1107, 1111, 1112, 1114, 1116, 1119, 1120, 1121, 1122, 1123, 1124, 1125, 1126, 1127, 1128, 1129, 1130, 1131, 1132, 1133, 1134, 1135, 1136, 1137, 1138, 1139, 1140, 1141, 1142, 1143, 1144, 1145, 1146, 1147, 1148, 1149, 1150, 1151, 1152, 1153, 1154, 1155, 1156, 1157, 1158, 1159, 1160, 1161, 1162, 1163, 1164, 1165, 1166, 1167, 1169, 1216, 1235, 1236, 1237, 1238, 1240, 1412, 1441, 1442, 1443, 1444, 1447, 1448, 1449, 1450, 1451, 1452, 1453, 1455, 1458, 1460, 1461, 1462, 1463, 1464, 1467, 1471, 1473, 1479, 1480, 1484, 1485, 1486, 1487, 1488, 1489, 1490, 1494, 1495, 1496, 1498, 1502, 1504, 1506, 1507, 1510, 1511, 1547, 1561, 1562, 1563, 1564, 1597, 4134, 4135, 4154, 4329, 4534, 23499, 40536, 40645, 40646, 40647, 40648, 40649, 40650, 40660, 40665, 40666, 40669, 40680, 40681, 40690, 40693, 40701, 40705, 40706, 40710, 40713, 40714, 40900, 40910, 40922, 40999, 41005, 41007, 41138, 41142, 41144, 41145, 41146, 41147, 41150, 41156, 41158, 41159, 41160, 41161, 41162, 41228, 41430, 41521, 41538, 41976, 42172, 42477] for t_v in test_holdout_dataset_id: my_openml_datasets.remove(t_v) feature_names, feature_names_new = get_feature_names() def run_AutoML(trial, X_train=None, X_test=None, y_train=None, y_test=None, 
categorical_indicator=None):
    # NOTE(review): this chunk is whitespace-mangled; the statements below are
    # the reformatted interior of run_AutoML.  The chunk ends at a bare
    # `else:` whose suite was lost at the chunk boundary — confirm against the
    # original file before relying on the branch structure.
    space = None
    search_time = None
    if not 'space' in trial.user_attrs:
        # First evaluation of this trial: sample a fresh hyperparameter space,
        # cache a deep copy on the trial so later evaluations reuse it, and
        # draw the run-constraint parameters for this trial.
        gen = SpaceGenerator()
        space = gen.generate_params()
        space.sample_parameters(trial)
        trial.set_user_attr('space', copy.deepcopy(space))
        # generate_parameters returns the full tuple of per-trial constraints
        # (time/memory/privacy budgets, CV configuration, sampling fractions,
        # and the dataset id to evaluate on).
        search_time, evaluation_time, memory_limit, privacy_limit, training_time_limit, inference_time_limit, pipeline_size_limit, cv, number_of_cvs, hold_out_fraction, sample_fraction, dataset_id = generate_parameters(trial, total_search_time, my_openml_datasets)
    else:
"rb"))
# NOTE(review): the fragment above is the dangling tail of a
# `pickle.load(open(..., "rb"))` call whose opening was lost at the chunk
# boundary — recover it from the original file.

# Load the pre-trained meta-models: one predicting run success, one holding
# learned feature weights.
model_success = pickle.load( open('/home/felix/phd2/picture_progress/al_only/my_great_model_success.p', "rb"))
model_weights = pickle.load( open('/home/felix/phd2/picture_progress/weights/my_great_model_weights.p', "rb"))

# Names of the run-constraint features fed to the meta-feature builder.
my_list_constraints = [
    'global_search_time_constraint',
    'global_evaluation_time_constraint',
    'global_memory_constraint',
    'global_cv',
    'global_number_cv',
    'privacy',
    'hold_out_fraction',
    'sample_fraction',
    'training_time_constraint',
    'inference_time_constraint',
    'pipeline_size_constraint'
]
_, feature_names = get_feature_names(my_list_constraints)

results_dict = {}
# Evaluate every hold-out dataset: fetch its train/test split and compute the
# meta-feature vector used by the meta-model.
for test_holdout_dataset_id in test_holdout_dataset_ids:
    X_train_hold, X_test_hold, y_train_hold, y_test_hold, categorical_indicator_hold, attribute_names_hold = get_data( test_holdout_dataset_id, randomstate=42)
    metafeature_values_hold = data2features(X_train_hold, y_train_hold, categorical_indicator_hold)
    #plot_most_important_features(model, feature_names, k=len(feature_names))
    # Per-dataset result accumulators for the two strategies being compared.
    dynamic_approach = []
    static_approach = []
# --- Stand-alone evaluation fragment: single hold-out dataset ---

my_scorer = make_scorer(f1_score)

# Dataset under evaluation; previously tried ids kept as reference.
test_holdout_dataset_id = 31  # 1590 # 1218 # 4134 # 1139 # 1138
privacy = None

X_train_hold, X_test_hold, y_train_hold, y_test_hold, categorical_indicator_hold, attribute_names_hold = get_data(test_holdout_dataset_id, randomstate=42)
metafeature_values_hold = data2features(X_train_hold, y_train_hold, categorical_indicator_hold)

# Load the trained meta-model, falling back to the /tmp copy if the primary
# path is unavailable.  The original bare `except:` also swallowed
# KeyboardInterrupt/SystemExit; `except Exception:` keeps the best-effort
# fallback while letting those propagate.
try:
    model = pickle.load(open('/home/felix/phd2/picture_progress/my_great_model.p', "rb"))
except Exception:
    model = pickle.load(open('/tmp/my_great_model.p', "rb"))
# Alternative model path kept for reference:
#model = pickle.load(open('/home/felix/phd2/my_meta_model/my_great_model.p', "rb")

_, feature_names = get_feature_names()
#plot_most_important_features(model, feature_names, k=len(feature_names))

# Result accumulators for the two strategies being compared.
dynamic_approach = []
static_approach = []

minutes_to_search = 2
#for minutes_to_search in range(1, 6):
# Sweep (currently a single, effectively-zero memory budget); chunk is cut
# off inside this loop body.
for memory_budget in [0.00000000000001]:
    current_dynamic = []
    current_static = []
    search_time_frozen = minutes_to_search * 60