def setUp(self):
    """Load the offer spreadsheet and build the Permutations under test.

    Exposes ``self.dataset`` (profile lengths, longest first) and
    ``self.permutations`` to the test methods.
    """
    selection = np.array([1000, 2000, 3000, 5000])
    tolerance = 10
    raw_df = pd.read_excel("offerte_lilo_1.xlsx")
    # Sort by the first (length) column, descending, so the longest raw
    # profiles come first.
    self.dataset = raw_df.sort_values(raw_df.columns[0], ascending=False)
    self.permutations = Permutations(self.dataset.copy(), selection, tolerance)
def find_largest_number(self):
    """Return the largest integer obtainable by concatenating the elements
    of ``self.arr`` in some order.

    Enumerates every ordering via the project's ``Permutations`` helper and
    keeps the numerically largest concatenation.

    Returns:
        int | None: the maximum concatenated value, or ``None`` when there
        are no permutations (empty input), matching the original behavior.
    """
    permutation_class = Permutations()
    all_perms = permutation_class.get_all_permutations(self.arr)
    # max() over a generator replaces the manual running-maximum loop and
    # the commented-out debug print; default=None preserves the original
    # "no permutations" result.
    return max(
        (int("".join(str(num) for num in perm)) for perm in all_perms),
        default=None,
    )
def setUp(self):
    """Load the profile-length spreadsheet and precompute permutation data.

    Exposes ``self.dataset``, ``self.permutations``, ``self.depth`` and
    ``self.permuted_dict`` to the test methods.
    """
    selection = np.array([1000, 2000, 3000, 5000])
    tolerance = 10
    raw_df = pd.read_excel("profillängen.xlsx")
    # Longest raw profiles first.
    self.dataset = raw_df.sort_values(raw_df.columns[0], ascending=False)
    self.permutations = Permutations(self.dataset.copy(), selection, tolerance)
    self.depth = self.permutations.get_permutation_depth()
    # One permuted dataframe per depth level, keyed by depth.
    self.permuted_dict = self.permutations.get_permuted_dataframes(
        self.dataset, self.depth)
def rearrange(hand, rest):
    """Yield (melds, leftovers) variants for the freely rearrangeable tiles.

    ``rest`` holds the tiles that can still be regrouped: no declared melds
    and no bonus tiles. ``hand`` is already arranged and must not be changed.
    TODO: also return how many tiles are missing for winning.
    """
    # Each variant from the project's Permutations helper is one way of
    # grouping `rest` into melds; the second tuple (leftovers) is empty here.
    for melds in Permutations(rest).variants:
        yield tuple(melds), tuple()
class TestPermutations(TestCase):
    """Unit tests for the Permutations helper on the profillängen dataset."""

    def setUp(self):
        """Load the profile lengths and precompute depth + permuted frames."""
        normal_profile_selection = np.array([1000, 2000, 3000, 5000])
        cutting_tolerance = 10
        profiles_df = pd.read_excel("profillängen.xlsx")
        # Longest raw profiles first.
        self.dataset = profiles_df.sort_values(profiles_df.columns[0],
                                               ascending=False)
        self.permutations = Permutations(self.dataset.copy(),
                                         normal_profile_selection,
                                         cutting_tolerance)
        self.depth = self.permutations.get_permutation_depth()
        self.permuted_dict = self.permutations.get_permuted_dataframes(
            self.dataset, self.depth)

    def test_permutation_depth_calculation(self):
        """
        Tests if the calculated permutation depth is correct for
        the given dataset.
        """
        self.assertEqual(5, self.depth)

    def test_permuted_dataframes_sums(self):
        """
        Tests that every permuted dataframe contains only permutations
        whose sum is smaller than the largest raw profile (5000).
        """
        for key in self.permuted_dict:
            # BUG FIX: the original asserted `not (... > 5000).all()`, which
            # only failed when EVERY row exceeded 5000. `.any()` rejects a
            # single offending row, matching the documented intent.
            self.assertFalse(
                (self.permuted_dict[key]["sum"].values > 5000).any())

    def test_number_of_permutation_dataframes(self):
        """One permuted dataframe must exist per permutation depth level."""
        # len() replaces the original manual counting loop over the dict.
        self.assertEqual(self.depth, len(self.permuted_dict))

    def test_permutation_dataframe_merging(self):
        """Merged dataframe has one column per depth level plus two extras."""
        permutations_df = self.permutations.merge_permutation_dataframes(
            self.permuted_dict)
        self.assertEqual(len(permutations_df.columns), self.depth + 2)

    def test_get_combinations(self):
        """Combinations dataframe has one column per depth plus two extras."""
        combinations_df = self.permutations.get_combinations_dataframe(
            self.dataset)
        self.assertEqual(len(combinations_df.columns), self.depth + 2)
def setUp(self):
    """Load profile lengths and profile get_combinations_dataframe with cProfile.

    Side effects: prints cumulative-time profiling statistics to stdout.
    Exposes ``self.dataset`` and ``self.combinations_df`` to the tests.
    """
    profiles_df = pd.read_excel("profillängen.xlsx")
    # Longest raw profiles first.
    self.dataset = profiles_df.sort_values(profiles_df.columns[0],
                                           ascending=False)
    self.normal_profile_selection = np.array([1000, 2000, 3000, 5000])
    self.cutting_tolerance = 10
    # Profile the whole combination generation (this includes the
    # permutation depth check inside get_combinations_dataframe).
    self.pr = cProfile.Profile()
    self.pr.enable()
    permutations = Permutations(profiles_df.copy(),
                                self.normal_profile_selection,
                                self.cutting_tolerance)
    self.combinations_df = permutations.get_combinations_dataframe(
        self.dataset)
    # Dump cumulative-time stats once the profiled work above is done.
    # (The original had a stray no-op string expression here.)
    p = Stats(self.pr)
    p.strip_dirs()
    p.sort_stats('cumtime')
    # FIX: `print "..."` is Python 2-only syntax; the single-argument
    # parenthesized form below behaves identically on Python 2 and 3.
    p.print_stats()
    print("\n--->>>")  # visual separator between profiling dumps
class ProfileCombinations(TestCase):
    """Profiling harness for the Permutations depth and combination routines.

    Each "test" enables cProfile, runs one routine, and prints the
    cumulative-time statistics to stdout.
    """

    def setUp(self):
        """Load the offer spreadsheet and build the Permutations under test."""
        normal_profile_selection = np.array([1000, 2000, 3000, 5000])
        cutting_tolerance = 10
        profiles_df = pd.read_excel("offerte_lilo_1.xlsx")
        # Longest raw profiles first.
        self.dataset = profiles_df.sort_values(profiles_df.columns[0],
                                               ascending=False)
        self.permutations = Permutations(self.dataset.copy(),
                                         normal_profile_selection,
                                         cutting_tolerance)

    def _dump_profile_stats(self):
        # Shared helper: print cumulative-time stats for the currently
        # enabled profiler (was duplicated in both test methods).
        p = Stats(self.pr)
        p.strip_dirs()
        p.sort_stats('cumtime')
        p.print_stats()
        # FIX: `print "..."` is Python 2-only; the single-argument
        # parenthesized form behaves identically on Python 2 and 3.
        print("\n--->>>")  # visual separator between profiling dumps

    def test_profile_get_combination_depth(self):
        """Profile only the permutation depth calculation."""
        self.pr = cProfile.Profile()
        self.pr.enable()
        self.depth = self.permutations.get_permutation_depth()
        self._dump_profile_stats()

    def test_profile_combination_generator(self):
        """Profile depth calculation plus full combination generation."""
        self.pr = cProfile.Profile()
        self.pr.enable()
        self.depth = self.permutations.get_permutation_depth()
        self.combinations_df = self.permutations.get_combinations_dataframe(
            self.dataset)
        self._dump_profile_stats()
# Python 2 script: search for arrangements of the digits 1..9 (plus a fixed
# 10) such that every one of five overlapping index triples has the same
# sum, then print each solution one line per arrangement.
from permutations import Permutations

res = []
# Project-local permutation iterator over the digits 1..9.
# NOTE(review): the second argument (9) is presumably the permutation
# length -- confirm against the permutations module.
p = Permutations(range(1,10),9)
while True:
    r = p.get()
    # Prepend the fixed value 10 and append a copy of r[4] so the index
    # triples below wrap around; r now has 11 entries.
    r = [10]+r+[r[4]]
    # Target sum, taken from the first triple (r[0], r[5], r[6]).
    v = r[0]+r[5]+r[5+1]
    for i in range(1,5):
        if r[i]+r[i+5]+r[i+5+1] != v:
            break
    else:
        # for/else: no triple broke out, so all five share the sum v.
        res.append(r)
    # Advance to the next permutation; p.next() presumably returns False
    # when exhausted -- confirm against the permutations module.
    if not p.next():
        break
for r in res:
    for i in range(5):
        # Python 2 print statement; the trailing comma suppresses the
        # newline so all five triples stay on one line.
        print " %d,%d,%d " % (r[i],r[i+5],r[i+5+1]),
    # Bare Python 2 print: terminate the line for this solution.
    print
# NOTE(review): this elif chain continues an `if args.dataset == ...`
# statement whose opening branches are not visible in this fragment.
elif args.dataset == 'cifar10':
    print("DL one later")
elif args.dataset == 'cifar100':
    args.train_file = 'cifar100.pt'
    if not os.path.isdir(args.i):
        print('This dataset should be downloaded manually')
elif args.dataset == 'CUB200':
    # CUB200 images live in an `images` subfolder of the input directory.
    # NOTE(review): `args.i = args.i = ...` is a redundant double
    # assignment -- harmless, but probably a typo.
    args.i = args.i = os.path.join(args.i, 'images')
    if not os.path.isdir(args.i):
        print('This dataset should be downloaded manually')

# Ensure the output directory exists.
if not os.path.exists(args.o):
    os.makedirs(args.o)

# NOTE(review): the input path gains a `processed` suffix here -- confirm
# this is meant for args.i and not args.o.
args.i = os.path.join(args.i, 'processed')

# Dispatch to the task-specific data formatter.
if args.task == 'rotations':
    DataFormatter = Rotations(args)
elif args.task == 'permutations':
    DataFormatter = Permutations(args)
elif args.task == 'disjoint':
    DataFormatter = Disjoint(args)
elif args.task == 'cifar100':
    DataFormatter = Cifar100_Disjoint(args)
elif args.task == 'CUB200':
    DataFormatter = CUB200_Disjoint(args)
else:
    print("Not Implemented")
# NOTE(review): when no task matches, DataFormatter is never assigned and
# the call below raises NameError -- confirm this is acceptable.
DataFormatter.formating_data()
# Prepare a Wilcoxon rank analysis over spreadsheet columns read through
# the project-local Permutations helper.
import datetime
from rank_test import WilcoxonRank
from permutations import Permutations

excel_obj = WilcoxonRank()
permutation_obj = Permutations()

# Spreadsheet geometry and header information, as reported by the reader.
max_col = permutation_obj.get_total_column()
max_row = permutation_obj.get_total_row()
has_title = permutation_obj.get_has_title()
# NOTE(review): columns appear to be 1-indexed here -- confirm against
# Permutations.get_column_data.
first_column = permutation_obj.get_column_data(1)
permutations, new_headers = permutation_obj.get_permutations()
key_column = permutation_obj.get_column_data(2)

# Working state, filled in below (and presumably by code after this
# fragment).
key_column_data = None
key_column_header = None
first_column_data = None
first_column_header = None
final_data = {}
non_key_column_data = []
headers = []

# Header row is only meaningful when the sheet declares a title row.
if has_title:
    headers = permutation_obj.get_row_data(1)

# get_column_data presumably returns a {"data": ..., "header": ...}
# mapping when the column exists; the isinstance guards unpack it
# defensively -- confirm the non-dict case against the module.
if isinstance(key_column, dict):
    key_column_data = key_column["data"]
    key_column_header = key_column["header"]
if isinstance(first_column, dict):
    first_column_data = first_column["data"]
    first_column_header = first_column["header"]
#%% #load profile lengths in pandas dataframe profiles_df = pd.read_excel(root.filename) profiles_df = profiles_df.sort_values(profiles_df.columns[0], ascending=False) #check which profiles are available result = messagebox.askyesno("Verfügbare Profile","Ist ein 5 m Profil verfügbar?") if not result: selection = no_five_profile_selection #check for cutting tolerance #check for permutation depth permutations = Permutations(profiles_df.copy(), selection, cutting_tolerance) permutation_depth = permutations.get_permutation_depth() permutation_depth = 3 #create permutations of values perm_df_dict = permutations.get_permuted_dataframes(profiles_df, permutation_depth) #create work list of remaining profiles profiles_array = profiles_df[profiles_df.columns[0]] remaining_profiles_array = profiles_array.copy() #help members raw_profile_list = [] garbage_array = np.array([]) id_counter = 0 #Define initial profile
def main():
    """Run the permutation test suite against Permutations.is_permutation."""
    # Hand the predicate under test straight to the test harness.
    TestPermutation().test_permutation(Permutations().is_permutation)