def test_deepflatten_copy1():
    # Copying a deepflatten iterator must work; the shared helper checks it.
    flattener = deepflatten(toT([1, 2, 3, 4]))
    _hf.iterator_copy(flattener)
def flatten(self, lst, depth=1):
    """Flatten *lst* up to *depth* nesting levels and return a list."""
    flattened = deepflatten(lst, depth=depth)
    return list(flattened)
def test_1():
    # flatten() and deepflatten() must agree on a mixed-depth input.
    nested = [[1, 2], 3, [4, [5, [6]], 7]]
    expect(list(flatten(nested))).to_equal(list(deepflatten(nested)))
# NOTE(review): this snippet was collapsed onto a single line during
# extraction — the inline '#' comments now swallow the code that follows
# them, and the final 'if' statement is truncated mid-block. Restore the
# original line breaks before running. It appears to tally symmetric
# co-occurrence counts of COG pairs per operon into res_dict and
# total_operons — verify against the surrounding script.
cog_operons = [] for t in operons: cog_operons.append([]) for x in t: xcogs = gi_to_cog(gi_to_cogs, int(x.id.split("|")[1])) if xcogs != 0: for xcog in xcogs: if xcog in interesting_cogs: if xcog not in all_closest_neighbours.keys(): all_closest_neighbours[xcog] = t[:] else: all_closest_neighbours[xcog] += t cog_operons[-1].append(xcog) # constructing table as dict (count collocations of every pair of COGs) for operon in cog_operons: for pair in Iterable(deepflatten(operon, ignore=str)).combinations(2): if pair[0] not in res_dict.keys(): res_dict[pair[0]] = {} if pair[1] not in res_dict[pair[0]].keys(): res_dict[pair[0]][pair[1]] = 0 res_dict[pair[0]][pair[1]] += 1 # symmetric pair if pair[1] not in res_dict.keys(): res_dict[pair[1]] = {} if pair[0] not in res_dict[pair[1]].keys(): res_dict[pair[1]][pair[0]] = 0 res_dict[pair[1]][pair[0]] += 1 # addd one operon to counter if pair[1] not in total_operons.keys(): total_operons[pair[1]] = 0 if pair[0] not in total_operons.keys():
def iu_deepflatten(a):
    """Flatten exactly one nesting level of *a* and return the items as a list."""
    return [item for item in deepflatten(a, depth=1)]
def test_deepflatten_types3():
    # A (list, tuple) types argument flattens both container kinds.
    result = list(deepflatten([[T(1)], T(2), ([T(3)], )], types=(list, tuple)))
    assert result == toT([1, 2, 3])
def get_images_url(self) -> List[str]:
    """Return all pokemon image URLs collected from every scrapper.

    Bug fix: the signature promises ``List[str]`` but ``deepflatten``
    returns a lazy iterator, so callers got a one-shot, non-indexable
    object. Materialize it with ``list()`` to honor the annotation;
    this is backward compatible since a list is still iterable.
    """
    urls_per_scrapper = [s.get_images_url() for s in self.scrappers]
    return list(deepflatten(urls_per_scrapper, depth=1))
def test_deepflatten_pickle1(protocol):
    # Consume one item, pickle the partially-exhausted iterator, and
    # check the unpickled copy resumes exactly where the original stopped.
    flattener = deepflatten([[T(1)], [T(2)], [T(3)], [T(4)]])
    assert next(flattener) == T(1)
    dumped = pickle.dumps(flattener, protocol=protocol)
    restored = pickle.loads(dumped)
    assert list(restored) == toT([2, 3, 4])
def test_deepflatten_pickle2(protocol):
    # A partially consumed string sub-iterator must survive pickling too.
    flattener = deepflatten([['abc', T(1)], [T(2)], [T(3)], [T(4)]])
    assert next(flattener) == 'a'
    dumped = pickle.dumps(flattener, protocol=protocol)
    restored = pickle.loads(dumped)
    assert list(restored) == ['b', 'c'] + toT([1, 2, 3, 4])
def test_deepflatten_failure_setstate5():
    # Shared helper: __setstate__ given a list-shaped state must fail.
    flattener = deepflatten(toT([1, 2, 3, 4]))
    _hf.iterator_setstate_list_fail(flattener)
def test_deepflatten_failure_setstate6():
    # Shared helper: __setstate__ given an empty state must fail.
    flattener = deepflatten(toT([1, 2, 3, 4]))
    _hf.iterator_setstate_empty_fail(flattener)
def test_deepflatten_failure_setstate4():
    # __setstate__ must reject a currentdepth that exceeds the allowed depth.
    df = deepflatten(toT([1, 2, 3, 4]))
    too_deep_state = ([iter(toT([1, 2, 3, 4]))], 5, 0)
    with pytest.raises(ValueError):
        df.__setstate__(too_deep_state)
def test_deepflatten_failure_setstate2():
    # The iterator list passed through __setstate__ must hold actual
    # iterators; a set inside it is rejected with TypeError.
    df = deepflatten(toT([1, 2, 3, 4]))
    bad_state = ([set(toT([1, 2, 3, 4]))], 0, 0)
    with pytest.raises(TypeError):
        df.__setstate__(bad_state)
def test_deepflatten_failure_setstate1():
    # The first state element must be a list of iterators, not a set.
    df = deepflatten(toT([1, 2, 3, 4]))
    bad_state = ({'a', 'b', 'c'}, 0, 0)
    with pytest.raises(TypeError):
        df.__setstate__(bad_state)
def test_deepflatten_types1():
    # With types=list, every list level is flattened away.
    result = list(deepflatten([[T(1)], T(2), [[T(3)]]], types=list))
    assert result == toT([1, 2, 3])
def test_deepflatten_normal1():
    # An already-flat iterable passes through unchanged.
    flattened = list(deepflatten([T(1), T(2), T(3)]))
    assert flattened == [T(1), T(2), T(3)]
def test_deepflatten_types2():
    # With types=tuple, list nesting is not considered flattenable,
    # so the input comes back structurally unchanged.
    result = list(deepflatten([[T(1)], T(2), [[T(3)]]], types=tuple))
    assert result == [[T(1)], T(2), [[T(3)]]]
def test_deepflatten_normal2():
    # Default settings flatten nesting of any depth.
    flattened = list(deepflatten([[T(1)], T(2), [[T(3)]]]))
    assert flattened == toT([1, 2, 3])
def test_deepflatten_ignore1():
    # Types listed in `ignore` are yielded as-is instead of being flattened.
    result = list(deepflatten([[T(1)], T(2), [[T(3), 'abc']]], ignore=str))
    assert result == [T(1), T(2), T(3), 'abc']
def test_deepflatten_containing_strings1():
    # Strings are split into single characters rather than triggering
    # endless recursion.
    flattened = list(deepflatten(["abc", "def"]))
    assert flattened == ['a', 'b', 'c', 'd', 'e', 'f']
def get_images(self) -> List[bytes]:
    """Return all pokemon images (as bytes) collected from every scrapper.

    Bug fix: the signature promises ``List[bytes]`` but ``deepflatten``
    returns a lazy iterator. Wrap it in ``list()`` so the declared
    return type is honored; callers that merely iterate are unaffected.
    """
    images_per_scrapper = [s.get_images() for s in self.scrappers]
    return list(deepflatten(images_per_scrapper, depth=1))
def test_deepflatten_containing_strings2():
    # Even when str itself is passed via `types`, single characters are
    # not recursed into endlessly.
    flattened = list(deepflatten(["abc", "def"], types=str))
    assert flattened == ['a', 'b', 'c', 'd', 'e', 'f']
# NOTE(review): RunExperiment was collapsed onto three physical lines during
# extraction — inline '#' comments now swallow the statements that follow
# them, and statements are split across line boundaries ('for ' / 'item in
# trails.trials:'). Restore the original line breaks before running.
# From what is visible: it runs a hyper-parameter optimisation (one of
# several optParam* strategies selected by `mode`), repeating 10 times with
# a 10%/90% train/test split for supervised adaptations ('NNfilter',
# 'MCWs') and once otherwise; on FunctionTimedOut it salvages results from
# optimizer.trails (deepflatten-ing each trial's parameter values) and
# appends results/parameters to files under the 'res.../para...' dirs —
# verify details against the full source.
def RunExperiment(Xsource, Lsource, Xtarget, Ltarget, target, source, adaptation, classifier, imbalance='NoImb', distribution='KS', mode='adpt', repeat=10, fe=100): fres = create_dir('res' + mode.upper() + 'rq-DSNF-Selective/' + target + '->' + source) fp = create_dir('para' + mode.upper() + 'rq-DSNF-Selective/' + target + '->' + source) if adaptation in ['NNfilter', 'MCWs']: # supervised (repeat 10 times) for it in range(10): train, test, ytrain, ytest = train_test_split(Xtarget, Ltarget, test_size=0.9, random_state=42) num = repeat res = np.zeros((num, 2)) res_fir = np.zeros((num, 2)) res_lir = np.zeros((num, 2)) for i in range(num): if mode == 'adpt': optimizer = optParamAdpt(Xsource, Lsource, test, ytest, classifier, adaptation, imbalance, fe, train=train, Ltrain=ytrain) elif mode == 'all': optimizer = optParamAll(Xsource, Lsource, test, ytest, classifier, adaptation, imbalance, fe, train=train, Ltrain=ytrain) elif mode == 'seq': optimizer = optParamSEQ(Xsource, Lsource, test, ytest, classifier, adaptation, imbalance, fe, train=train, Ltrain=ytrain) elif mode == 'clf': optimizer = optParamCLF(Xsource, Lsource, test, ytest, classifier, adaptation, imbalance, fe, train=train, Ltrain=ytrain) elif mode == 'imb': optimizer = optParamIMB(Xsource, Lsource, test, ytest, classifier, adaptation, imbalance, fe, train=train, Ltrain=ytrain) elif mode == 'distadpt': optimizer = optParamDistAdpt(Xsource, Lsource, test, ytest, classifier, adaptation, imbalance, distribution, fe, train=train, Ltrain=ytrain) elif mode == 'dist': optimizer = optParamDist(Xsource, Lsource, test, ytest, classifier, adaptation, imbalance, distribution, fe, train=train, Ltrain=ytrain) try: res[i], his, best, res_fir[i], res_lir[i] = optimizer.run() except FunctionTimedOut: trails = optimizer.trails if mode == 'seq': if len(optimizer.trails.results) == 0: trails = optimizer.Atrails his = dict() try: his['name'] = list( trails.trials[0]['misc']['vals'].keys()) except: his['name'] = [None] j = 0 for 
item in trails.trials: # only record the results of successful runs if item['state'] == 2: results = list( deepflatten(item['misc']['vals'].values())) results.append(item['result']['result']) his[j] = results j += 1 if j > 0: inc_value = trails.best_trial['result']['result'] res[i] = np.asarray([optimizer.def_value, inc_value]) best = trails.best_trial['misc']['vals'] else: try: inc_value = trails.best_trial['result']['result'] res[i] = np.asarray( [optimizer.def_value, inc_value]) best = trails.best_trial['misc']['vals'] except: res[i] = np.asarray([0, 0]) best = [] # print the result into file with open( fres + adaptation + '-' + classifier + '-' + imbalance + '.txt', 'a+') as f: print(res[i], best, res_fir[i], res_lir[i], file=f) if i == num - 1: print('-----------------------------------------', file=f) with open( fp + adaptation + '-' + classifier + '-' + imbalance + '.txt', 'a+') as f: for item in his.values(): print(item, file=f) if i == num - 1: print('-----------------------------------------', file=f) else: # un-supervised (just 1 times) num = repeat res = np.zeros((num, 2)) res_fir = np.zeros((num, 2)) res_lir = np.zeros((num, 2)) for i in range(num): if mode == 'adpt': optimizer = optParamAdpt(Xsource, Lsource, Xtarget, Ltarget, classifier, adaptation, imbalance, fe) elif mode == 'all': optimizer = optParamAll(Xsource, Lsource, Xtarget, Ltarget, classifier, adaptation, imbalance, distribution, fe) elif mode == 'seq': optimizer = optParamSEQ(Xsource, Lsource, Xtarget, Ltarget, classifier, adaptation, imbalance, fe) elif mode == 'clf': optimizer = optParamCLF(Xsource, Lsource, Xtarget, Ltarget, classifier, adaptation, imbalance, fe) elif mode == 'imb': optimizer = optParamIMB(Xsource, Lsource, Xtarget, Ltarget, classifier, adaptation, imbalance, fe) elif mode == 'distadpt': optimizer = optParamDistAdpt(Xsource, Lsource, Xtarget, Ltarget, classifier, adaptation, imbalance, distribution, fe) elif mode == 'dist': optimizer = optParamDist(Xsource, Lsource, 
Xtarget, Ltarget, classifier, adaptation, imbalance, distribution, fe) try: res[i], his, best, res_fir[i], res_lir[i] = optimizer.run() except FunctionTimedOut: trails = optimizer.trails if mode == 'seq': if len(optimizer.trails.results) == 0: trails = optimizer.Atrails his = dict() try: his['name'] = list(trails.trials[0]['misc']['vals'].keys()) except: his['name'] = [None] j = 0 for item in trails.trials: # only record the results of successful runs if item['state'] == 2: results = list( deepflatten(item['misc']['vals'].values())) results.append(item['result']['result']) his[j] = results j += 1 if j > 0: inc_value = trails.best_trial['result']['result'] res[i] = np.asarray([optimizer.def_value, inc_value]) best = trails.best_trial['misc']['vals'] else: try: inc_value = trails.best_trial['result']['result'] res[i] = np.asarray([optimizer.def_value, inc_value]) best = trails.best_trial['misc']['vals'] except: res[i] = np.asarray([0, 0]) best = [] # print the result into file with open( fres + adaptation + '-' + classifier + '-' + imbalance + '.txt', 'a+') as f: print(res[i], best, res_fir[i], res_lir[i], file=f) with open( fp + adaptation + '-' + classifier + '-' + imbalance + '.txt', 'a+') as f: for item in his.values(): print(item, file=f)
def test_deepflatten_containing_strings3():
    # Strings mixed with tuples, lists and sets all flatten to characters.
    mixed = ["abc", ("def", ), "g", [[{'h'}], 'i']]
    assert list(deepflatten(mixed, )) == [
        'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i'
    ]
# Build subprocess command
command = 'Rscript'
path2script = '../Best_Model.R'

# Candidate values per random-forest hyperparameter (passed to the R script
# as strings).
hyperparameters = {
    'mtry': ['15'],
    'ntree': ['1500'],
    'nodesize': ['2'],
    'numRandomCuts': ['1']
}


def generate_hyperparameters_combination(hyperparameters_dictionary):
    """Return the cartesian product of all hyperparameter values.

    Bug fix: the original body read the module-level ``hyperparameters``
    instead of the ``hyperparameters_dictionary`` argument, so passing a
    different dictionary had no effect.
    """
    all_hyper = list(hyperparameters_dictionary.keys())
    combinations = it.product(
        *(hyperparameters_dictionary[hyper] for hyper in all_hyper))
    return [list(comb) for comb in combinations]


all_hyperparameters = generate_hyperparameters_combination(hyperparameters)

# Loop the folds: train on `fold`, test on `fold + 1`, once per
# hyperparameter combination.
for fold in folds:
    for hyper in all_hyperparameters:
        train_fold = str(fold)
        test_fold = str(fold + 1)
        args = [train_fold, test_fold, hyper]
        # `hyper` is itself a list, so flatten nested lists into one
        # flat argv for the R script.
        flattened_list = list(deepflatten(args, types=list))
        cmd = [command, path2script] + flattened_list
        print(cmd)
        x = subprocess.check_output(cmd, universal_newlines=True)
def test_deepflatten_depth1():
    # An explicit depth of 1 leaves an already-flat iterable intact.
    flattened = list(deepflatten([T(1), T(2), T(3)], 1))
    assert flattened == toT([1, 2, 3])
def test_5():
    # Empty nested lists must not confuse either implementation.
    nested = [[1, 2], [[[[]]], 3], [[[4]], [5, [6]], 7]]
    expect(list(flatten(nested))).to_equal(list(deepflatten(nested)))
def test_deepflatten_depth2():
    # depth=1 removes exactly one level of nesting, no more.
    flattened = list(deepflatten([[T(1)], T(2), [[T(3)]]], 1))
    assert flattened == [T(1), T(2), [T(3)]]
# NOTE(review): `list3_1`, `str7_1` and `list3` are defined earlier in the
# original script, outside this fragment.
list3_1.extend(list3)
print(str7_1)
print(list3_1)

# 8. List-based expansion
list4 = [2, 2, 2, 2]
print([2 * x for x in list4])

# Expanding a list of lists (one level)
list5 = [[1, 2, 3], [4, 5, 6], [4, 3], [1]]
print([i for k in list5 for i in k])

# 9. Flattening an arbitrarily nested list
# Method 1: iteration_utilities.deepflatten
from iteration_utilities import deepflatten
list6 = [[12, 5, 3], [2, 4, [5], [6, 9, 7]], [5, 8, [9, [10, 12]]]]
print(list(deepflatten(list6)))


# Method 2: hand-rolled recursion
def flatten1(lst):
    """Recursively flatten nested lists into a single flat list."""
    res = []
    for element in lst:
        if isinstance(element, list):
            res.extend(flatten1(element))
        else:
            res.append(element)
    return res


print(flatten1(list6))
def test_deepflatten_failure10():
    # An exception raised inside an isinstance check must propagate
    # out of the deepflatten iterator unchanged.
    df = deepflatten(toT([1, 2, 3, 4]), ignore=_hf.FailingIsinstanceClass)
    with pytest.raises(_hf.FailingIsinstanceClass.EXC_TYP,
                       match=_hf.FailingIsinstanceClass.EXC_MSG):
        list(df)