def _image(self):
    """Return the literal image of the function as a Set.

    Applies the function to every element of its domain and collects
    the results.
    """
    return Set(map(self, self.domain))
def testRemove(self):
    """After removing 1, a set containing only 1 is no longer included."""
    sample = Set([1, 2, 3, 4, 1, 3, 7])
    sample.remove(1)
    singleton = Set([1])
    self.assertFalse(singleton.isIncluded(sample))
def testIncluded(self):
    """A set is included in a superset, even one that repeats elements."""
    small = Set([1, 2, 5])
    large = Set([2, 3, 1, 6, 2, 5])
    self.assertTrue(small.isIncluded(large))
# NOTE(review): this fragment sits inside an outer loop that is not visible
# here -- the bare `break` statements require one.  Indentation below is
# reconstructed from a collapsed one-line paste; confirm against history.
if not x:
    break
print("total phrases per class = ", n_elements)
print("total number of classes = ", count)
# Build one LSA model over the raw phrases x with the module-level settings.
l = LSA(MAX_GRAM, MIN_FREQ, P_EIG, x)
print("Parameters: Min_freq =", l.min_freq, "NGram_max =", l.ngram_max, "P_eig =", l.p_eig*100)
print("LSA created.")
###########################
# LSA
human_keywords = l.manage_keywords(f.keywords)
lsa_results = l.train_phrases(human_keywords)
print("LSA Results computed.")
# Wrap the LSA features plus labels into train/test splits.
sets = Set(lsa_results, numpy.array(y), numpy.array(x))
for i in range(len(sets.x_train)):
    ###########################
    ###########################
    # NAIVE BAYES
    # One classifier per split; scores accumulate into test_score.
    naive = NaiveBayesClassifier(alpha=ALPHA)
    naive.train(numpy.array(sets.x_train[i]), sets.y_train[i])
    test_score.append(naive.test_score(numpy.array(sets.x_test[i]), numpy.array(sets.y_test[i])))
if not test_score:
    break
elements.append(n_elements)
# Aggregate this configuration's scores: mean and distance to the minimum.
avg = numpy.round(numpy.average(numpy.array(test_score)), 2)
classification.append(avg)
min_ = numpy.round(numpy.array(test_score).min(), 2)
classificationerrormin.append(numpy.round(avg - min_, 2))
from Set import Set

# Demo: insert a stream of addresses into a 4-slot FIFO Set and show the
# final contents.  The instance is named `fifo_set` rather than `set`:
# shadowing the builtin `set` type makes later code in the module
# error-prone.
fifo_set = Set(4, 0, "FIFO")
addresses = [0, 1, 2, 3, 4, 0, 4, 3, 6]
for address in addresses:
    fifo_set.insert(address)
# NOTE(review): the original was collapsed onto one line; the print is
# assumed to follow the loop (one final summary) -- confirm if a per-insert
# trace was intended.
print(fifo_set.set_print())
def test_set_to_seq():
    """to_seq() yields each distinct element once, even after re-adding."""
    dup_set = Set([1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 1])
    assert dup_set.to_seq() == [1, 2, 3, 4, 5], "Test Case Failed"
    # Re-adding an element already present must not change the sequence.
    dup_set.add(1)
    assert dup_set.to_seq() == [1, 2, 3, 4, 5], "Test Case Failed"
def __init__(self):
    """Initialise empty edge/set bookkeeping and an empty MST."""
    self.listedges, self.listset = [], []
    self.MST = Set()
    self.MSTEdges = []
def test_size_zero(self):
    """A Set built from an empty list reports size 0."""
    empty = Set([])
    assert empty.size() == 0
def test_equals_with_different_size_sets(self):
    """Sets of different cardinality are never equal."""
    smaller = Set([1, 2])
    assert not self.test_set.equals(smaller)
# NOTE(review): reconstructed from a collapsed paste.  The first statements
# reference `elem`/`data`/`limits`, so they appear to be the body of an
# enclosing loop over the raw data that is not visible here.
curr = data[elem]
material = Material(curr['formula'])
material.getSpacegroup(curr)
# Keep only materials whose spacegroup lies strictly between the limits.
if limits[0] < material.spacegroup and limits[1] > material.spacegroup:
    material.separateElements()
    material.setData(curr)
    material.setValence()
    material.setAverages()
    materials.append(material)

# shuffle, split, and scale the data
size_70p = int(np.floor(len(materials)*0.7))  # gets index at 70% of material vector length
size_90p = int(np.floor(len(materials)*0.9))  # gets index at 90% of material vector length

# split material vector into train, test, and validate vectors
# NOTE(review): slice ends are already exclusive, so the `+1` starts drop
# the elements at index size_70p and size_90p entirely -- confirm intended.
train = Set(materials[0:size_70p])
test = Set(materials[size_70p+1:size_90p])
valid = Set(materials[size_90p+1:len(materials)])

# read in unsynthesized materials from file to predict values of
newMatNames = np.concatenate((readMaterials("TextFiles/datsimple.txt"), readMaterials("TextFiles/datacomplex.txt")), axis=0)
new_materials = []
for i in range(len(newMatNames)):
    material = Material(newMatNames[i])
    material.separateElements()
    material.setValence()
    material.setAverages()
    new_materials.append(material)
new_materials = shuffle(new_materials)
new = Set(new_materials)
def setup_method(self, method):
    """Build a fresh seven-element fixture Set before each test."""
    self.test_list = list(range(1, 8))
    self.test_set = Set(self.test_list)
def test_set():
    """End-to-end exercise of the Set API: add, has, cardinality, update,
    remove, union, intersection, difference, and the subset/superset
    predicates.  The to_string() asserts show that insertion order is
    preserved and duplicates are ignored.
    """
    # initialize set
    set_A = Set()
    # test add() -- duplicate add(2) is ignored
    set_A.add(2)
    set_A.add(2)
    set_A.add(3)
    set_A.add(-526)
    assert set_A.to_string() == '[2, 3, -526]'
    # test has()
    assert set_A.has(3) == True
    assert set_A.has(4) == False
    assert set_A.has(2) == True
    assert set_A.has(-526) == True
    # test cardinality()
    assert set_A.cardinality() == 3
    # test update() -- only unseen elements are appended
    set_A.update([])
    assert set_A.to_string() == '[2, 3, -526]'
    set_A.update([2, 3, 4])
    assert set_A.to_string() == '[2, 3, -526, 4]'
    # test remove() -- removing an absent element is a no-op
    set_A.remove(2)
    set_A.remove(3)
    assert set_A.to_string() == '[-526, 4]'
    set_A.remove(-526)
    set_A.remove(4)
    assert set_A.to_string() == '[]'
    set_A.remove(100)
    assert set_A.to_string() == '[]'
    # test union()
    set_B = Set()
    set_B.update([5, 2, 743, 21, 3, 9])
    set_A.update([2, 3, -526, 4])
    set_union = set_A.union(set_B)
    assert set_union.to_string() == '[2, 3, -526, 4, 5, 743, 21, 9]'
    # test intersection()
    set_intersection = set_A.intersection(set_B)
    assert set_intersection.to_string() == '[2, 3]'
    # test difference()
    set_difference = set_A.difference(set_B)
    assert set_difference.to_string() == '[-526, 4]'
    # test is_subset()
    set_C = Set()
    set_C.update([2, 3])
    assert set_C.is_subset(set_A) == True
    assert set_C.is_subset(set_B) == True
    set_difference.add(2)
    assert set_C.is_subset(set_difference) == False
    # test is_proper_subset() -- a set is never a proper subset of itself
    set_D = Set()
    set_D.update([3, 3, 2])
    assert set_C.is_proper_subset(set_A) == True
    assert set_C.is_proper_subset(set_B) == True
    assert set_C.is_proper_subset(set_D) == False
    assert set_C.is_proper_subset(set_C) == False
    # test is_superset()
    assert set_A.is_superset(set_C) == True
    assert set_B.is_superset(set_C) == True
    assert set_C.is_superset(set_A) == False
def setUp(self):
    """Create an empty Set fixture before each test."""
    self.s = Set()
def is_surjective(self):
    """Return True when the image covers the whole codomain.

    self.codomain is wrapped in a Set because, in subclasses of
    Function, it might not already be one.
    """
    codomain_set = Set(self.codomain)
    return self._image() == codomain_set
def Sn(n):
    """Return the symmetric group on n symbols (order n!).

    Elements are tuples; the operation maps a pair x to the tuple whose
    j-th entry is x[0][x[1][j]].
    """
    permutations = Set(itertools.permutations(range(n)))
    compose = Function(permutations * permutations, permutations,
                       lambda x: tuple(x[0][j] for j in x[1]))
    return Group(permutations, compose)
def __init__(self):
    """Create the default feature containers."""
    # Primary feature list ("pierwsza" is Polish for "first").
    self.listoffeatures = ListOfFeatures("pierwsza")
    # Empty working set.
    self.set = Set()
    # Parameter features, labelled "params".
    self.listOfParams = ListOfFeatures("params")
def __init__(self, function_frame):
    """Initialise the forward-greedy strategy with an empty starting set.

    Args:
        function_frame: passed unchanged to the parent constructor.
    """
    super().__init__(function_frame)
    self.algorithm_name = "Forward_Greedy"
    empty_start = Set([])
    self.set_up(empty_start)
# NOTE(review): indentation reconstructed from a collapsed one-line paste;
# f, MAX_GRAM, MIN_FREQ, LSA, Set and time are defined elsewhere in the file.
P_EIG = 0.5
test_score = []
# Old parameter-sweep variants, kept commented out in the original:
#min_freq = [2]
#max_gram = [1, 2, 3, 4, 5, 6]
#p_eig = [0.2]
#for mi in min_freq:
#    lsa = LSA(MAX_GRAM, mi, P_EIG, train_set.phrases)
#    for ma in max_gram:
#        lsa.append(LSA(ma, MIN_FREQ, P_EIG, train_set.phrases))
#    for p in p_eig:
#lsa = LSA(MAX_GRAM, MIN_FREQ, p, train_set.phrases)
# One LSA training pass per cross-validation split in f.splits.
for train_index, test_index in f.splits:
    train_set = Set(f.x[train_index], f.y[train_index])
    test_set = Set(f.x[test_index], f.y[test_index])
    print("Data imported.")
    lsa = LSA(MAX_GRAM, MIN_FREQ, P_EIG, train_set.phrases)
    print("LSA created.")
    ###########################
    # LSA
    human_keywords = lsa.manage_keywords(f.keywords)
    print("Start Train LSA", time.ctime(time.time()))
    # process_examples appears to return an executor; block until done.
    ex1 = lsa.process_examples(human_keywords, train_set)
    ex1.shutdown(wait=True)
    print("End Train LSA", time.ctime(time.time()))
    print("LSA Results computed.")
    ###########################
# NOTE(review): indentation reconstructed from a collapsed paste; the
# aggregate statistics are assumed to be computed once per LSA
# configuration, after the 50 resampling rounds -- confirm.
for p in p_eig:
    lsa.append(LSA(MAX_GRAM, MIN_FREQ, p, f.x))
for l in lsa:
    print("Parameters: Min_freq =", l.min_freq, "NGram_max =", l.ngram_max, "P_eig =", l.p_eig * 100)
    test_score = []
    print("LSA created.")
    ###########################
    # LSA
    human_keywords = l.manage_keywords(f.keywords)
    lsa_results = l.train_phrases(human_keywords)
    print("LSA Results computed.")
    # 50 random resamples; all split scores accumulate into test_score.
    for j in range(50):
        sets = Set(lsa_results, f.y, f.x)
        for i in range(len(sets.x_train)):
            ###########################
            ###########################
            # NAIVE BAYES
            naive = NaiveBayesClassifier(alpha=0.01)
            naive.train(numpy.array(sets.x_train[i]), sets.y_train[i])
            test_score.append(
                naive.test_score(numpy.array(sets.x_test[i]), numpy.array(sets.y_test[i])))
    # Aggregate for this configuration: mean plus lower error bar.
    avg = numpy.round(numpy.average(numpy.array(test_score)), 2)
    y.append(avg)
    min_ = numpy.round(numpy.array(test_score).min(), 2)
    yerrormin.append(numpy.round(avg - min_, 2))
    max_ = numpy.round(numpy.array(test_score).max(), 2)
def is_normal_subgroup(self, other):
    """Checks if self is a normal subgroup of other."""
    # Must be a subgroup at all before normality makes sense.
    if not self <= other:
        return False
    # Normality: for every g in other, the left coset gH equals the
    # right coset Hg.
    for g in other:
        left_coset = Set(g * h for h in self)
        right_coset = Set(h * g for h in self)
        if left_coset != right_coset:
            return False
    return True
from Set import Set

# Demo of the custom Set class: add (with a duplicate), query, remove,
# and clear, printing the state at each stage.  The instance is named
# `my_set` rather than `set`: shadowing the builtin `set` type makes
# later code in the module error-prone.
my_set = Set()  # Create an empty set
my_set.add(45)
my_set.add(13)
my_set.add(43)
my_set.add(43)  # duplicate: the set keeps a single copy
my_set.add(1)
my_set.add(2)
print("Elements in set: " + my_set.toString())
print("Number of elements in set: " + str(my_set.getSize()))
print("Is 1 in set? " + str(my_set.contains(1)))
print("Is 11 in set? " + str(my_set.contains(11)))
my_set.remove(2)
print("After deleting 2, the set is " + my_set.toString())
print("The internal table for set is " + my_set.getTable())
my_set.clear()
print("After deleting all elements")
print("The internal table for set is " + my_set.getTable())
def multiply_cosets(x):
    """Multiply a pair of cosets.

    pick() presumably returns a representative of the first coset --
    confirm against Set.pick; it is combined (via the group operation
    captured from the enclosing scope) with every element of the second.
    """
    representative = x[0].pick()
    return Set(self.bin_op((representative, g)) for g in x[1])
# NOTE(review): indentation reconstructed from a collapsed one-line paste.
P_EIG = 0.5
time_score = []
train_set = []
test_set = []
# Old sweep variants, kept commented out in the original:
#min_freq = [2]
max_gram = [1, 3, 4, 5, 6]
p_eig = [0.75, 0.8, 0.85, 0.9, 0.95, 1]
#for mi in min_freq:
#    lsa = LSA(MAX_GRAM, mi, P_EIG, train_set.phrases)
#    for ma in max_gram:
#        lsa.append(LSA(ma, MIN_FREQ, P_EIG, train_set.phrases))
#    for p in p_eig:
#lsa = LSA(MAX_GRAM, MIN_FREQ, p, train_set.phrases)
# Materialise one train/test Set pair per cross-validation split.
for train_index, test_index in f.splits:
    train_set.append(Set(f.x[train_index], f.y[train_index]))
    test_set.append(Set(f.x[test_index], f.y[test_index]))
# Sweep the p_eig hyperparameter; train one LSA model per split.
for p in p_eig:
    test_score = []
    for i in range(len(train_set)):
        lsa = LSA(MAX_GRAM, MIN_FREQ, p, train_set[i].phrases)
        print("LSA created.")
        ###########################
        # LSA
        human_keywords = lsa.manage_keywords(f.keywords)
        print("Start", datetime.datetime.now())
        aux1 = datetime.datetime.now()  # timing anchor for this run
        # process_examples appears to return an executor; block until done.
        ex1 = lsa.process_examples(human_keywords, train_set[i])
        ex1.shutdown(wait=True)
def kernel(self):
    """Returns the kernel of the homomorphism as a Group object"""
    # Domain elements that map to the identity of the codomain.
    kernel_elems = Set(g.elem for g in self.domain if self(g) == self.codomain.e)
    restricted_op = self.domain.bin_op.new_domains(
        kernel_elems * kernel_elems, kernel_elems)
    return Group(kernel_elems, restricted_op)
#!/usr/bin/python3
import readWrite
from Set import Set
import addCards

if __name__ == "__main__":
    # Refresh the saved file, then load its two stored components.
    readWrite.repopSave()
    loaded = readWrite.load("testData.txt")
    f_data, c_data = loaded[0], loaded[1]
    # Wrap the loaded data, let the user add cards, and persist it back.
    deck = Set(f_data, c_data)
    addCards.addCards(deck)
    readWrite.write("testData.txt", deck.getf(), deck.getc())
def image(self):
    """Returns the image of the homomorphism as a Group object"""
    image_elems = Set(g.elem for g in self._image())
    restricted_op = self.codomain.bin_op.new_domains(
        image_elems * image_elems, image_elems)
    return Group(image_elems, restricted_op)
def testDiff(self):
    """diff() returns the elements of the first set absent from the second."""
    sample = Set([1, 2, 3, 4, 1, 3, 7])
    other = Set([2, 3])
    # assertEqual rather than assertTrue(... == ...): on failure it
    # reports both actual and expected values instead of just "False".
    self.assertEqual(sample.diff(other), [1, 4, 7])
def Zn(n):
    """Return the cyclic group of order n: integers mod n under addition."""
    elements = Set(range(n))
    add_mod_n = Function(elements * elements, elements,
                         lambda pair: (pair[0] + pair[1]) % n)
    return Group(elements, add_mod_n)
def testIntersection(self):
    """Intersection is commutative and yields exactly the common elements."""
    first = Set([3, 4, 5, 8])
    second = Set([1, 7, 3, 8, 3])
    self.assertTrue(
        first.intersection(second) == second.intersection(first)
        and first.intersection(second) == [3, 8])
def create_set(self, set_dict):
    """Build a Set from set_dict and store it on the instance.

    Args:
        set_dict: passed straight through to the Set constructor --
            presumably a dict describing the set; confirm against
            Set.__init__.
    """
    self.set = Set(set_dict)