import random


def split_set_to_train_test(dataset, train_size):
    # Randomly draw train_size rows without replacement; the remainder becomes the test set.
    copy = list(dataset)
    train_set = []
    while len(train_set) < train_size:
        index = random.randrange(len(copy))
        train_set.append(copy.pop(index))
    test_set = copy
    return train_set, test_set
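A minimal usage sketch on made-up rows (note that train_size here is an absolute row count, not a ratio):

data = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
train, test = split_set_to_train_test(data, train_size=3)
print(len(train), len(test))  # 3 2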
Example #2
def splitDataset(dataset, splitRatio):
    # Hold-out split: splitRatio is the fraction of rows that go to the training set.
    trainSize = int(len(dataset) * splitRatio)
    trainSet = []
    copy = list(dataset)
    while len(trainSet) < trainSize:
        index = random.randrange(len(copy))  # requires the `random` module
        trainSet.append(copy.pop(index))
    return [trainSet, copy]
def hold_out_split(dataset, splitRatio, isRandom):
    # Hold-out split; optionally shuffles the rows before slicing off the training set.
    trainSize = int(len(dataset) * splitRatio)
    trainSet = []
    copy = list(dataset)
    if isRandom:
        np.random.shuffle(copy)  # requires `import numpy as np`
    while len(trainSet) < trainSize:
        trainSet.append(copy.pop())
    return [trainSet, copy]
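For illustration, a call on hypothetical data; numpy must be importable as np because the shuffle above uses it:

import numpy as np

data = list(range(10))
train, test = hold_out_split(data, splitRatio=0.8, isRandom=True)
print(len(train), len(test))  # 8 2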
Example #4
def check_cyclic_laser(laser):
    '''Checks if laser forms a loop, returns Boolean.'''
    copy = laser.copy()
    for i in range(len(laser)):
        element = copy.pop()
        # count how many times the popped element still occurs earlier in the path
        count = copy.count(element)
        if count != 0 and copy[-1] == copy[count - 1]:
            return True
    return False
def divide_dataset_k(dataset, k):
    # Partition the dataset into k equally sized folds, shuffling before each draw.
    trainSize = int(len(dataset) / k)
    dividedSet = {}
    copy = list(dataset)
    for i in range(k):
        np.random.shuffle(copy)  # requires `import numpy as np`
        if i not in dividedSet:
            dividedSet[i] = []
        while len(dividedSet[i]) < trainSize:
            dividedSet[i].append(copy.pop())
    return dividedSet
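A short sketch of the k-fold partition on made-up data (again assumes numpy is imported as np):

import numpy as np

folds = divide_dataset_k(list(range(9)), k=3)
for i, fold in folds.items():
    print(i, fold)  # three folds of three items each, drawn without replacement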
Example #6
    def solve(self, node, future_sets):
        # Termination: no unvisited nodes remain, so return the distance from
        # the current node straight back to the start node.
        if len(future_sets) == 0:
            return self.distanceMatrix[node][self.start_node]
        _minDistance = np.inf
        # Distances of routes that go from `node` through the nodes in
        # future_sets and finally return to the start node.
        distance = []
        # Try each unvisited node as the next stop.
        for i in range(len(future_sets)):
            s_i = future_sets[i]
            copy = future_sets[:]
            copy.pop(i)
            distance.append(self.distanceMatrix[node][s_i] +
                            self.solve(s_i, copy))
        # Dynamic-programming recurrence, evaluated recursively.
        _minDistance = min(distance)
        next_one = future_sets[distance.index(_minDistance)]
        # Encode the unvisited-node set and record the best successor for this state.
        c = self.transfer(future_sets)
        self.array[node][c] = next_one
        return _minDistance
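The method above is a recursive Held-Karp-style recurrence: the cost of finishing a tour from node with unvisited set S is the minimum over s in S of distanceMatrix[node][s] plus the cost of finishing from s with S minus {s}. A standalone sketch of just that recurrence, without the class bookkeeping; the distance matrix and node labels below are made up for illustration:

def tsp_cost(node, future, dist, start):
    # No unvisited nodes left: close the tour back to the start node.
    if not future:
        return dist[node][start]
    # Try every unvisited node as the next stop and keep the cheapest continuation.
    return min(dist[node][s] + tsp_cost(s, [x for x in future if x != s], dist, start)
               for s in future)

dist = [[0, 2, 9, 10],
        [1, 0, 6, 4],
        [15, 7, 0, 8],
        [6, 3, 12, 0]]
print(tsp_cost(0, [1, 2, 3], dist, start=0))  # cost of the cheapest tour starting and ending at node 0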
Example #7
def populatePairs():
    # Record every (island, neighbour) pair as a frozenset so duplicates collapse.
    for island in islandList:
        copy = island.adjacentIslands.copy()
        while copy:
            popped = copy.pop()
            print("Found pair:")
            island.printCoords()
            popped.printCoords()
            pair = frozenset({island, popped})
            adjacentPairs.add(pair)
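The frozenset above is what makes (island, neighbour) and (neighbour, island) count as one pair; a tiny standalone illustration with strings standing in for island objects:

pairs = set()
pairs.add(frozenset({"A", "B"}))
pairs.add(frozenset({"B", "A"}))  # same unordered pair, so the set does not grow
print(len(pairs))  # 1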
Example #8
    def specialize_np(self, fs, tagged, cue=None):
        """ This method takes an NP SemSpec specifically and puts it into an N-Tuple (the last N-Tuple). """
        replace = None
        if fs.m.type() == "HeadingSchema":
            new_od = fs.m.tag.type()
            replace = 'heading'  # Hack
        elif fs.m.type() == 'SPG':
            new_od = {
                'objectDescriptor': self.get_objectDescriptor(fs.m.landmark)
            }
            replace = 'goal'
            self._stacked.append(new_od)
        elif cue:
            new_od = {'objectDescriptor': self.get_objectDescriptor(fs.m)}
            self._stacked.append(new_od)
        else:
            new_od = self.resolve_anaphoricOne(fs.m)
            self._stacked.append(new_od)

        for i, param in enumerate(tagged.parameters):
            p = param.__dict__
            copy = deepcopy(p)  # requires `from copy import deepcopy`
            for key, value in p.items():
                if type(value) == Struct:
                    p2 = value.__dict__
                    for k, v in p2.items():
                        if "*" in str(k):
                            temp = str(k).replace("*", "")
                            if (temp == 'heading' or temp == 'goal'):
                                if temp == replace:
                                    p2[temp] = new_od
                                    #p2.pop(k)
                                    #copy[key] = Struct(p2)
                                else:
                                    p2[temp] = None
                            else:
                                p2[temp] = new_od
                                #p2.pop(k)
                                #copy[key] = Struct(p2)
                            p2.pop(k)
                            copy[key] = Struct(p2)
                elif "*" in str(key):
                    temp = str(key).replace("*", "")
                    p[temp] = p.pop(key)
                    copy[temp] = new_od
                    copy.pop(key)
                    #p[temp] = new_od

        tagged.parameters[i] = Struct(copy)
        """
        meaning = fs.m
        if meaning.ontological_category.type() == "antecedent":
            test = self.resolve_anaphoricOne(meaning)
            print(test)
        else:
            objectDescriptor = self.get_objectDescriptor(fs.m)
            print(objectDescriptor)

        """

        return tagged
def nonzero_min(l):
    # Smallest non-zero value in l (raises ValueError if every entry is zero).
    copy = [x for x in l if x != 0]
    return min(copy)
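A quick check on a made-up list:

print(nonzero_min([0, 4, 0, 2, 7]))  # 2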
    def augmentation(self, k, v, graph, edgelist, mst):
        '''
        Augments an edge between each source and destination node for each tie set.
        Finds the final reliability and cost, and outputs the graph with the added edge.
        '''
        import copy

        # Initializations
        costs = []
        rel_ring = []
        rel_others = []
        mstcopy = copy.deepcopy(mst)

        # copy the spanning tree to check for swapped nodes (means its the same edge)
        swappedmst = copy.deepcopy(mst)
        # generate edges with swapped nodes since those edges are equivalent (e.g. AB and BA are same edge)
        for indx, x in enumerate(swappedmst):
            swappedmst[indx][0], swappedmst[indx][1] = x[1], x[0]

        # get the source node of the tie set
        start = self.city_number_to_letter[v[0][0]]
        # get the destination node of the tie set
        stop = self.city_number_to_letter[k]

        # add an edge between source and destination node to create a loop
        for edge in edgelist:
            if (edge.vertice_1 == start and edge.vertice_2 == stop) or (
                    edge.vertice_1 == stop and edge.vertice_2 == start):
                # add the corresponding values to their respective arrays
                c = edge.getCost()
                w = edge.getReliability()
                costs.append(c)
                rel_ring.append(w)
        # assumes the (start, stop) edge exists in edgelist; w and c come from the match above
        graph.addEdge(self.city_letter_to_number[start],
                      self.city_letter_to_number[stop], w, c)

        # add all the costs and reliabilities in original tie set to arrays and add those edges to the graph
        for x in range(0, len(v)):
            graph.addEdge(v[x][0], v[x][1], v[x][2], v[x][3])
            costs.append(v[x][3])
            rel_ring.append(v[x][2])

        # Reliability calculation of loop
        # follow the reliability formula for a loop = P(all success) + P(one failure)
        # find probability of all edges being successful = product of all values in reliability array
        RelofLoop = self.prodofList(rel_ring)
        # add probabilities where one and only one edge fails
        for indx, r in enumerate(rel_ring):
            # probability that exactly this edge fails while all the others succeed
            remaining = rel_ring.copy()
            failure = 1 - r
            remaining.pop(indx)
            tmp = self.prodofList(remaining)
            product = failure * tmp
            RelofLoop += product

        # steps to find the remaining edges in the spanning tree not included in the loop
        # those remaining edges are multiplied by the reliability of the loop
        for x in v:
            # pop all edges included in the tie set
            if x in mst:
                mstcopy.pop(mstcopy.index(x))
            elif x in swappedmst:
                try:
                    mstcopy.pop(mstcopy.index(x))
                except ValueError:
                    # the edge is stored with its endpoints swapped; swap them back before removing
                    copyofx = x.copy()
                    copyofx[0], copyofx[1] = x[1], x[0]
                    mstcopy.pop(mstcopy.index(copyofx))

        # add all reliabilities, costs and edges not in the loop but in the network to new array
        for x in mstcopy:
            rel_others.append(x[2])
            costs.append(x[3])
            graph.addEdge(x[0], x[1], x[2], x[3])

        # find final reliability and cost
        restofrel = self.prodofList(rel_others)
        finalCost = self.additionofList(costs)
        finalrel = restofrel * RelofLoop
        return finalrel, finalCost, graph
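The loop-reliability step above follows R_loop = P(all ring edges up) + sum over edges of P(exactly that edge down). A self-contained sketch of just that formula, with made-up edge reliabilities (uses math.prod, Python 3.8+):

from math import prod

def loop_reliability(rels):
    # Probability that every edge in the ring works...
    all_up = prod(rels)
    # ...plus the probability that exactly one edge fails (the ring still connects).
    one_down = sum((1 - r) * prod(rels[:i] + rels[i + 1:]) for i, r in enumerate(rels))
    return all_up + one_down

print(loop_reliability([0.9, 0.95, 0.99]))  # 0.9936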
Example #12
        lines1 = sorted(fin.readline()[:-1].split(" "), key=str.lower)
        lines2 = sorted(fin.readline()[:-1].split(" "), key=str.lower)

        point_war = 0
        point_dwar = 0
        copy = lines2[:]

        # war game: match each card in lines1 with the smallest remaining card in copy
        # that beats it; if none exists, count a point and discard the weakest card
        for x in range(len(lines1)):
            played = None
            for y in range(len(copy)):
                if lines1[x] < copy[y]:
                    played = copy.pop(y)
                    break
            if played is None:
                point_war += 1
                played = copy.pop(0)

        # dwar game: count a point and drop lines2's weakest card whenever lines1's card
        # beats it; otherwise drop lines2's strongest card
        for x in range(len(lines1)):
            if lines1[x] > lines2[0]:
                lines2.pop(0)
                point_dwar += 1
            else:
                lines2.pop(len(lines2) - 1)