Example #1
def christofides_tsp(graph, starting_node=0):
    """
    Function implements Christofides' algorithm beginning at
    a fixed point, which will be San Francisco by default.
        graph: 2d numpy array matrix where the index of the distances is the 
        position of the cities affected created in the dict below eg San Francisco at index 0,
        Seattle at index 1, LA index 2 etc. These cities are rep with their airport codes
        The distance from an airport to itself is always 0.
        The distance from any of the airports to the other is in miles and filled in the array.
        starting_node: of the TSP
    Returns:
        tour given by christofies TSP algorithm
    """

    mst = minimal_spanning_tree(
        graph, 'Prim',
        starting_node=starting_node)  # build the MST using the function defined above
    odd_degree_nodes = list(
        _get_odd_degree_vertices(mst))  # get the odd-degree vertices of the MST
    odd_degree_nodes_ix = np.ix_(odd_degree_nodes, odd_degree_nodes)
    nx_graph = nx.from_numpy_array(-1 * graph[odd_degree_nodes_ix])
    matching = max_weight_matching(
        nx_graph, maxcardinality=True)  # minimum-weight perfect matching (weights negated above)
    euler_multigraph = nx.MultiGraph(
        mst)  # multigraph that admits an Euler circuit once the matching edges are added
    for edge in matching:
        euler_multigraph.add_edge(
            odd_degree_nodes[edge[0]],
            odd_degree_nodes[edge[1]],
            weight=graph[odd_degree_nodes[edge[0]]][odd_degree_nodes[edge[1]]])
    euler_tour = list(eulerian_circuit(euler_multigraph, source=starting_node))
    path = list(itertools.chain.from_iterable(
        euler_tour))  # flatten the tour's edge list into a vertex sequence
    return _remove_repeated_vertices(path, starting_node)[:-1]
Example #2
def christofides_tsp(graph, starting_node=0):
    """
    Christofides TSP algorithm
    http://www.dtic.mil/dtic/tr/fulltext/u2/a025602.pdf
    Args:
        graph: 2d numpy array matrix
        starting_node: of the TSP
    Returns:
        tour given by Christofides' TSP algorithm
    Examples:
        >>> import numpy as np
        >>> graph = np.array([[  0, 300, 250, 190, 230],
        ...                   [300,   0, 230, 330, 150],
        ...                   [250, 230,   0, 240, 120],
        ...                   [190, 330, 240,   0, 220],
        ...                   [230, 150, 120, 220,   0]])
        >>> christofides_tsp(graph)
    """

    mst = minimal_spanning_tree(graph, 'Prim', starting_node=starting_node)
    odd_degree_nodes = list(_get_odd_degree_vertices(mst))
    odd_degree_nodes_ix = np.ix_(odd_degree_nodes, odd_degree_nodes)
    nx_graph = nx.from_numpy_array(-1 * graph[odd_degree_nodes_ix])
    matching = max_weight_matching(nx_graph, maxcardinality=True)
    euler_multigraph = nx.MultiGraph(mst)
    for edge in matching:
        euler_multigraph.add_edge(odd_degree_nodes[edge[0]], odd_degree_nodes[edge[1]],
                                  weight=graph[odd_degree_nodes[edge[0]]][odd_degree_nodes[edge[1]]])
    euler_tour = list(eulerian_circuit(euler_multigraph, source=starting_node))
    path = list(itertools.chain.from_iterable(euler_tour))
    return _remove_repeated_vertices(path, starting_node)[:-1]
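
Both variants above rely on the same trick: distances are negated so that networkx's max_weight_matching, applied with maxcardinality=True, returns a minimum-weight perfect matching on the odd-degree vertices. A minimal self-contained sketch of that trick on a toy 4-node graph (weights invented):

import networkx as nx
from networkx.algorithms.matching import max_weight_matching

g = nx.Graph()
g.add_weighted_edges_from([(0, 1, 5), (0, 2, 1), (0, 3, 4),
                           (1, 2, 3), (1, 3, 2), (2, 3, 6)])
# negate the weights so that the maximum-weight matching is a minimum-weight one
neg = nx.Graph([(u, v, {"weight": -d["weight"]}) for u, v, d in g.edges(data=True)])
print(max_weight_matching(neg, maxcardinality=True))  # e.g. {(0, 2), (1, 3)}, total weight 3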
Example #3
def match_seller_with_nearest_buyer(buy_orders, sell_orders,
                                    banned_user_matches, max_number_of_shares):
    def get_cost(buy_order, sell_order):
        if buy_order["price"] < sell_order["price"] or (
            (buy_order["user_id"], sell_order["user_id"])
                in banned_user_matches):
            return None
        return abs(buy_order["price"] -
                   sell_order["price"]) * max_number_of_shares * 2 + abs(
                       buy_order["number_of_shares"] -
                       sell_order["number_of_shares"])

    graph = nx.Graph()
    for sell_order in sell_orders:
        for buy_order in buy_orders:
            cost = get_cost(buy_order, sell_order)
            if cost is not None:
                # Invert the cost, since the algorithm computes the maximum total instead of the
                # minimum
                graph.add_edge(buy_order["id"], sell_order["id"], weight=-cost)

    matching = max_weight_matching(graph, maxcardinality=True)

    buy_order_ids = {buy_order["id"] for buy_order in buy_orders}

    result = set()
    for pair in matching:
        buy_order_id = pair[0] if pair[0] in buy_order_ids else pair[1]
        sell_order_id = pair[1] if pair[0] in buy_order_ids else pair[0]
        result.add((buy_order_id, sell_order_id))

    return result
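
A hypothetical call to the function above, with minimal order dicts shaped the way the code expects (the ids, users, prices, and share counts are made up):

buy_orders = [{"id": "b1", "user_id": "u1", "price": 105, "number_of_shares": 10}]
sell_orders = [{"id": "s1", "user_id": "u2", "price": 100, "number_of_shares": 8}]
print(match_seller_with_nearest_buyer(buy_orders, sell_orders,
                                      banned_user_matches=set(),
                                      max_number_of_shares=100))
# expected: {('b1', 's1')}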
Example #4
 def find_minimum_weight_matching(self):
     """Finds a minimum weight perfect matching."""
     print("computing delaunay of odd_deg_nodes")
     circumcenters, edges, tri_points, tri_neighbors = triang.delaunay(
         self.x[self.odd_deg_nodes], self.y[self.odd_deg_nodes])
     o = nx.Graph()
     # negate distances so that max_weight_matching yields a minimum weight matching
     o.dist_func = lambda i, j: -self.euclidean_dist(i, j)
     for i, j in edges:
         mi = self.odd_deg_nodes[i]
         mj = self.odd_deg_nodes[j]
         o.add_edge(mi, mj, weight=o.dist_func(mi, mj))
     self.o = o
     print('#edges:', len(edges), '#nodes:', len(o.nodes()), len(self.odd_deg_nodes))
     print("computing minimum matching")
     t1 = time.time()
     # note: this code assumes networkx 1.x, where max_weight_matching returned a dict
     mates = max_weight_matching(o, maxcardinality=True)
     t2 = time.time()
     print("took %s" % (t2 - t1))
     m = nx.Graph()
     for i in mates.keys():
         m.add_edge(i, mates[i], weight=self.g.dist_func(i, mates[i]))
     print('#edges:', len(m.edges()), '#nodes:', len(m.nodes()))
     self.plot_edges(m.edges(), 'r-', 2)
     self.m = m
Example #5
 def get_max_matchable_edges_networkx(self, ndds=None):
     """Find the set of edges which must be in every maximum matching.
     """
     g = nx_Graph()
     translate = {}
     for edge in self.es:
         v1 = edge.source()
         v2 = edge.target()
         if v2.index() < v1.index():
             continue
         new_edge = (v1.index(), v2.index())
         if self.edge_exists(v2, v1) or edge.donor().is_altruistic():
             g.add_node(new_edge[0])
             g.add_node(new_edge[1])
             g.add_edge(v1.index(), v2.index())
             translate[new_edge] = edge
     count = len(translate)
     for ndd in ndds:
         for edge in ndd.edges:
             v1 = edge.donor()
             v2 = edge.target()
             new_edge = (v2.index(), count + v1.index())
             g.add_node(new_edge[0])
             g.add_node(new_edge[1])
             g.add_edge(v2.index(), count + v1.index())
             translate[new_edge] = edge
     # TODO Add NDD edges to this graph!
     largest = max_weight_matching(g)
     LOGGER.debug("Largest matching has size %d", len(largest))
     edges = []
     for v1, v2 in largest.items():
         if v1 < v2:
             edges.append([v1, v2])
     matchable = []
     while edges:
         v1, v2 = edges.pop()
         LOGGER.debug("Testing [%s, %s]", v1, v2)
         g.remove_edge(v1, v2)
         new_max = max_weight_matching(g)
         if len(new_max) < len(largest):
             LOGGER.debug("new matching has size %d", len(new_max))
             edges = list(filter(lambda x: x[0] in new_max, edges))
             matchable.append((v1, v2))
             LOGGER.debug("[%s, %s] is matchable", v1, v2)
         g.add_edge(v1, v2)
     LOGGER.info("Found %s maximally matchable edges" % len(matchable))
     return (translate[e] for e in matchable)
Example #6
 def run(self, players):
     # logger.info('isinstance(players, PlayerGraph)= '+str(isinstance(players, PlayerGraph)))
     if not isinstance(players, PlayersGraph):
         # logger.info(type(players))
         players = PlayersGraph().load_players(players)
     return max_weight_matching(players,
                                maxcardinality=False,
                                weight="retain_weight")
Example #7
def get_mmr(edges, n_ref, n_pred):
    G = nx.Graph()
    G.add_weighted_edges_from(edges)
    G = G.to_undirected()

    max_match = matching.max_weight_matching(G, maxcardinality=True)
    sum_ = 0
    for e in max_match:
        sum_ += G[e[0]][e[1]]['weight']
    return sum_ / min(n_ref, n_pred)
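
A hypothetical call to get_mmr above, with edges given as (reference, prediction, weight) triples:

edges = [("r1", "p1", 0.75), ("r1", "p2", 0.25), ("r2", "p2", 0.5)]
print(get_mmr(edges, n_ref=2, n_pred=2))  # (0.75 + 0.5) / 2 = 0.625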
Example #8
def exact_join(rider_set, driver_set, PI_0):
    G = nx.Graph()
    C = candidate_set_gen(rider_set, driver_set, PI_0)
    for tup in C:
        shared_route_percentage = get_shared_route_percentage(tup[0], tup[1])
        if is_satisfied_with_shared_requirement(tup[0].sharing_requirement,
                                                shared_route_percentage):
            G.add_edge(tup[0], tup[1], weight=shared_route_percentage)
    matching = max_weight_matching(G)
    return matching
Example #9
def KM(adj_matrix, N=10):
    G = nx.algorithms.bipartite.matrix.from_biadjacency_matrix(
        sparse.csr_matrix(adj_matrix))
    M = matching.max_weight_matching(G)
    poss = np.zeros([N], dtype=np.int32)
    for i, j in M:
        if i < N:
            poss[i] = j - N
        else:
            poss[j] = i - N
    return poss
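
A hypothetical call to KM above, assuming a 10x10 nonnegative score matrix (scipy is needed for the sparse conversion inside the function):

import numpy as np

rng = np.random.default_rng(0)
scores = rng.random((10, 10)) + 0.01  # keep every entry nonzero so all edges exist
assignment = KM(scores)               # assignment[i] = column index matched to row i
print(assignment)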
Example #10
def get_weight_of_new_iteration_graph_from_formula(previous_iteration, node_u, node_v):
    neighbors_u = list(neighbor_degree_sorted_list[node_u][0])
    #     print(neighbors_u)
    neighbors_v = list(neighbor_degree_sorted_list[node_v][0])
    #     print(neighbors_v)

    neighbors_u = list(set(neighbors_u) & set(list_of_nodes_selected_sorted))
    neighbors_v = list(set(neighbors_v) & set(list_of_nodes_selected_sorted))
    #     print(neighbors_u)
    #     print(neighbors_v)
    combined = [neighbors_u, neighbors_v]
    #     print(combined)
    all_possible_edges = list(itertools.product(*combined))
    subgraph = nx.Graph()
    degree_u = len(neighbors_u)
    degree_v = len(neighbors_v)
    for edge in all_possible_edges:
    #     if temp_matrix_weight_matching_matrix.has_edge(*edge):
        if edge[0] != edge[1]:
            node_x = edge[0]
            node_y = edge[1]
            subgraph.add_edge(*edge, weight = previous_iteration[node_x][node_y]['weight'])
    #     print('Created subgraph')
    #     print(subgraph.nodes())
    #     print(subgraph.edges(data=True))
    #     print('---')

    trialing_using_nx_max_matching = True
    if trialing_using_nx_max_matching:
        # max_weight_matching is a function of networkX that performs the maximal weight matching.
        # By using the edge weights of the edges as the values of RoleSim's previous iteration values,
        # and allowing only the edges that connect neighbors of u to neighbors
        # of v, we allow networkX to perform the calculation for us.
        maximal_matching_possibilities = [max_weight_matching(subgraph)]
    else:
        # This method was found to be mildly suboptimal compared to the method from networkX above.
        maximal_matching_possibilities = all_maximal_matchings(subgraph)
    # print(maximal_matching_possibilities)
    maximum_weight_of_all = 0
    best_possibility = maximal_matching_possibilities[0]
    for possibility in maximal_matching_possibilities:
        current_weight = 0
        for edge in possibility:
            current_weight += subgraph[edge[0]][edge[1]]['weight']
        if current_weight >= maximum_weight_of_all:
            maximum_weight_of_all = current_weight
            best_possibility = possibility
            
    if degree_u == 0 and degree_v == 0:
        # Edge case, in case neighbors are missing from the pruned set of nodes, which is a certain possibility.
        maximum_weight_value = 0
    else:
        maximum_weight_value = maximum_weight_of_all/max(degree_u, degree_v)
    return maximum_weight_value
Example #11
def biMatch(live, boxes):

    sigma = 8.0
    min_gate = 5.0

    #print("live contains : " + str(live) )
    #print("boxes contains : " + str(boxes) )

    def weight(orig, post, R):
        #print ("weight()'s orig : " + str( orig[currKey][1] ))
        #print ("weight()'s post : " + str( post[currKey][1] ))
        beforeRect = np.array(orig[1][-1])
        postRect = np.array(post)
        distance = np.linalg.norm(postRect - beforeRect)
        return scipy.stats.norm(0, R).pdf(distance)

    # in some loop, which ends with live = after_objects before continuing around
    # and in whose first iteration live is empty.
    B = nx.Graph()

    #print("length of live :" + str( len(live) ) )

    for oi in live:
        #print ("for oi in range(len(live)): " + str(oi))
        B.add_node("before{}".format(oi))

    for pi in range(len(boxes)):  # already augmented with the “stay” objects
        B.add_node("created{}".format(pi))
        B.add_node("after{}".format(pi))
        B.add_edge("created{}".format(pi), "after{}".format(pi),
                   weight=scipy.stats.norm(0, sigma).pdf(min_gate * sigma))

    for oi, o in live.items():
        for pi, p in enumerate(boxes):
            B.add_edge("before{}".format(oi), "after{}".format(pi), weight=weight(o, p, 8.0))

    match = matching.max_weight_matching(B)

    #print ("match contents " + str(match) )

    return match
Example #12
def construct_matching():
	maxWeight = sorted(G.edges(data=True), key=lambda x: x[2]['weight'], reverse=True)[0][2]['weight']
	for e in G.edges():
		G[e[0]][e[1]]['weight'] = 2 * maxWeight - G[e[0]][e[1]]['weight']
	match = matching.max_weight_matching(G)
	assignments = []
	for x in match:
		if isinstance(x[0], tuple):
			assignments.append((x[0][0], x[1]))
		else:
			assignments.append((x[1][0], x[0]))
	return assignments
Example #13
def get_maximal_matching_weight_from_existing_graph_edges(graph, node_u, node_v):
    neighbors_u = list(neighbor_degree_sorted_list[node_u][0])
    neighbors_v = list(neighbor_degree_sorted_list[node_v][0])
    combined = [neighbors_u, neighbors_v]
    all_possible_edges = list(itertools.product(*combined))
    subgraph = nx.Graph()
    
    for edge in all_possible_edges:
        if graph.has_edge(*edge):
            subgraph.add_edge(*edge)
    
    matching = max_weight_matching(subgraph)
    # the subgraph edges are unweighted, so the maximal matching weight
    # is the matching's cardinality
    weight = len(matching)
    return weight
Example #14
    def hungarian_matching(self, men, women, score_arr):

        # make graph
        score_arr = score_arr.tocoo()
        G = nx.Graph()
        for i, j, v in zip(score_arr.row, score_arr.col, score_arr.data):
            man = self.men.rows[i]
            woman = self.women.rows[j]                
            G.add_edge(man, woman, weight=v)
            
        mate = matching.max_weight_matching(G, maxcardinality=True)
        
        return mate
Example #16
def pairwise_match(matrix, max_score, regular=True, zero_out=False):
	''' Creates a complete graph G from the given donor-patient matrix.
	
	Each original donor-patient pair is set as a node with edges going to
	other donor-patient pairs. Edge weights are derived from the average score
	of the pairwise exchange or our modified average:
	score = AVG(value1,value2) * (MAXPOSSIBLESCORE - ABS_DIFF(value1,value2))
	Max weight matching is then done to get the pairwise matching.

	Returns the row_ind and col_ind lists indicating the matching
	(similar to scipy's linear_sum_assignment).
	'''
	n = matrix.shape[0]
	
	# create graph
	# C(6,2) [6 taken 2] no of edges
	# ignore self loops for now
	G = nx.Graph()
	for i in range(0,n-1): 
		for j in range(i+1,n):
			if zero_out and (matrix[i][j] == 0 or matrix[j][i] == 0):
				# if zero out is True, we automatically make the score
				# zero if there is one incompatible pair
				# score = 0
				# EDIT:
				# we now instead skip adding the edge
				# instead of adding an edge with zero weight
				continue
			else:
				average = (matrix[i][j] + matrix[j][i]) / 2
				if regular:
					# regular average
					score = average
				else:
					# our modified average
					score = average * (max_score - abs(matrix[i][j]-matrix[j][i]))

			G.add_edge(i,j,weight=score)

	M = matching.max_weight_matching(G,maxcardinality=True)
	
	# initialize row and col indices list
	row_ind = [x for x in range(n)]
	col_ind = [-1 for x in range(n)]	# -1 for unmatched
	for pair in M:
		col_ind[pair[0]] = pair[1]
		col_ind[pair[1]] = pair[0]
		

	return row_ind, col_ind
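
A hypothetical call to pairwise_match above with a 4-pair compatibility matrix (the scores are invented for illustration):

import numpy as np

m = np.array([[0, 5, 2, 0],
              [4, 0, 0, 1],
              [3, 0, 0, 6],
              [0, 2, 5, 0]])
row_ind, col_ind = pairwise_match(m, max_score=10)
print(col_ind)  # pairs (0, 1) and (2, 3) are matched: [1, 0, 3, 2]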
Example #17
def christofides(g):
    mst = minimum_spanning_tree(g)

    odd_degree_nodes = [node for node, degree in mst.degree() if degree % 2 == 1]
    odd_subgraph = g.subgraph(odd_degree_nodes).copy()

    # invert weight since networkx can only do max weight matching
    for edge in odd_subgraph.edges(data=True):
        edge[2]['weight'] = -edge[2]['weight']
    matching = max_weight_matching(odd_subgraph, maxcardinality=True)

    eulerian_graph = nx.MultiGraph(mst)
    for u, v in matching:
        eulerian_graph.add_edge(u, v, **g[u][v])

    # shortcut repeated vertices in the Euler circuit to obtain a Hamiltonian tour
    tour = []
    seen = set()
    for u, _ in nx.eulerian_circuit(eulerian_graph):
        if u not in seen:
            seen.add(u)
            tour.append(u)
    return tour
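
A hedged usage sketch for this christofides variant, assuming minimum_spanning_tree is networkx's and g is a complete weighted graph (toy weights invented):

import networkx as nx
from networkx import minimum_spanning_tree

g = nx.complete_graph(4)
for u, v in g.edges():
    g[u][v]['weight'] = 1 + abs(u - v)  # toy symmetric metric
print(christofides(g))                  # e.g. [0, 1, 2, 3]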
Example #18
def make_pairs():
    #creates connection to mongodb database (credentials replaced with placeholders)
    client = MongoClient(
        "mongodb+srv://<username>:<password>@<cluster-host>/test?retryWrites=true"
    )
    db = client.test

    rows = db.inventory.find({'partner': 'None'})

    students = []

    for row in rows:
        share_langs = {
            row['sl1']: row['sp1'],
            row['sl2']: row['sp2'],
            row['sl3']: row['sp3']
        }
        learn_langs = {
            row['ll1']: row['lp1'],
            row['ll2']: row['lp2'],
            row['ll3']: row['lp3']
        }
        students.append(
            Student(row['name'], share_langs, learn_langs, row['prevp'],
                    row['unableprev'], row['rate1'], row['rate2'],
                    row['majors'], row['partner']))

    G = Graph()

    #adds students who can potentially be partners to the graph
    for s in students:
        for ss in students:
            l = canMatch(s, ss)
            if (s != ss and len(l) != 0):
                G.add_edge(s, ss, weight=weight(s, ss))
                #print(s.name + " and " + ss.name + " for " + canMatch(s, ss)[0] + " and " + canMatch(s, ss)[1] + " (" + str(weight(s, ss)) + ") ")

    #s = sorted(max_weight_matching(G))
    #print('{' + ', '.join(map(lambda t: ': '.join(map(repr, t)), s)) + '}')

    pairset = max_weight_matching(
        G, maxcardinality=True)  # result of the max weight matching algorithm
    pairs = []
    for p in pairset:
        pairs.append(Pair(p[0], p[1]))
    return pairs
Example #19
def align_one_to_one(matches: List[Tuple[Any, Any, float]]) -> Dict[Any, Any]:
    bipartite = nx.Graph()
    if not matches:
        return {}

    for sys_arg, grt_arg, score in matches:
        sys_arg = f"sys_{sys_arg[0]}:{sys_arg[1]}"
        grt_arg = f"grt_{grt_arg[0]}:{grt_arg[1]}"
        bipartite.add_edge(sys_arg, grt_arg, weight=score)
    max_alignment = max_weight_matching(bipartite, maxcardinality=True)
    sys_to_grt = {}
    for arg_1, arg_2 in max_alignment:
        sys_arg, grt_arg = (arg_1, arg_2) if "sys" in arg_1 else (arg_2, arg_1)
        sys_arg = tuple(int(coord) for coord in sys_arg[4:].split(":"))
        grt_arg = tuple(int(coord) for coord in grt_arg[4:].split(":"))
        sys_to_grt[sys_arg] = grt_arg
    return sys_to_grt
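
A hypothetical call to align_one_to_one above; each argument is a (sentence, span) index pair and the scores are invented:

matches = [((0, 1), (0, 1), 0.9),
           ((0, 2), (0, 3), 0.4),
           ((2, 4), (2, 3), 0.5)]
print(align_one_to_one(matches))
# e.g. {(0, 1): (0, 1), (0, 2): (0, 3), (2, 4): (2, 3)}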
Example #20
    def hungarian_matching(self, men, women, score_arr):

        # make graph
        score_arr = score_arr.tocoo()
        G = nx.Graph()
        for i, j, v in zip(score_arr.row, score_arr.col, score_arr.data):
            man = self.men.rows[i]
            woman = self.women.rows[j]
            G.add_edge(man, woman, weight=v)

        # mate will contain duplicates: in networkx 1.x the matching is a dict
        # in which the same {key: value} entry also occurs as {value: key}
        mate = matching.max_weight_matching(G, maxcardinality=True)
        unique_mate = {}
        for key, value in mate.items():
            if key not in unique_mate.values():
                unique_mate[key] = value

        return unique_mate
Example #22
    def get_minimum_matching(self):
        """
        Calculate a minimum weight matching with maximum cardinality.
        To do so, invert weights by calculating 1/w for every weight w, and then
        use networkx.algorithms.matching.max_weight_matching.
        """

        subgraphs = self.get_connected_components()

        for sg in subgraphs:
            for node1, node2 in sg.edges_iter():  # networkx 1.x API
                dict_ = sg[node1][node2]
                dict_['weight'] = 1 / dict_['distance']

            seen = []

            # max_weight_matching returned a dict in networkx 1.x
            for node1, node2 in max_weight_matching(sg).items():
                if node1 in seen or node2 in seen:
                    continue
                dist = sg.edge[node1][node2]['distance']
                seen.append(node1)
                seen.append(node2)
                yield tuple(sorted([node1, node2]) + [dist])
Example #23
def pick_a_ride(pool_rides, origin, pool_window_time):
    global total_saved_trips

    try:
        logging.info("Forming pools for the origin " + origin +
                     " for pool window " + str(pool_window_time))
        # print("Forming pools for the origin " + origin + " for pool window " + str(pool_window_time))
        if len(pool_rides) == 0:
            logging.info(
                "No trips present to form pools for the given origin " +
                origin)
            # print("No trips present to form pools for the given origin " + origin)
        else:
            logging.info("Number of requests {}".format(len(pool_rides)))
            # print("Number of requests ", len(pool_rides))
            pool = random_pool_Ids.pop()

            if origin == "From Laguardia":
                rideLabel = "From LaGuardia"
            else:
                rideLabel = "To LaGuardia"
            G.clear()
            pool_shares = pool_rides
            total_distance_saved = 0

            logging.info("POOL ID {}".format(pool))
            print("POOL ID ", pool)
            rideIDS = set()
            rideIDSWithDistance = dict()
            start_time = datetime.now()
            for i in range(0, len(pool_rides)):
                rideIDS.add(pool_shares.iloc[i]['RideID'])
                rideIDSWithDistance[pool_shares.iloc[i][
                    'RideID']] = pool_shares.iloc[i]['dist_airport']
                logging.info("RIDEID {}".format(pool_shares.iloc[i]['RideID']))

                for j in range(i + 1, len(pool_rides)):
                    rideA = pool_shares.iloc[i]
                    rideB = pool_shares.iloc[j]
                    distance_saved = sharing_condition(rideA, rideB, origin)
                    # if distance_saved is > 0, it means ride-sharing condition has been satisfied
                    if distance_saved > 0:
                        logging.info(
                            "Ride Sharing condition satisfied for ride ids {} {} "
                            "with distance saved {}".format(
                                rideA[0], rideB[0], distance_saved))
                        G.add_node(rideA[0])
                        G.add_node(rideB[0])
                        # store the values with ride id a and b along with the max distance saved
                        G.add_edge(rideA[0], rideB[0], weight=distance_saved)

            record_entry = datetime.now()

            # run maximum matching algorithm for ride-shareable graph G
            ride_shareable_nodes = max_weight_matching.max_weight_matching(G)

            # total distance of all rides in a pool
            total_distance_in_pool = sum(rideIDSWithDistance.values())

            ride_shared_nodes_count = len(ride_shareable_nodes)
            # Remove ride IDS that are paired from the array of RideIDS to filter non-shared Ride ids
            for nodes in ride_shareable_nodes:
                if nodes[0] in rideIDS:
                    rideIDS.remove(nodes[0])
                    del rideIDSWithDistance[nodes[0]]
                if nodes[1] in rideIDS:
                    rideIDS.remove(nodes[1])
                    del rideIDSWithDistance[nodes[1]]
                total_distance_saved = total_distance_saved + G.get_edge_data(
                    nodes[0], nodes[1])['weight']

            total_distance_saved = float(total_distance_saved)

            final_trips = len(pool_shares) - ride_shared_nodes_count

            trips_saved = len(pool_shares) - final_trips
            total_saved_trips = total_saved_trips + trips_saved

            unshared_trips = len(pool_shares) - ride_shared_nodes_count * 2
            # get the end time meaning that pool processing is complete
            end_time = datetime.now()
            difference = end_time - start_time

            difference = float(difference.total_seconds())

            # store in db
            pool_insert_query = "insert into pool_details (pool_id,count_of_rides,time_taken,trips_saved," \
                                "final_trips,dist_saved,rideLabel,pool_window,record_entry," \
                                "unshared_trips,initial_trips,initial_trips_distance) values (" + \
                                str(pool) + "," + str(len(ride_shareable_nodes)) + "," + str(
                difference) + "," + str(trips_saved) + "," + str(final_trips) + "," + str(
                total_distance_saved) + "," + "\"" + rideLabel + "\"," + str(pool_window_time) + ",\"" + str(
                record_entry) + "\"," + str(unshared_trips) + "," + str(len(pool_shares)) + "," + str(
                total_distance_in_pool) + ");"
            # print(pool_insert_query)
            database_response = insertRecord(pool_insert_query)
            # print(database_response)
            print("Time taken in seconds for processing pool " + str(pool) +
                  " with " + str(len(pool_shares)) + " rides " +
                  str(difference * 0.0166667) + " minutes")
    except Exception as e:
        raise e
Example #24
import numpy as np
import networkx as nx
from networkx.algorithms.matching import max_weight_matching

a = np.array([[3, 5, 5, 4, 1], [2, 2, 0, 2, 2], [2, 4, 4, 1, 0],
              [0, 2, 2, 1, 0], [1, 2, 1, 3, 3]])
b = np.zeros((10, 10))
b[0:5, 5:] = a
G = nx.Graph(b)
s0 = max_weight_matching(G)  # the return value is a set of (worker, job) pairs
s = [sorted(w) for w in s0]
L1 = [x[0] for x in s]
L1 = np.array(L1) + 1  # worker numbers
L2 = [x[1] for x in s]
L2 = np.array(L2) - 4  # job numbers
c = a[L1 - 1, L2 - 1]  # extract the corresponding payoffs
d = c.sum()  # compute the total payoff
print("Job assignment:\nworker numbers:", L1)
print("job numbers:", L2)
print("total payoff:", d)
Example #25
    def test(self):
        test_encode, test_targets = list(), list()
        test_loss, test_reg_loss, test_rec_loss = 0.0, 0.0, 0.0
        full_test_encode = torch.zeros(
            torch.Size([len(self.test_loader.dataset),
                        self.model.z_dim])).to(self.device).detach()

        with torch.no_grad():
            for _, (x_test, y_test, idx) in enumerate(self.test_loader,
                                                      start=0):
                test_evals = self.trainer.rec_loss_on_test(x_test)
                full_test_encode[idx] = test_evals['encode'].detach()
                test_encode.append(test_evals['encode'].detach())
                test_rec_loss += test_evals['rec_loss'].item()

                test_targets.append(y_test)

            normalized_latents = self.normalize_latents(full_test_encode)
            if self.distribution == 'gaussflower':
                covered = self.gaussflower_covered(full_test_encode)
            elif self.distribution == 'gaussimplex':
                covered = self.gaussimplex_covered(full_test_encode)
            else:
                covered = visual.covered_area(normalized_latents)
            test_rec_loss /= len(self.test_loader)
            test_reg_loss = self.trainer.reg_loss_on_test().item()
            test_loss = test_rec_loss + self.trainer.reg_lambda * test_reg_loss
        test_encode, test_targets = torch.cat(
            test_encode).cpu().numpy(), torch.cat(test_targets).cpu().numpy()

        if self.distribution == 'gaussflower':
            _, indices = self.split_to_petals(full_test_encode)
            labels_as_petals = np.zeros((10, 10))
            for i in range(10):
                for idx in indices[i]:
                    labels_as_petals[i][test_targets[idx]] += 1
            p = [i for i in range(10)]
            l = [j + 10 for j in range(10)]
            weighted_edges = [(i, j + 10, labels_as_petals[i][j])
                              for i in range(10) for j in range(10)]
            B = nx.Graph()
            B.add_nodes_from(p, bipartite=0)
            B.add_nodes_from(l, bipartite=1)
            B.add_weighted_edges_from(weighted_edges)
            pairing = list(max_weight_matching(B, maxcardinality=True))
            pair_weight = 0
            for i in range(len(pairing)):
                if pairing[i][0] > pairing[i][1]:
                    pair_weight += labels_as_petals[pairing[i][1]][
                        pairing[i][0] - 10]
                else:
                    pair_weight += labels_as_petals[pairing[i][0]][
                        pairing[i][1] - 10]

        if self.distribution in ('gaussimplex', 'simplexsphere'):
            _, indices = self.split_to_vertex(full_test_encode)
            labels_as_vertex = np.zeros((10, 10))
            for i in range(10):
                print(len(indices[i]))
                for idx in indices[i]:
                    labels_as_vertex[i][test_targets[idx]] += 1
            p = [i for i in range(10)]
            l = [j + 10 for j in range(10)]
            weighted_edges = [(i, j + 10, labels_as_vertex[i][j])
                              for i in range(10) for j in range(10)]
            B = nx.Graph()
            B.add_nodes_from(p, bipartite=0)
            B.add_nodes_from(l, bipartite=1)
            B.add_weighted_edges_from(weighted_edges)
            pairing = list(max_weight_matching(B, maxcardinality=True))
            pair_weight = 0
            for i in range(len(pairing)):
                if pairing[i][0] > pairing[i][1]:
                    pair_weight += labels_as_vertex[pairing[i][1]][
                        pairing[i][0] - 10]
                else:
                    pair_weight += labels_as_vertex[pairing[i][0]][
                        pairing[i][1] - 10]

        neigh = NearestNeighbors(n_neighbors=10)
        neigh.fit(test_encode)
        num_good_points = 0
        for k in range(len(test_encode)):
            nbrs = neigh.kneighbors(test_encode[k].reshape(1, -1),
                                    10,
                                    return_distance=False)
            labels = list(test_targets[nbrs[0]])
            labels = set(labels)
            if len(labels) == 1:
                num_good_points += 1
        ratio = num_good_points / len(test_encode)

        #nat = self.trainer.sample_pz(len(self.train_loader.dataset)).to(self.device)
        #nat = nat.cpu().detach().numpy()
        #ratio_neighbor = visual.covered_neighborhood(test_encode, nat)

        #with open('ratio_{}_{}.txt'.format(self.trainer.trainer_type, self.trainer.reg_lambda), 'a') as file:
        #    file.write(str(ratio) + '\n')

        print('Test Epoch: {} ({:.2f}%)\tLoss: {:.6f}'.format(
            self.epoch + 1,
            float(self.epoch + 1) / (self.epochs) * 100., test_loss))
        neptune.send_metric('test_loss', x=self.global_iters, y=test_loss)
        neptune.send_metric('test_reg_loss',
                            x=self.global_iters,
                            y=test_reg_loss)
        neptune.send_metric('test_rec_loss',
                            x=self.global_iters,
                            y=test_rec_loss)
        neptune.send_metric('test_covered_area',
                            x=self.global_iters,
                            y=covered)
        neptune.send_metric('ratio_good_nn', x=self.global_iters, y=ratio)
        if self.distribution in ('gaussflower', 'gaussimplex',
                                 'simplexsphere'):
            neptune.send_metric('cluster_matching',
                                x=self.global_iters,
                                y=pair_weight)
            #neptune.send_metric('test_covered_neighbor', x=self.global_iters, y=ratio_neighbor)
        if len(test_targets.shape) == 2:
            test_targets = test_targets[:, self.trail_label_idx]

        self.plot_latent_2d(test_encode, test_targets, test_loss)

        if self.dataset in ('flower', 'square', 'disc', 'circle', 'snail'):
            self.plot_syn()
        else:
            with torch.no_grad():
                _, (x, _, _) = enumerate(self.test_loader, start=0).__next__()
                test_reconstruct = self.trainer.reconstruct(x)

                _, (x, _, _) = enumerate(self.train_loader, start=0).__next__()
                train_reconstruct = self.trainer.reconstruct(x)
                gen_batch = self.trainer.decode_batch(
                    self.trainer.sample_pz(n=self.batch_size))
                self.plot_images(x, train_reconstruct, test_reconstruct,
                                 gen_batch['decode'])
Example #26
 def run(self, players):
     if not isinstance(players, PlayersGraph):
         players = PlayersGraph().load_players(players)
     return max_weight_matching(players,
                                maxcardinality=False,
                                weight="churn_weight")
Example #27
def k_factor(G, k, matching_weight="weight"):
    """Compute a k-factor of G

    A k-factor of a graph is a spanning k-regular subgraph.
    A spanning k-regular subgraph of G is a subgraph that contains
    each vertex of G and a subset of the edges of G such that each
    vertex has degree k.

    Parameters
    ----------
    G : NetworkX graph
      Undirected graph

    matching_weight: string, optional (default='weight')
       Edge data key corresponding to the edge weight.
       Used for finding the max-weighted perfect matching.
       If key not found, uses 1 as weight.

    Returns
    -------
    G2 : NetworkX graph
        A k-factor of G

    References
    ----------
    .. [1] "An algorithm for computing simple k-factors.",
       Meijer, Henk, Yurai Núñez-Rodríguez, and David Rappaport,
       Information processing letters, 2009.
    """

    from networkx.algorithms.matching import max_weight_matching
    from networkx.algorithms.matching import is_perfect_matching

    class LargeKGadget:
        def __init__(self, k, degree, node, g):
            self.original = node
            self.g = g
            self.k = k
            self.degree = degree

            self.outer_vertices = [(node, x) for x in range(degree)]
            self.core_vertices = [(node, x + degree)
                                  for x in range(degree - k)]

        def replace_node(self):
            adj_view = self.g[self.original]
            neighbors = list(adj_view.keys())
            edge_attrs = list(adj_view.values())
            for (outer, neighbor, attrs) in zip(self.outer_vertices,
                                                neighbors, edge_attrs):
                self.g.add_edge(outer, neighbor, **attrs)
            for core in self.core_vertices:
                for outer in self.outer_vertices:
                    self.g.add_edge(core, outer)
            self.g.remove_node(self.original)

        def restore_node(self):
            self.g.add_node(self.original)
            for outer in self.outer_vertices:
                adj_view = self.g[outer]
                for neighbor, edge_attrs in list(adj_view.items()):
                    if neighbor not in self.core_vertices:
                        self.g.add_edge(self.original, neighbor, **edge_attrs)
                        break
            self.g.remove_nodes_from(self.outer_vertices)
            self.g.remove_nodes_from(self.core_vertices)

    class SmallKGadget:
        def __init__(self, k, degree, node, g):
            self.original = node
            self.k = k
            self.degree = degree
            self.g = g

            self.outer_vertices = [(node, x) for x in range(degree)]
            self.inner_vertices = [(node, x + degree) for x in range(degree)]
            self.core_vertices = [(node, x + 2 * degree) for x in range(k)]

        def replace_node(self):
            adj_view = self.g[self.original]
            for (outer, inner, (neighbor,
                                edge_attrs)) in zip(self.outer_vertices,
                                                    self.inner_vertices,
                                                    list(adj_view.items())):
                self.g.add_edge(outer, inner)
                self.g.add_edge(outer, neighbor, **edge_attrs)
            for core in self.core_vertices:
                for inner in self.inner_vertices:
                    self.g.add_edge(core, inner)
            self.g.remove_node(self.original)

        def restore_node(self):
            self.g.add_node(self.original)
            for outer in self.outer_vertices:
                adj_view = self.g[outer]
                for neighbor, edge_attrs in adj_view.items():
                    if neighbor not in self.core_vertices:
                        self.g.add_edge(self.original, neighbor, **edge_attrs)
                        break
            self.g.remove_nodes_from(self.outer_vertices)
            self.g.remove_nodes_from(self.inner_vertices)
            self.g.remove_nodes_from(self.core_vertices)

    # Step 1
    if any(d < k for _, d in G.degree):
        raise nx.NetworkXUnfeasible(
            "Graph contains a vertex with degree less than k")
    g = G.copy()

    # Step 2
    gadgets = []
    for node, degree in list(g.degree):
        if k < degree / 2.0:
            gadget = SmallKGadget(k, degree, node, g)
        else:
            gadget = LargeKGadget(k, degree, node, g)
        gadget.replace_node()
        gadgets.append(gadget)

    # Step 3
    matching = max_weight_matching(g,
                                   maxcardinality=True,
                                   weight=matching_weight)

    # Step 4
    if not is_perfect_matching(g, matching):
        raise nx.NetworkXUnfeasible(
            "Cannot find k-factor because no perfect matching exists")

    for edge in g.edges():
        if edge not in matching and (edge[1], edge[0]) not in matching:
            g.remove_edge(edge[0], edge[1])

    for gadget in gadgets:
        gadget.restore_node()

    return g
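
A minimal usage sketch for k_factor above (the same function ships as networkx.k_factor in recent networkx releases):

import networkx as nx

G = nx.complete_graph(5)  # every vertex has degree 4
F = k_factor(G, 2)        # spanning 2-regular subgraph, i.e. a disjoint union of cycles
assert all(d == 2 for _, d in F.degree)
print(sorted(F.edges()))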
Example #28
  def break_mwop(self, tree, allow_gene_copies="No"):
    """
    Break a homology group into orthogroups
    where all proteins are orthologous to
    all proteins, using the
    Minimum Weight Orthogonal Partition (MWOP)
    criterion. A phylogenetic *species* tree
    is used to guide the order of actions in the
    algorithm. Optionally, the *gene* tree is used
    for keeping together recent gene copies.
    Returns a list of sets of gene names. Each
    set represents an orthogroup.
    See DOI:10.1007/978-3-642-23038-7_30 for
    details.
    """
    eps = 0.001
    tree = tree.copy()
    if allow_gene_copies == "No":
      allow_gene_copies = False

    # initialize gene sets
    genomes = [self.nodes[g]['genome'] for g in self.nodes]
    V = {genome: [] for genome in genomes}
    for gene in self.nodes:
      genome = self.nodes[gene]['genome']
      V[genome].append(set([gene]))
    # if allow_gene_copies mode is on, use gene tree
    # to cluster recent gene copies together by searching
    # for monophyletic groups (from the same genome)
    if allow_gene_copies and self.gene_tree:
      all_genomes = set([gene.genome for gene in self.gene_tree])
      if allow_gene_copies == "exclude_ref" and self.ref_genome_name and self.ref_genome_name in all_genomes:
        all_genomes.remove(self.ref_genome_name)
      gene_copies = []
      for genome in all_genomes:
        for node in self.gene_tree.get_monophyletic(values=[genome], target_attr="genome"):
          if not node.is_leaf():
            gene_copies.append([gene.name for gene in node.get_leaves()])

      # create sets of gene copies and remove single-gene sets
      # all genes in gene copies groups:
      rm = set(chain.from_iterable(gene_copies))
      for genome in V:
        new_Vi = []
        for s in V[genome]:
          if s == set() or next(iter(s)) not in rm:
            new_Vi.append(s)
        V[genome] = new_Vi
      for cp in gene_copies:
        cp_genome = self.nodes[cp[0]]['genome']
        V[cp_genome].append(set(cp))
    # complete with empty sets
    nm = max([len(l) for l in V.values()])
    for genome in V:
      V[genome] = fill_list_to_length(V[genome], nm, set())

    # remove tree leaves with no genes
    for genome in tree:
      if genome.name not in V:
        genome.delete()

    # traverse tree and create new orthogroups
    n_leaves = len(tree.get_tree_root())
    while n_leaves > 1:
      # find two closest genomes based on tree
      dist_matrix = tree_to_distance_matrix(tree, sister_only=True)
      closest_genomes = row_col_min(dist_matrix)
      # create bipartite graph (BG)
      # (BG has integers as labels, so Vi_d and Vj_d
      # store the integer --> gene set mapping)
      Vi = V[closest_genomes[0]]
      Vi_d = {n: Vi[n] for n in range(nm)}
      Vj = V[closest_genomes[1]]
      Vj_d = {n: Vj[n-nm] for n in range(nm,nm*2)}
      bipartite_graph = bipartite.complete_bipartite_graph(nm,nm)
      # assign weights to BG edges
      for edge in bipartite_graph.edges:
        # look for corresponding edge in homology graph (HG)
        # first, translate BG integers to gene sets
        g1 = Vi_d[edge[0]]
        g2 = Vj_d[edge[1]]
        # calculate edge weights as mean of weights between sets
        pairs = product(g1,g2)
        total_weight = 0
        pairs_involved = 0
        for p in pairs:
          if p in self.edges: # orthologs
            total_weight += self.edges[p]['weight']
          else: # paralogs
            total_weight += -eps
          pairs_involved += 1
        if pairs_involved == 0: # when using an empty set
          bipartite_graph.edges[edge]['weight'] = eps
        else:
          bipartite_graph.edges[edge]['weight'] = total_weight/pairs_involved
      # find BG maximum weight matching (MWM)
      # ensure matches are always from Vi to Vj
      mwm = matching.max_weight_matching(bipartite_graph,maxcardinality=True)
      mwm = {(min(x),max(x)) for x in mwm}
      # update gene sets according to MWM
      # this is done by set unions on every matching
      new_Vi = []
      for match in mwm:
        g1 = Vi_d[match[0]]
        g2 = Vj_d[match[1]]
        g1 = g1.union(g2)
        new_Vi.append(g1)
      V[closest_genomes[0]] = new_Vi
      # then remove other genome from V
      del(V[closest_genomes[1]])
      # update tree - remove leaf corresponding to Vj
      # but update branch length of Vi to mean of branch
      # lengths of Vi and Vj
      Vj_bl = (tree&closest_genomes[1]).dist
      j = tree.search_nodes(name=closest_genomes[1])[0]
      j.delete()
      Vi_bl = (tree&closest_genomes[0]).dist
      new_Vi_bl = (Vi_bl + Vj_bl)/2
      (tree&closest_genomes[0]).dist = new_Vi_bl
      # calculate number of leaves left
      n_leaves = len(tree.get_tree_root())

    # finish when all leaves were merged
    # return a list of graphs, each is an OG
    ogs_list = list(V.values())[0]
    # create list of empty graphs
    ogs_graph_list = [ nx.Graph() for og in ogs_list ]
    for i in range(len(ogs_list)):
      # populate with nodes
      ogs_graph_list[i].add_nodes_from([(n, {'genome': self.nodes[n]['genome']}) for n in ogs_list[i]])
      # assign OG name
      ogs_graph_list[i].orthogroup = "%s.%s" %(self.orthogroup, i)
    return ogs_graph_list
Example #29
def max_weight(Gph, maxcardinality=True):
    return nx.Graph(max_weight_matching(Gph, maxcardinality))
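
For instance, assuming networkx >= 2.0, where the matching comes back as a set of node pairs:

import networkx as nx

H = max_weight(nx.path_graph(4))  # matching edges wrapped in a new Graph
print(sorted(H.edges()))          # e.g. [(0, 1), (2, 3)]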
Example #30
# -*- coding: utf-8 -*-

from Driver import Driver
from Rider import Rider
import networkx as nx
from networkx.algorithms.matching import max_weight_matching
from Auxiliary import get_shared_route_percentage
import matplotlib.pyplot as plt

driver = Driver()
rider = Rider()
shared_route_percentage = get_shared_route_percentage(driver, rider)
G = nx.Graph()
G.add_edge(driver, rider, weight=shared_route_percentage)
S = max_weight_matching(G)
print(S)
matching = nx.bipartite.maximum_matching(G)
print(matching)
Example #31
def pick_a_ride(pool_map, origin, pool_window_time):
    global total_pools_running_time, total_individual_trips, total_time_delta_minutes, total_saved_trips, fromLaguardiaPoolsProcessedCount, toLaguardiaPoolsProcesedCount
    try:

        print("Forming pools for the origin " + origin + " for pool window " +
              str(pool_window_time))
        if len(pool_map.keys()) == 0:
            print("No trips present to form pools for the given origin " +
                  origin)
        else:

            pools_ending_time = timedelta(
                total_time_delta_minutes).total_seconds()
            poolsCount = 0
            cumulative_pools_processing_time = 0
            rideLabel = ""
            if origin == "From Laguardia":
                rideLabel = "From LaGuardia"
            else:
                rideLabel = "To LaGuardia"

            # Get the list of rides within the pool window period
            for pool in pool_map:
                G.clear()
                total_distance_saved = 0
                ride_shared_nodes_count = 0
                print("POOL ID ", pool)
                pool_shares = pool_map[pool]
                print("Number of requests ", len(pool_shares))
                total_individual_trips = total_individual_trips + len(
                    pool_shares)
                number_of_rides = len(pool_shares)
                isRideSharingDone = False
                start_time = datetime.utcnow()
                final_array = []
                index1 = 0
                length = len(pool_shares)
                rideIDS = set()
                rideIDSWithDistance = dict()
                # Print the obtained combinations
                while index1 < length:
                    rideIDS.add(pool_shares[index1][0])
                    rideIDSWithDistance[pool_shares[index1]
                                        [0]] = pool_shares[index1][6]
                    index2 = index1 + 1
                    while index2 < length:
                        rideA = pool_shares[index1]
                        rideB = pool_shares[index2]
                        distance_saved = sharing_condition(
                            rideA, rideB, origin)
                        # if distance_saved is > 0, it means ride-sharing condition has been satisfied
                        if distance_saved > 0:
                            G.add_node(rideA[0])
                            G.add_node(rideB[0])
                            # store the values with ride id a and b along with the max distance saved
                            G.add_edge(rideA[0],
                                       rideB[0],
                                       weight=distance_saved)
                        index2 = index2 + 1
                    index1 = index1 + 1

                # Means that there is at least 1 pair satisfying ride-sharing condition
                if G.number_of_nodes() > 0:

                    # run maximum matching algorithm for ride-shareable graph G
                    ride_shareable_nodes = max_weight_matching.max_weight_matching(
                        G)

                    ride_shared_nodes_count = len(ride_shareable_nodes)
                    # Remove ride IDS that are paired from the array of RideIDS to filter non-shared Ride ids
                    for nodes in ride_shareable_nodes:
                        if nodes[0] in rideIDS:
                            rideIDS.remove(nodes[0])
                            del rideIDSWithDistance[nodes[0]]
                        if nodes[1] in rideIDS:
                            rideIDS.remove(nodes[1])
                            del rideIDSWithDistance[nodes[1]]
                        total_distance_saved = total_distance_saved + G.get_edge_data(
                            nodes[0], nodes[1])['weight']

                    total_distance_saved = float(total_distance_saved)
                    total_saved_trips = total_saved_trips + len(
                        pool_shares) - (
                            (ride_shared_nodes_count) + len(rideIDS))
                    # get the end time meaning that pool processing is complete
                    end_time = datetime.utcnow()
                    difference = end_time - start_time
                    # Keep track of cumulative time that is elapsed
                    cumulative_pools_processing_time = cumulative_pools_processing_time + difference.total_seconds(
                    )
                    total_pools_running_time = total_pools_running_time + cumulative_pools_processing_time
                    difference = float(difference.total_seconds())
                    record_entry = datetime.utcnow()
                    # store in db
                    pool_insert_query = "insert into pool_details (pool_id,count_of_rides,time_taken,dist_saved,rideLabel,pool_window,record_entry) values (" + \
                                        str(pool) + "," + str(len(ride_shareable_nodes)) + "," + str(
                        difference) + "," + str(
                        total_distance_saved) + "," + "\"" + rideLabel + "\"," + str(pool_window_time) + ",\"" + str(
                        record_entry) + "\");"
                    print(pool_insert_query)
                    database_response = insertRecord(pool_insert_query)
                    print(database_response)
                    isRideSharingDone = True

                    # Insert ride-sharable requests as individual trips
                    for nodes in ride_shareable_nodes:
                        tripID = random_trip_Ids.pop()
                        trip_detail_insert_query = "insert into trip_details (trip_id,pool_id,rideT_id,isRideShared,rideLabel,record_entry) " \
                                                   "values (" + \
                                                   str(tripID) + "," + str(pool) + "," + str(
                            nodes[0]) + "," + "1" + "," + "\"" + rideLabel + "\"" + \
                                                   ",\"" + str(record_entry) + "\");"
                        insertRecord(trip_detail_insert_query)
                        trip_detail_insert_query = "insert into trip_details (trip_id,pool_id,rideT_id,isRideShared,rideLabel,record_entry) values (" + \
                                                   str(tripID) + "," + str(pool) + "," + str(
                            nodes[1]) + "," + "1" + "," + "\"" + rideLabel + "\"" + \
                                                   ",\"" + str(record_entry) + "\")"
                        insertRecord(trip_detail_insert_query)
                record_entry = datetime.utcnow()
                # If ridesharing is not done, no trips are combined.
                if not isRideSharingDone:
                    end_time = datetime.utcnow()
                    difference = end_time - start_time
                    difference = float(difference.total_seconds())

                    dist_saved = sum(rideIDSWithDistance.values())
                    # store in db
                    pool_insert_query = "insert into pool_details (pool_id,count_of_rides,time_taken,dist_saved,rideLabel,pool_window,record_entry) values (" + \
                                        str(pool) + "," + str(len(rideIDS)) + "," + str(difference) + "," + str(
                        dist_saved) + "," + "\"" + rideLabel + "\"," + str(pool_window_time) + ",\"" + str(
                        record_entry) + "\");"
                    print(pool_insert_query)
                    database_response = insertRecord(pool_insert_query)
                    # if origin == "From Laguardia":
                    #     fromLaguardiaPoolsProcessedCount = fromLaguardiaPoolsProcessedCount + 1
                    # else:
                    #     toLaguardiaPoolsProcesedCount = toLaguardiaPoolsProcesedCount + 1
                    # print(database_response)
                else:
                    dist_saved = sum(rideIDSWithDistance.values())
                    # Update pool entry to update count to the sum of existing value+ count of individual trips
                    update_pool_query = "update pool_details set count_of_rides=" + str(
                        ride_shared_nodes_count + len(rideIDS)) + \
                                        ",dist_saved=" + str(
                        total_distance_saved + dist_saved) + " where pool_id=" + str(pool)
                    print(update_pool_query)
                    insertRecord(update_pool_query)
                # insert records that are not ride-shared
                for rideID in rideIDS:
                    # store in db
                    tripID = random_trip_Ids.pop()
                    trip_detail_insert_query = "insert into trip_details (trip_id,pool_id,rideT_id,isRideShared," \
                                               "rideLabel,record_entry) values (" + \
                                               str(tripID) + "," + str(pool) + "," + str(
                        rideID) + "," + "0" + "," + "\"" + rideLabel + "\"" + \
                                               ",\"" + str(record_entry) + "\")"
                    insertRecord(trip_detail_insert_query)
                if origin == "From Laguardia":
                    fromLaguardiaPoolsProcessedCount = fromLaguardiaPoolsProcessedCount + 1
                else:
                    toLaguardiaPoolsProcesedCount = toLaguardiaPoolsProcesedCount + 1

                print("Time taken in seconds for processing pool " +
                      str(pool) + " with " + str(len(pool_shares)) +
                      " rides " + str(difference * 0.0166667) + " minutes")
    except Exception as e:
        raise e
Example #32
def time_maximum_matching(G):
    time1 = time()
    s = max_weight_matching(G)
    time2 = time()
    return len(s), time2 - time1