def solve_whole_local_solver(manager_stop, config):
    """
    Solve the problem as a whole, for the allocated time
    :param manager_stop: the manager stop as a whole
    :param config: the associated config
    :return: the predicted number of vehicles, the number of routes, the prediction accuracy,
        the number of vehicles used by the solver, and the benchmark name "LocalSolver"
    """
    tree_finder = find_best_tree.FindBestTree()
    dict_feature = derivingFeaturesCreatedDataset.DerivingFeaturesCreatedDataset(manager_stop)
    tree, dict_cst_computed, dict_leaf_label_computed, dict_proba_computed, dict_dispersion = tree_finder.find_cst_best_tree()
    list_cst = [cst for l in dict_cst_computed.values() for cst in l]
    list_features = [cst.get_feature_name(dict_feature) for cst in list_cst]
    list_features = list(set(list_features))
    router = cvrptw_routing_solver.RoutingSolverCVRPTW(manager_stop, config, time_routing=3600)
    num_vehicle, tot_distance, list_routes = router.solve_parse_routing()

    number_vehicle_predicted = 0
    acc = 0
    for route in list_routes:
        new_manager = stops_manager_cvrptw.StopsManagerCVRPTW.from_sublist(route, manager_stop)
        new_cluster = cluster.Cluster.from_manager_stops(manager_stop=new_manager,
                                                         guid='clu_0',
                                                         tree=tree,
                                                         dict_cst=dict_cst_computed,
                                                         list_useful_features=list_features,
                                                         dict_disp=dict_dispersion)
        print('For the route we have a prediction of ', new_cluster.prediction)
        number_vehicle_predicted += new_cluster.prediction
        # Each route returned by the solver uses exactly one vehicle,
        # so a prediction of 1 counts as correct.
        if new_cluster.prediction == 1:
            acc += 1

    print('Benchmark local solver ', num_vehicle, ' for prediction according to tree ',
          number_vehicle_predicted, ' and accuracy ', acc / len(list_routes))
    return number_vehicle_predicted, len(list_routes), acc / len(list_routes), num_vehicle, "LocalSolver"
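
For context, a minimal sketch of how the returned tuple might be consumed; the caller variable names and the report format are illustrative, and manager_stop and config are assumed to be built elsewhere.

# Hypothetical caller of the benchmark above; nothing here is part of the original code.
predicted, n_routes, accuracy, n_vehicles, method = solve_whole_local_solver(manager_stop, config)
print(f"{method}: {n_vehicles} vehicles used by the solver, "
      f"{predicted} predicted over {n_routes} routes (accuracy {accuracy:.2%})")
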
Example #2
    def _add_new_features(self, list_manager_stop, list_vehi):
        """
        Compute the features of the manager stops to be added
        :param list_manager_stop: a list of manager stop objects
        :param list_vehi: a list of the number of vehicles obtained via the routing, one per manager
        :return:
        """
        data = []

        for i, manager_stop in enumerate(list_manager_stop):
            feature_object = derivingFeaturesCreatedDataset.DerivingFeaturesCreatedDataset(
                manager_stop)
            dict_features = feature_object.derive_features()
            # Cap the class label at 6 vehicles.
            dict_features['class_label'] = min(6, list_vehi[i])
            dict_features['iteration'] = self.iteration
            data.append(dict_features)

        # Write the CSV header only if the file does not exist yet.
        header = not os.path.exists(useful_paths.FILE_TO_TREE_CVRPTW)

        data_results = pd.DataFrame(data)
        data_results.to_csv(useful_paths.FILE_TO_TREE_CVRPTW,
                            header=header,
                            index=False,
                            mode="a")
Example #3
def derive_feature_created(manager_stops):
    """
    From the manager containing all the relevant stops, derive the relevant aggregated features
    :param manager_stops: a manager stop containing all stops
    :return: a dict['feature name'] = feature value
    """
    feature_object = derivingFeaturesCreatedDataset.DerivingFeaturesCreatedDataset(
        manager_stops)

    return feature_object.derive_features()
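
A small usage sketch of derive_feature_created, assuming a manager_stops object is already available; the print formatting is illustrative.

# Hypothetical usage: list the aggregated features derived from a stop manager.
dict_features = derive_feature_created(manager_stops)
for feature_name, value in sorted(dict_features.items()):
    print(f"{feature_name}: {value}")
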
Example #4
    def __init__(self, manager_stop, cluster_stop, dict_leaf_const,
                 initial_leaf_id):
        self.manager_stop = manager_stop
        self.cluster_stop = cluster_stop  # inherits from manager stop, but corresponds to the initial cluster
        self.featurer = derivingFeaturesCreatedDataset.DerivingFeaturesCreatedDataset(
            manager_stop=self.cluster_stop)
        self.current_dict_feature = self._get_feature()

        self.dist_list = self._compute_dist_list()
        self.dict_leaf_const = dict_leaf_const
        self.current_leaf_id = initial_leaf_id
Example #5
    def from_row(cls, row, manager_ref, tree, dict_cst, list_useful_features,
                 dict_disp):
        """
        Create a cluster from a row
        :param row: the row from which we read the stop
        :param manager_ref: the manager reference
        :param tree: a tree object
        :param dict_cst: a dict of constraints
        :param list_useful_features: a list of features
        :return: a cluster object
        """
        new_cluster = cls(depot=manager_ref.depot,
                          guid=row['guid'],
                          tree=tree,
                          dict_cst=dict_cst,
                          dict_disp=dict_disp)
        new_cluster.matrix_stops_dist = manager_ref.matrix_stops_dist
        new_cluster.matrix_depot_dist = manager_ref.matrix_depot_dist

        list_stop = ast.literal_eval(row['list_stop'])
        for stop_id in list_stop:
            new_cluster[stop_id] = manager_ref[stop_id]

        # new_cluster.featurer = derivingFeaturesCreatedDatasetLinear.DerivingFeaturesCreatedDatasetLinear(new_cluster,list_useful_features)
        new_cluster.featurer = derivingFeaturesCreatedDataset.DerivingFeaturesCreatedDataset(
            new_cluster, list_useful_features)
        new_cluster.dict_features = {}
        for c in row.keys():
            if c not in ['guid', 'list_stop', 'tracking_evolution']:
                new_cluster.dict_features[c] = row[c]

        # df_feature = pd.DataFrame([new_cluster.dict_features])
        # index_leaf = tree.apply(df_feature)[0]
        index_leaf = new_cluster._get_leaf(new_cluster.dict_features)
        label = tree.get_classification_label(index_leaf)

        # Make sure that we don't skyrocket in terms of number of vehicles.
        if label >= 4.5:
            label = 1000
            expected_label = 1000
        else:
            expected_label = sum(
                int(nb_vehicle) * proba for nb_vehicle, proba in
                tree.get_classification_proba(index_leaf).items())

        new_cluster.prediction = label
        new_cluster.expected_prediction = expected_label
        new_cluster.leaf = index_leaf
        new_cluster.tracking_evolution = ast.literal_eval(
            row['tracking_evolution'])

        return new_cluster
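
A hedged sketch of how from_row could be driven from a saved CSV of clusters, assuming it is exposed as a classmethod on cluster.Cluster (as from_manager_stops is elsewhere in these snippets) and that each row carries the guid, list_stop, tracking_evolution and feature columns it expects; the file name is illustrative.

import pandas as pd

# Hypothetical reload of previously saved clusters; 'clusters.csv' is an assumed
# file name, and the tree/constraint objects come from the surrounding pipeline.
df_clusters = pd.read_csv('clusters.csv')
list_clusters = [
    cluster.Cluster.from_row(row, manager_ref, tree, dict_cst_computed,
                             list_features, dict_dispersion)
    for _, row in df_clusters.iterrows()
]
print(sum(clu.prediction for clu in list_clusters), 'vehicles predicted in total')
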
Example #6
    def from_manager_stops(cls, manager_stop, guid, tree, dict_cst,
                           list_useful_features, dict_disp):
        """
        Create a cluster from a manger stop
        :param manager_stop: the manager considered
        :param guid: the guid of the cluster
        :param tree: a tree object
        :param dict_cst: a dict of constraints
        :param list_useful_features: a list of features
        :return: a cluster object
        """
        new_cluster = cls(depot=manager_stop.depot,
                          guid=guid,
                          tree=tree,
                          dict_cst=dict_cst,
                          dict_disp=dict_disp)
        new_cluster.matrix_stops_dist = manager_stop.matrix_stops_dist
        new_cluster.matrix_depot_dist = manager_stop.matrix_depot_dist

        for stop_id in manager_stop.keys():
            new_cluster[stop_id] = manager_stop[stop_id]

        # new_cluster.featurer = derivingFeaturesCreatedDatasetLinear.DerivingFeaturesCreatedDatasetLinear(new_cluster,list_useful_features)
        new_cluster.featurer = derivingFeaturesCreatedDataset.DerivingFeaturesCreatedDataset(
            new_cluster, list_useful_features)
        new_cluster.dict_features = new_cluster.featurer.derive_features()

        # df_feature = [list(new_cluster.dict_features.values())]
        # index_leaf = tree.apply(df_feature)[0]
        index_leaf = new_cluster._get_leaf(new_cluster.dict_features)
        label = tree.get_classification_label(index_leaf)

        # Make sure that we don't skyrocket in terms of number of vehicles.
        if label >= 4.5:
            label = 1000
            expected_label = 1000
        else:
            expected_label = sum(
                int(nb_vehicle) * proba for nb_vehicle, proba in
                tree.get_classification_proba(index_leaf).items())

        new_cluster.prediction = label
        new_cluster.expected_prediction = expected_label
        new_cluster.leaf = index_leaf

        return new_cluster
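
To make the capping and expectation step above concrete, a small worked example with assumed leaf probabilities mirroring what tree.get_classification_proba(index_leaf) returns:

# Hypothetical leaf: the label stays below the 4.5 cap, so the expected
# prediction is the probability-weighted number of vehicles.
proba = {'1': 0.2, '2': 0.7, '3': 0.1}
expected_label = sum(int(nb_vehicle) * p for nb_vehicle, p in proba.items())
print(expected_label)  # 1 * 0.2 + 2 * 0.7 + 3 * 0.1 = 1.9 vehicles expected
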
Example #7
def solve_via_cluster(manager_stop, config, perce):
    """
    Route the whole manager stop with respect to the config via a naive k-means clustering
    :param manager_stop: the manager stop as a whole
    :param config: the associated config
    :param perce: percentage of the vehicle's capacity
    :return: the predicted number of vehicles, the number of clusters, the prediction accuracy,
        the total number of vehicles used by the solver, and the benchmark name "Kmean"
    """
    tree_finder = find_best_tree.FindBestTree()
    dict_feature = derivingFeaturesCreatedDataset.DerivingFeaturesCreatedDataset(manager_stop)
    tree, dict_cst_computed, dict_leaf_label_computed, dict_proba_computed, dict_dispersion = tree_finder.find_cst_best_tree()
    list_cst = [cst for l in dict_cst_computed.values() for cst in l]
    list_features = [cst.get_feature_name(dict_feature) for cst in list_cst]
    list_features = list(set(list_features))

    config.cluster_method = "NAIVE"
    clustering_object = perform_clustering.StopClustering()
    list_cluster = clustering_object.perform_clustering(manager_stop, config, perce)

    print("Number of clusters ", len(list_cluster))
    tot_vehi = 0
    predicted = 0
    acc = 0

    for clu in tqdm(list_cluster, desc='Routing in naive benchmark'):
        man_stop_clus = stops_manager_cvrptw.StopsManagerCVRPTW.from_sublist(
            list_stops=[stop.guid for stop in clu.list_stops],
            reference_manager_stop=manager_stop)
        cluster_object = cluster.Cluster.from_manager_stops(man_stop_clus, 'clu_0', tree,
                                                            dict_cst_computed, list_features,
                                                            dict_dispersion)
        predicted += cluster_object.prediction
        current_manager_stops = stops_manager_cvrptw.StopsManagerCVRPTW.init_from_cluster(clu)
        current_manager_stops.set_depot(manager_stop.depot)

        router = cvrptw_routing_solver.RoutingSolverCVRPTW(current_manager_stops, config)
        num_vehicle, tot_distance, list_routes = router.solve_parse_routing()
        tot_vehi += num_vehicle

        if num_vehicle == cluster_object.prediction:
            acc += 1

    print("Benchmark naive Kmean", tot_vehi,"for predicted ", predicted, " accuracy ", acc/len(list_cluster))
    return predicted,len(list_cluster),acc/len(list_cluster), tot_vehi,"Kmean"
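
Since both benchmark functions return the same tuple shape ending with the method name, their results can be put side by side. A minimal sketch, assuming both functions and their inputs are in scope; the capacity percentage of 0.8 and the column names are illustrative.

import pandas as pd

# Hypothetical comparison of the two benchmarks defined above.
rows = [solve_whole_local_solver(manager_stop, config),
        solve_via_cluster(manager_stop, config, perce=0.8)]
df_bench = pd.DataFrame(rows, columns=['predicted', 'n_groups', 'accuracy',
                                       'vehicles', 'method'])
print(df_bench)
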
Example #8
    def _find_predic_leaf(self, clust):
        """
        Find the leaf to which the cluster belongs
        :param clust: the cluster considered
        :return: a leaf id
        """
        object_feature = derivingFeaturesCreatedDataset.DerivingFeaturesCreatedDataset(manager_stop=clust)
        dict_feature = object_feature.derive_features()

        for leaf_id in self.dict_cst:
            list_cst = self.dict_cst[leaf_id]
            has_found = True

            for cst in list_cst:
                if not cst.is_respected(dict_feature):
                    has_found = False
                    break

            if has_found:
                return leaf_id

        # Every cluster should satisfy the constraints of exactly one leaf.
        assert False, 'no leaf matches the derived features'
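
A toy, self-contained illustration of the leaf lookup above: constraints expose an is_respected(dict_feature) check, and the first leaf whose constraints all hold is returned. Everything here (the ThresholdConstraint class, feature names, thresholds) is hypothetical and only meant to show the control flow.

class ThresholdConstraint:
    """Toy constraint: a feature must stay below a threshold."""

    def __init__(self, feature, threshold):
        self.feature = feature
        self.threshold = threshold

    def is_respected(self, dict_feature):
        return dict_feature[self.feature] <= self.threshold


dict_cst = {'leaf_1': [ThresholdConstraint('nb_stops', 20)],
            'leaf_2': [ThresholdConstraint('nb_stops', 50)]}
dict_feature = {'nb_stops': 35}

# Same logic as _find_predic_leaf: return the first leaf whose constraints all hold.
matching_leaf = next(leaf_id for leaf_id, list_cst in dict_cst.items()
                     if all(cst.is_respected(dict_feature) for cst in list_cst))
print(matching_leaf)  # leaf_2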