Ejemplo n.º 1
0
def simulated_anealing(graph, step=10000, a=0.5, q=1000, t=50, t_min=0.000001, count_max=100):
    """Solve a TSP instance on ``graph`` with simulated annealing.

    :param graph: cost matrix of the instance (``len(graph)`` is the city count)
    :param step: maximum number of annealing iterations
    :param a: cooling factor applied to the temperature (0 < a < 1)
    :param q: number of iterations between coolings (equilibrium period)
    :param t: initial temperature
    :param t_min: lower bound for the temperature (bug fix: previously unused)
    :param count_max: stop after this many consecutive unchanged iterations
    :return: the best tour found, as a list of city indices
    """
    count = 0  # consecutive iterations with no change of solution
    n = len(graph)
    s = np.random.permutation(n).tolist()  # initial solution: a random tour

    for i in range(step):
        s_next = find_better_solusion(get_neighbors(s), s, graph)

        e = get_cost(s, graph)
        e_next = get_cost(s_next, graph)

        # Track how long the search has been stuck on the same solution.
        if s == s_next:
            count += 1
        else:
            count = 0

        # Always accept an improvement; otherwise accept a worse solution
        # with the Boltzmann acceptance probability at temperature t.
        if e_next < e:
            s = s_next
        else:
            if random.random() <= probability(e, e_next, t):
                s = s_next

        if i % q == 0:  # Equilibrium state: cool down, but never below t_min
            t = max(a * t, t_min)

        if count > count_max:  # converged: solution stable for count_max steps
            return s

    return s
Ejemplo n.º 2
0
def main():
    """Parse the command line, load the graph and run the annealing solver."""
    parser = argparse.ArgumentParser(
        description='This script is for solve TSP with simulated anealing')
    parser.add_argument('-g', '--graph', type=str,
                        help='path to graph csv file', required=True)

    known_args, _ = parser.parse_known_args()
    path = known_args.graph

    # Bail out early when the CSV file does not exist.
    if not os.path.exists(path):
        print("File not found")
        sys.exit(1)

    graph = read_graph(path)
    solution = simulated_anealing(graph)
    print('Answer')
    print("Path:", solution, ", Cost:", get_cost(solution, graph))
Ejemplo n.º 3
0
def get_mlp_model(n_in, n_out, n_layers=2, n_hidden=50):
    """Build a BinaryConnect-style MLP as Theano compiled functions.

    Real-valued weights are kept as the trainable parameters; two binarized
    views of them are used in the forward pass: a stochastic one for
    training and a deterministic one for testing.

    :param n_in: input dimensionality
    :param n_out: output dimensionality
    :param n_layers: total number of weight layers (must be >= 2)
    :param n_hidden: units per hidden layer
    :return: (train_func, test_func, grads_func, params, int_output_func)
    """
    assert n_layers >= 2, '`n_layers` should be greater than 1 (otherwise it is just an mlp)'

    # initialize weights: n_in -> n_hidden -> ... -> n_hidden -> n_out
    weights = [utils.get_weights('w_1', n_in, n_hidden)]
    weights += [utils.get_weights('w_%d' % i, n_hidden, n_hidden) for i in range(2, n_layers)]
    weights += [utils.get_weights('w_%d' % n_layers, n_hidden, n_out)]

    # initialize biases (one per layer; biases are NOT binarized)
    biases = [utils.get_weights('b_%d' % i, n_hidden) for i in range(1, n_layers)]
    biases += [utils.get_weights('b_%d' % n_layers, n_out)]

    # binarized versions of the real-valued weights
    deterministic_binary_weights = [utils.binarize(w, mode='deterministic') for w in weights]
    stochastic_binary_weights = [utils.binarize(w, mode='stochastic') for w in weights]

    # symbolic variables
    lr = T.scalar(name='learning_rate')
    X = T.matrix(name='X', dtype=theano.config.floatX)
    y = T.matrix(name='y', dtype=theano.config.floatX)

    # generate outputs of mlps: d_outs uses the deterministic weights (test
    # path), s_outs the stochastic weights (training path); each layer applies
    # hard_sigmoid(X.W + b)
    d_outs = [utils.hard_sigmoid(T.dot(X, deterministic_binary_weights[0]) + biases[0])]
    for w, b in zip(deterministic_binary_weights[1:], biases[1:]):
        d_outs.append(utils.hard_sigmoid(T.dot(d_outs[-1], w) + b))
    s_outs = [utils.hard_sigmoid(T.dot(X, stochastic_binary_weights[0]) + biases[0])]
    for w, b in zip(stochastic_binary_weights[1:], biases[1:]):
        s_outs.append(utils.hard_sigmoid(T.dot(s_outs[-1], w) + b))

    # cost function (see utils) — outputs/targets rescaled from [-1, 1] to
    # [0, 1] before the MSE; training cost uses the stochastic path
    cost = utils.get_cost((s_outs[-1]+1.)/2., (y+1.)/2., mode='mse')

    # get the update functions: gradients are taken w.r.t. the stochastic
    # binary weights but applied to the real-valued weights (straight-through
    # estimator), which are then clipped to [-1, 1]
    params = weights + biases
    grads = [T.grad(cost, p) for p in stochastic_binary_weights + biases]
    updates = [(p, T.clip(p - lr * g, -1, 1)) for p, g in zip(params, grads)]

    # generate training and testing functions; test uses deterministic weights
    train_func = theano.function([X, y, lr], [cost], updates=updates)
    test_func = theano.function([X], [d_outs[-1]])
    grads_func = theano.function([X, y], grads)
    int_output_func = theano.function([X], s_outs + d_outs)  # all layer activations

    return train_func, test_func, grads_func, weights + biases, int_output_func
Ejemplo n.º 4
0
def experiment(solver, graph):
    """Run `solver` on `graph` repeatedly and summarize cost/time statistics.

    :param solver: solver callable measured by ``time_mesure``
    :param graph: problem instance passed through to the solver
    :return: a pipe-format (markdown) table with MAX/MIN/AVE of cost and time
    """
    results, times = time_mesure(solver, graph)

    costs = [get_cost(result, graph) for result in results]

    # tabu list result
    table = [
        ["Cost", max(costs), min(costs), np.average(costs)],
        ["Time [s]", max(times), min(times), np.average(times)],
    ]
    header = [" ", "MAX", "MIN", "AVE"]

    return tabulate(table, header, tablefmt="pipe")
def MAENS(start, graph, tasks, time_limit, mul_process=False):
    """
    :param start: the start run time of this program
    :param time_limit: the run time limit of MAENS algorithms
    :param graph: is the graph read from data set, include following attributes:
            {"adjacent_matrix": the adjacent matrix presentation of the graph,
             "adjacent_table": the adjacent table presentation of the graph,
             "demand_matrix": demand_matrix,
             "dijkstra_matrix": dijkstra_matrix[i][j] store the minimum distance between node i and node j
             "basic_infor": store the basic information of this graph, e.g., depot, capacity, required edges and so on,
            }
    :param tasks: an edge tuple list, store all the task need to be finish,
                  notice, if (i, j) in tasks, then (j, i) in it also.
    :param mul_process: run MAENS on multiprocessing or not.
    :return: a feasible solution feasible within time limits.
            Example:
            --------
            s 0,(1,2),(2,3),(3,5),0,0,(1,4),(4,2),(2,9),(4,3),(5,6),0,0,(1,12),(12,6),(6,7),(7,12),0,0,(1,10),(10,9),
            (9,11),(11,5),(5,12),0,0,(1,7),(7,8),(8,10),(10,11),(11,8),0
            q 316
    """
    # initialization multiprocessing setting
    # NOTE(review): Graph/pop_t/best_feasible_solution are rebound as module
    # globals so the pool callback (update_pop_t) can see them — presumably
    # they are declared at module level; confirm against the enclosing module.
    if mul_process:
        pool = Pool(processes=16)
        global Graph
        global pop_t
        global best_feasible_solution
        Graph = graph

    # generate a initial population by path_scanning
    # TODO support more initial population algorithms, e.g, Floyd Algorithm
    pop = init_population(graph, tasks, print_initial=True)
    stop_criterion = 0
    while stop_criterion != Configuration.generation_cnt:
        # initial an iteration
        pop_t = []
        for p in pop:  # set an intermediate population pop_t = pop notice, pop_t is a heap and pop is a list
            heapq.heappush(pop_t, (evaluation_solution(p, graph, 0), p))
        # record the first feasible individual of this generation as the
        # incumbent best (pop is iterated in order; first hit wins)
        for p in pop:
            if is_feasible(p, graph):
                best_feasible_solution['cost'] = get_cost(p, graph)
                best_feasible_solution['solution'] = p
                break
        # multi processing code
        if mul_process:
            results = []
            for i in range(Configuration.opsize):
                # each task does crossover + optional local search; the
                # callback merges the offspring back into the shared pop_t
                result = pool.apply_async(
                    func=crossover_plus_local_search,
                    args=(pop, graph, time_limit, start,
                          best_feasible_solution['cost']),
                    callback=update_pop_t)
                results.append(result)
            # wait an iteration to finish
            for r in results:
                r.wait()
            # less than 3 s of budget left: shut the pool down and emit the
            # best feasible solution found so far
            if time_limit - (time.time() - start) < 3:
                pool.close()
                pool.join()
                print_result(best_feasible_solution['solution'],
                             graph,
                             paint=True)
                return 0
        # single processing code
        else:
            for i in range(Configuration.opsize):
                # pick two distinct random parents and recombine them
                a, b = generate_two_random(0, len(pop) - 1)
                s_x = crossover(pop[a], pop[b], graph)
                r = generate_one_random(0, 1, flt=True)
                lmd = get_initial_lmd(best_feasible_solution['cost'], s_x,
                                      graph)
                # with probability p_ls, refine the offspring by local search;
                # clones (duplicates already in pop_t) are never inserted
                if r < Configuration.p_ls:
                    s_ls, lmd = local_search(s_x, graph, lmd)
                    if not is_clone(s_ls, pop_t):
                        heapq.heappush(
                            pop_t,
                            (evaluation_solution(s_ls, graph, lmd), s_ls))
                    elif not is_clone(s_x, pop_t):
                        heapq.heappush(
                            pop_t, (evaluation_solution(s_x, graph, lmd), s_x))
                elif not is_clone(s_x, pop_t):
                    heapq.heappush(pop_t,
                                   (evaluation_solution(s_x, graph, lmd), s_x))
        # update pop with pop_t: keep the len(pop) best-evaluated individuals
        # (heap pops in ascending evaluation order), dropping empty routes
        for i in range(len(pop)):
            item = heapq.heappop(pop_t)
            pop[i] = [task for task in item[1] if task != []]
        run_time = (time.time() - start)
        print("offspring %d generate current run_time is: %f" %
              (stop_criterion + 1, run_time))
        print("current best feasible solution is:", best_feasible_solution)
        stop_criterion += 1
    print_result(best_feasible_solution['solution'], graph, paint=True)
    return 0
Ejemplo n.º 6
0
def main(data, target, args):
    """Compare classic hierarchical clusterings against an LP-rounding
    hierarchy on `data`, writing the model/solution/tree artifacts to disk.

    :param data: feature matrix, shape (n_samples, n_features)
    :param target: ground-truth labels (0-based)
    :param args: parsed CLI namespace (data, kernel, function, eps, prune,
                 triangle, time, solution)
    """
    # Names for output artifacts (unused name templates removed)
    model_name = 'model_{0}_{1}_{2}.lp'.format(args.data, args.kernel, args.function)
    solution_name = 'solution_{0}_{1}_{2}.sol'.format(args.data, args.kernel, args.function)
    tree_name = 'lp_tree_{0}_{1}_{2}_{3}.pdf'.format(args.data, args.kernel, args.function, args.eps)
    err_dict_name = 'error_{0}_{1}_{2}_{3}.txt'.format(args.data, args.kernel, args.function, args.eps)

    # Test other hierarchical clustering algorithms.
    # Bug fix: map() returns a single-use iterator in Python 3, but
    # one_target is consumed by several utils.error calls and utils.prune;
    # materialize it once as a list.
    one_target = [x + 1 for x in target]
    k = args.prune
    y = pdist(data, metric='euclidean')
    errors = []
    # Agglomerative baselines: single/complete/average on distances, ward on raw data.
    linkages = [hac.linkage(y, method='single'),
                hac.linkage(y, method='complete'),
                hac.linkage(y, method='average'),
                hac.linkage(data, method='ward')]
    for x in linkages:
        pred = hac.fcluster(x, k, 'maxclust')
        err = utils.error(list(pred), one_target)
        errors.append(err)
    # K means baseline
    clf = KMeans(k)
    pred = clf.fit_predict(data)
    pred = map(lambda x: x + 1, pred)
    err = utils.error(list(pred), one_target)
    errors.append(err)
    error_dict = {'single linkage': errors[0], 'complete linkage': errors[1],
                  'average linkage': errors[2], 'ward': errors[3]}
    error_dict['kmeans'] = errors[4]
    print(error_dict)

    # initialize model with the chosen cost function
    if args.function == 'linear':
        m = init_model(data, args.kernel, args.triangle, utils.linear)
    elif args.function == 'quadratic':
        m = init_model(data, args.kernel, args.triangle, utils.quadratic)
    elif args.function == 'cubic':
        m = init_model(data, args.kernel, args.triangle, utils.cubic)
    elif args.function == 'logarithm':
        m = init_model(data, args.kernel, args.triangle, utils.logarithm)
    elif args.function == 'exponential':
        m = init_model(data, args.kernel, args.triangle, utils.exponential)
    m._n = data.shape[0]

    # Check if reading solution from file
    if args.solution:
        print('Reading LP solution from ', args.solution)
        solution_dict = read_solution(m, args.solution)
    else:
        start = time.time()
        print('Optimizing over model')
        m.optimize()
        # Re-optimize with lazily-added triangle constraints until the
        # separation oracle finds no violation or the time budget runs out.
        flag = args.triangle
        while flag and time.time() - start < args.time:
            print("Time_diff = {}".format(time.time() - start))
            m.optimize()
            # Feed solution to separation oracle
            flag = separation_oracle(m, args.triangle)
        end = time.time()
        print('Total time to optimize = {0}'.format(end - start))
        print('Writing solution to ', solution_name)
        m.write(solution_name)
        print('Saving model to ', model_name)
        m.write(model_name)
        solution_dict = get_solution_dict(solution_name)

    # Get ultrametric from LP, then invert the cost transform on it
    print('Rounding LP')
    if args.function == 'linear':
        d = get_ultrametric_from_lp(m, solution_dict, args.eps, utils.linear)
        utils.inverse_ultrametric(d, utils.inverse_linear)
    elif args.function == 'quadratic':
        d = get_ultrametric_from_lp(m, solution_dict, args.eps, utils.quadratic)
        utils.inverse_ultrametric(d, utils.inverse_quadratic)
    elif args.function == 'cubic':
        d = get_ultrametric_from_lp(m, solution_dict, args.eps, utils.cubic)
        utils.inverse_ultrametric(d, utils.inverse_cubic)
    elif args.function == 'exponential':
        d = get_ultrametric_from_lp(m, solution_dict, args.eps, utils.exponential)
        utils.inverse_ultrametric(d, utils.inverse_exponential)
    elif args.function == 'logarithm':
        d = get_ultrametric_from_lp(m, solution_dict, args.eps, utils.logarithm)
        utils.inverse_ultrametric(d, utils.inverse_logarithm)

    print('d = ', d)
    cost = utils.get_cost(m, d)
    print('Cost of hierarchy: ', cost)
    print('Check ultrametric: ', utils.check_ultrametric(d))
    total_obj = utils.get_total(m)
    print('Total objective = ', total_obj)
    print('Scaled cost = ', cost/total_obj)

    utils.complete_ultrametric(d)
    print('Building laminar list')
    L = utils.build_laminar_list(d)
    print('Check laminar: ', utils.test_laminar(L))
    labels = [1]*m._n
    pruned = utils.prune(L, one_target, k, labels)
    print('Error on pruning: ', pruned[0])
    error_dict['lp rounding'] = pruned[0]
    # Bug fix: the file was opened in binary mode but written a str,
    # which raises TypeError in Python 3; open in text mode.
    with open(err_dict_name, 'w') as f:
        f.write(str(error_dict))

    # Build and draw the hierarchy
    G = utils.build_hierarchy(d)
    print('Drawing tree to ', tree_name)
    utils.draw(G, target, m._n, tree_name)
Ejemplo n.º 7
0
def main(data, target, args):
    """Solve the hierarchical-clustering IP with Gurobi and report results.

    Builds a similarity matrix from the chosen kernel, initializes and tunes
    the model, optimizes, then extracts/validates an ultrametric and draws
    the resulting hierarchy.

    :param data: feature matrix, shape (n_samples, n_features)
    :param target: ground-truth labels (0-based)
    :param args: parsed CLI namespace (data, kernel, function, prune,
                 num_threads)
    """
    # Output artifact names keyed by dataset/kernel/cost-function.
    model_name = 'model_{0}_{1}_{2}.lp'.format(args.data, args.kernel,
                                               args.function)
    param_name = 'model_{0}_{1}_{2}.prm'.format(args.data, args.kernel,
                                                args.function)
    solution_name = 'solution_{0}_{1}_{2}.sol'.format(args.data, args.kernel,
                                                      args.function)
    ultrametric_name = 'ultrametric_{0}_{1}_{2}'.format(
        args.data, args.kernel, args.function)
    var_name = 'var_{0}_{1}_{2}_{3}.pkl'.format(args.data, args.data,
                                                args.kernel, args.function)
    obj_name = 'obj_{0}_{1}_{2}_{3}.pkl'.format(args.data, args.data,
                                                args.kernel, args.function)
    laminar_name = 'laminar_{0}_{1}_{2}.pkl'.format(args.data, args.kernel,
                                                    args.function)
    tree_name = 'ip_tree_{0}_{1}_{2}.pdf'.format(args.data, args.kernel,
                                                 args.function)
    # Build the similarity matrix for the requested kernel.
    # NOTE(review): if args.kernel matches none of these, `similarity` is
    # undefined and init_model below raises NameError — confirm the caller
    # validates the kernel choice.
    if args.kernel == 'cosine':
        y = pdist(data, metric='cosine')
        # Make condensed distance matrix into redundant form
        similarity = 1 - y
        similarity = squareform(similarity)
    if args.kernel == 'gaussian':
        y = pdist(data, metric='sqeuclidean')
        s = 1  # Gaussian bandwidth
        # NOTE(review): y is already squared euclidean, so y**2 is the 4th
        # power of the distance — confirm this is intended.
        y = 1 - np.exp(-(y**2) / (2 * s**2))
        # Make condensed distance matrix into redundant form
        similarity = 1 - y
        similarity = squareform(similarity)
    if args.kernel == 'sqeuclidean':
        y = pdist(data, metric='sqeuclidean')
        similarity = -y
        similarity = squareform(similarity)
    # Initialize the model with the chosen cost transform; unknown
    # functions terminate the program.
    if args.function == 'linear':
        m = init_model(data, similarity, target, utils.linear)
    elif args.function == 'quadratic':
        m = init_model(data, similarity, target, utils.quadratic)
    elif args.function == 'cubic':
        m = init_model(data, similarity, target, utils.cubic)
    elif args.function == 'exponential':
        m = init_model(data, similarity, target, utils.exponential)
    elif args.function == 'logarithm':
        m = init_model(data, similarity, target, utils.logarithm)
    else:
        exit(0)
    print('Saving model')
    m.write(model_name)
    # Use concurrent optimization
    m.params.method = 3
    # Limit memory
    m.params.NodeFileStart = 10
    # Limit number of threads
    m.params.Threads = args.num_threads
    # Set MIP Focus
    m.params.MIPFocus = 3
    # Tune parameters
    print('Tuning parameters')
    m.params.tuneResults = 1
    m.tune()
    if m.tuneResultCount > 0:
        # Apply the best parameter set found by the tuner.
        m.getTuneResult(0)
    # Set MIP Gap
    m.params.MIPGap = 0.01
    print('Saving model parameters')
    m.write(param_name)
    print('Saving objective functions')
    with open(obj_name, 'wb') as f:
        pickle.dump(m._obj, f)
    print('Optimizing over model')
    m._n = data.shape[0]
    m.optimize(callback_function)
    if m.status == GRB.Status.OPTIMAL:
        # Write solution
        m.write(solution_name)
        print('Check binary triangle for solution: ', check_binary_triangle(m))

        # Get ultrametric, then undo the cost transform on its values.
        if args.function == 'linear':
            d = get_ultrametric(m, utils.linear)
            utils.inverse_ultrametric(d, utils.inverse_linear)
        elif args.function == 'quadratic':
            d = get_ultrametric(m, utils.quadratic)
            utils.inverse_ultrametric(d, utils.inverse_quadratic)
        elif args.function == 'cubic':
            d = get_ultrametric(m, utils.cubic)
            utils.inverse_ultrametric(d, utils.inverse_cubic)
        elif args.function == 'exponential':
            d = get_ultrametric(m, utils.exponential)
            utils.inverse_ultrametric(d, utils.inverse_exponential)
        elif args.function == 'logarithm':
            d = get_ultrametric(m, utils.logarithm)
            utils.inverse_ultrametric(d, utils.inverse_logarithm)

        print('d = ', d)
        print('Check ultrametric: ', utils.check_ultrametric(d))
        cost = utils.get_cost(m, d)
        print('Cost of hierarchy = ', cost)
        total_obj = utils.get_total(m)
        print('Total cost = ', total_obj)
        print('Scaled cost = ', cost / total_obj)

        # Complete ultrametric
        utils.complete_ultrametric(d)

        # Build laminar list from d
        print('building laminar list')
        L = utils.build_laminar_list(d)
        print('L = ', L)
        print('Check laminar: ', utils.test_laminar(L))
        labels = [1] * m._n
        # NOTE(review): map() is a single-use iterator in Python 3; safe only
        # if utils.prune iterates one_target exactly once — confirm.
        one_target = map(lambda x: x + 1, target)

        # Prune laminar list
        pruned = utils.prune(L, one_target, args.prune, labels)
        print('Error on pruning: ', pruned[0])
        with open(ultrametric_name, 'wb') as f:
            pickle.dump(d, f)

        # Build hierarchy
        print('Building hierarchy')
        G = utils.build_hierarchy(d)

        # Draw hierarchy
        print('Drawing hierarchy to ', tree_name)
        utils.draw(G, target, m._n, tree_name)
    elif m.status == GRB.Status.INFEASIBLE:
        # Compute IIS, for debugging purposes
        m.computeIIS()
        m.write('infeasible.ilp')