Example #1
def grafoCompleto(size):
    if size <= 0:
        raise ValueError("the graph size must be a positive number")
    g = Grafo()
    g.add_vertice("0")
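    # connect each new vertex to every vertex already in the graph, so the
    # result is a complete graph on `size` vertices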
    for i in range(1, size):
        for j in g.vertices:
            g.add_aresta(str(i), str(j))
    return g
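The Grafo class itself is not shown on this page. Below is a minimal sketch that is consistent with the calls in Example #1; the field names, the auto-creation of missing vertices, and the self-loop guard are assumptions for illustration, not the real implementation.

# Hypothetical stand-in for the real Grafo class (assumption, illustration only):
# add_aresta() creates any missing vertices and ignores self-loops.
class Grafo:
    def __init__(self):
        self.vertices = []   # vertex labels in insertion order
        self.arestas = []    # edges stored as (origin, destination, weight)

    def add_vertice(self, v):
        if v not in self.vertices:
            self.vertices.append(v)

    def add_aresta(self, origem, destino, peso=1):
        if origem == destino:
            return  # skip self-loops
        self.add_vertice(origem)
        self.add_vertice(destino)
        self.arestas.append((origem, destino, peso))

# Note that grafoCompleto() appends to g.vertices while iterating over it; with a
# plain list the only extra element visited is str(i) itself, which the self-loop
# guard skips. With this stand-in, grafoCompleto(4) produces vertices '0'..'3'
# and the 6 edges of the complete graph K4.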
Example #2
def main():

    g1 = Grafo(False)  # create an undirected graph
    v1 = Vertice("a", 12)  # create vertex v1
    g1.add_vertice(v1)  # insert vertex v1 into graph g1

    v2 = Vertice("b", 13)  # create vertex v2
    g1.add_vertice(v2)  # insert vertex v2 into graph g1

    v3 = Vertice("c", 6)  # create vertex v3
    g1.add_vertice(v3)  # insert vertex v3 into graph g1

    a1 = Aresta(v1, v2, "a1", 2)  # create an edge between v1 and v2
    a2 = Aresta(v2, v3, "a2", 2)  # create an edge between v2 and v3
    a3 = Aresta(v3, v1, "a3", 2)  # create an edge between v3 and v1

    # TODO: fix
    g1.add_aresta(a1)  # insert edge a1 into the graph
    g1.add_aresta(a2)  # insert edge a2 into the graph
    g1.add_aresta(a3)  # insert edge a3 into the graph

    print(g1)
    g1.getFTD(v1)
    g1.dfs()
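The Vertice and Aresta classes used above are not shown either. These are minimal stubs consistent with the constructor calls in Example #2; the field names are assumptions, and the real Grafo used here additionally accepts these objects in add_vertice/add_aresta and provides getFTD() and dfs().

# Hypothetical stubs matching the constructor calls in Example #2
# (assumed field names; not the real classes).
class Vertice:
    def __init__(self, nome, valor):
        self.nome = nome        # vertex label, e.g. "a"
        self.valor = valor      # value carried by the vertex, e.g. 12

class Aresta:
    def __init__(self, origem, destino, nome, peso):
        self.origem = origem    # Vertice at one end
        self.destino = destino  # Vertice at the other end
        self.nome = nome        # edge label, e.g. "a1"
        self.peso = peso        # edge weight, e.g. 2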
Example #3
def grafoNaoCordal(size):
    g = Grafo()
    if size < 4:
        raise ValueError("Any graph with fewer than 4 vertices is chordal")

    # create a cycle of length 4 (a chordless C4 is the forbidden subgraph
    # that makes a graph non-chordal)
    for i in range(4):
        if i == 3:
            g.add_aresta(str(i), str(0))
        else:
            g.add_aresta(str(i), str(i + 1))

    # insert the remaining vertices up to the user-requested size, connecting
    # each new vertex to every vertex already in the graph
    for i in range(4, size):
        for j in g.vertices:
            g.add_aresta(str(i), str(j))

    return g
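A possible call, assuming a Grafo like the sketch after Example #1 (not the real class):

# grafoNaoCordal(5) first builds the chordless cycle 0-1-2-3-0 and then
# connects vertex 4 to every earlier vertex; the chordless 4-cycle keeps
# the graph non-chordal.
g = grafoNaoCordal(5)
print(g.vertices)      # ['0', '1', '2', '3', '4']  (with the sketch above)
print(len(g.arestas))  # 8: the 4 cycle edges plus 4 edges from vertex '4'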
Example #4

def colocation_pairs_optimal(queue, degradation_limit):

    try:
        import sys
        import pickle

        # TODO: read this "/opt/slurm/lib/degradation_model/" prefix from an
        # environment variable instead of hard-coding it
        sys.path.insert(0, '/opt/slurm/lib/degradation_model/graph/')
        # File only needed for debugging purposes
        #with open('/tmp/PYTHON_PATH.txt', 'a') as f:
        #	print >> f, 'Filename:', sys.path  # Python 2.x
        # Import the graph structure
        from grafo import Grafo
        from blossom_min import min_weight_matching

        grafo = Grafo()
        schedule = []
        apps_counters = {}
        joblist = []

        # build a dictionary mapping each jobid to its counters, used for the degradation graph
        for job in queue:
            jobid = job[0]
            apps_counters[jobid] = job[1]
            joblist.append(jobid)

        # Load the machine-learning model
        #loaded_model = pickle.load(open("linear_regression.sav", 'rb'))
        loaded_model = pickle.load(
            open("/opt/slurm/lib/degradation_model/mlpregressor.sav", 'rb'))
        # Load the scaling used in the training phase
        scaling_model = pickle.load(
            open("/opt/slurm/lib/degradation_model/scaling.sav", 'rb'))

        # For each ordered pair of distinct jobs, predict the mutual
        # degradation and add an edge weighted by the worse of the two values
        for jobMain in joblist:
            for jobSecond in joblist:
                if jobMain != jobSecond:
                    prediction = apps_counters[jobMain] + apps_counters[
                        jobSecond]
                    prediction_normalized = scaling_model.transform(
                        [prediction])
                    degradationMain = loaded_model.predict(
                        prediction_normalized)

                    prediction = apps_counters[jobSecond] + apps_counters[
                        jobMain]
                    prediction_normalized = scaling_model.transform(
                        [prediction])
                    degradationSecond = loaded_model.predict(
                        prediction_normalized)

                    #print("[{r1} {r2}]{r3} {r4}".format(r1=jobMain, r2=jobSecond, r3=degradationMain, r4=degradationSecond))
                    #grafo.add_aresta(str(jobMain), str(jobSecond), max(degradationMain, degradationSecond))
                    grafo.add_aresta(
                        jobMain, jobSecond,
                        max(degradationMain[0], degradationSecond[0]))

        # Apply minimum-weight perfect matching to find the optimal co-schedule
        blossom = min_weight_matching(grafo)
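        # blossom maps each matched jobid to its (partner jobid, predicted degradation) pair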

        # Create the output
        schedule_s = []
        joblist_pairs = blossom.keys()
        with open('/tmp/SLURM_PYTHON_SCHEDULE_DEBUG.txt', 'a') as f:
            print >> f, 'EXECUTION:'  # Python 2.x
            print >> f, 'PAIRS CREATED: ', blossom  # Python 2.x
            for key in joblist_pairs:
                jobid1 = key
                jobid2 = blossom[key][0]
                degradation = blossom[key][1]
                if degradation > degradation_limit:
                    # too much mutual degradation: schedule each job alone
                    schedule.append([jobid1])
                    schedule.append([jobid2])
                else:
                    schedule.append(sorted((jobid1, jobid2)))

            schedule_s = sorted(schedule, key=lambda tup: tup[0])
            print >> f, 'FINAL RESULT: ', schedule_s  # Python 2.x
        f.closed  # no-op: the with block above has already closed the file

        return schedule_s

    except Exception, e:
        with open('/tmp/SLURM_PYTHON_ERROR.txt', 'a') as f:
            print >> f, 'Exception type:', type(e)  # Python 2.x
            print >> f, 'Exception message:', str(e)  # Python 2.x
            print >> f, 'queue:', queue  # Python 2.x
            print >> f, 'queue type:', type(queue)  # Python 2.x
            for job in queue:
                print >> f, 'type job[0]', type(job[0])
                print >> f, 'type job[1]', type(job[1])
                print >> f, '++++++++++++++++++++++++:'  # Python 2.x
            print >> f, 'degradation_limit:', degradation_limit  # Python 2.x
            print >> f, 'degradation_limit type:', type(degradation_limit)  # Python 2.x
            #print('Exception message:', str(e), file=f)  # Python 3.x
        f.closed  # no-op: the with block above has already closed the file
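For reference, the function expects queue to be a list of (jobid, counters) entries, where the counter lists of two jobs are concatenated and passed through the pickled scaler and regressor. A hypothetical call is shown below; the jobids, counter values, and the 0.15 threshold are made up for illustration, and the pickled models under /opt/slurm/lib/degradation_model/ must be present for it to run.

# Hypothetical input: each entry is [jobid, list of application counters].
queue = [
    ["1001", [0.12, 0.40, 0.07]],
    ["1002", [0.30, 0.10, 0.22]],
    ["1003", [0.05, 0.55, 0.18]],
    ["1004", [0.21, 0.33, 0.09]],
]
# Pairs whose predicted mutual degradation stays below the limit are
# co-scheduled; the others are scheduled alone.
schedule = colocation_pairs_optimal(queue, degradation_limit=0.15)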