Example #1
def sa(model,
       p=sa_default_prob,
       threshold=0.001,
       max_tries=1000,
       optimal='low'):

    best_can = model.gen_candidate()
    while best_can is None:
        best_can = model.gen_candidate()

    normalize = prerun(model)
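    # prerun is assumed to sample the model and return a function mapping raw
    # aggregate scores into [0, 1]; the mws variants below rely on that range
    # when they seed best_score with 1.0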
    best_score = normalize(model.aggregate(best_can))

    curr_can = best_can
    curr_score = best_score

    out = []
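    # trace legend: "!" new global best, "+" current candidate improved,
    # "?" accepted a random jump, "." nothing happened this step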

    for i in range(0, max_tries):

        if i % 50 == 0:
            out += ["\n" + str(best_score) + " "]

        new_can = model.gen_candidate()
        if new_can is None:
            out += ["."]
            continue
        new_score = normalize(model.aggregate(new_can))

        if optimal == 'low':  # only minimization is handled in this variant
            flag = True  # cleared once any status symbol is emitted this step
            if new_score < best_score:
                best_score = new_score
                best_can = new_can
                out += ["!"]
                flag = False

            if new_score < curr_score:
                curr_score = new_score
                curr_can = new_can
                out += ["+"]
                flag = False

            # float() avoids Python 2 integer division truncating the
            # cooling ratio to 0 for every i < max_tries
            elif p(curr_score, new_score, float(i) / max_tries) < random.random():
                curr_score = new_score
                curr_can = new_can
                out += ["?"]
                flag = False

            if best_score < threshold:
                break

            if flag:
                out += ["."]

    print ''.join(out)
    print "\niterations:" + str(i + 1)
    print "Score:" + str(best_score)
    return best_can, best_score
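All of the examples on this page lean on project helpers defined elsewhere (prerun, sa_default_prob, a12, Candidate, mws_optimize, and the model interface). The stand-ins below are a minimal, hypothetical sketch, just enough to exercise the function above; the real project versions will differ.

import math
import random

def sa_default_prob(curr, new, ratio):
    # hypothetical cooling probability; the loop above jumps to the new
    # candidate when this value falls below a random draw
    t = max(1.0 - ratio, 0.05)  # temperature floor avoids math overflow
    return math.exp(min((curr - new) / t, 0.0))

class ToyModel(object):
    # one-decision model whose energy is x * x, minimized at x = 0
    def gen_candidate(self):
        return random.uniform(-1, 1)

    def aggregate(self, can):
        return can * can

def prerun(model):
    # the real prerun presumably builds a normalizer by sampling the model;
    # this toy energy already lies in [0, 1], so scores pass through
    return lambda score: score

best, score = sa(ToyModel(), max_tries=200)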
Example #2
def sa(model, p=sa_default_prob, threshold=0.001, max_tries=1000, lives=5, era_size=50, era0=None):

    normalize = prerun(model)

    aggregate = model.aggregate

    def n_score(can):
        return normalize(aggregate(can))

    # True if can1's energy is less than can2's, i.e. can1 is better
    def type1(can1, can2):
        return (n_score(can1) < n_score(can2))

    def type2(era1, era2):
        # a12 gives the fraction of pairings where lst1's value beats lst2's
        total = 0
        n = 0
        for obj_scores1, obj_scores2 in zip(era1, era2):
            # near 1 means era1's scores are larger more often; when
            # minimizing, that means era1 is worse and era2 improved
            total += a12(obj_scores1, obj_scores2)
            n += 1
        return (total / float(n) >= 0.5)

    # eras stores one entry per era, i.e. a list of [objective scores for every
    # candidate in that era]. With 2 objectives and a 5-candidate era:
    # one era entry: [[0.5, 0.5], [0.2, 0.3], [0.5, 0.5], [0.2, 0.3], [0.5, 0.5]]
    # two eras:      [[[0.5, 0.5], [0.2, 0.3], [0.5, 0.5], [0.2, 0.3], [0.5, 0.5]],
    #                 [[0.5, 0.5], [0.2, 0.3], [0.5, 0.5], [0.2, 0.3], [0.5, 0.5]]]

    if not era0:
        best_can = model.gen_candidate()
        while best_can is None:
            best_can = model.gen_candidate()

        best_score = n_score(best_can)
        curr_can = best_can
        curr_score = best_score
        curr_era = []

    else:
        # shallow copy of the incoming era; eval below still mutates the candidates
        curr_era = []
        era0_copy = list(era0)
        for can in era0_copy:
            model.eval(can)
            obj_scores = [x for x in can.scores]
            curr_era += [obj_scores]

        best_can = era0_copy[0]
        for can in era0_copy:
            if type1(can, best_can):
                best_can = can
        curr_can = era0_copy[-1]
        curr_score = n_score(curr_can)

    best_score = n_score(best_can)

    out = []

    eras = []
    curr_lives = lives
    i = -1

    # If `lives` eras pass with no improvement, EXIT
    # If iterations exceed max_tries, EXIT
    # If best_score < threshold, EXIT

    while True:
        i += 1

        if i == max_tries:
            out += ["\nReached max tries"]
            if curr_era:
                eras += [curr_era]
                curr_era = []
            break

        # Beginning of a new ERA
        if i % era_size == 0:
            out += ["\n" + str(best_score) + " "]
            if curr_era:
                eras += [curr_era]
                curr_era = []
                if len(eras) > 1:
                    last_index = len(eras) - 1
                    # If there is improvement reset lives, else decrement
                    if (type2(eras[last_index - 1], eras[last_index])):
                        curr_lives = lives
                    else:
                        curr_lives -= 1
                        if curr_lives == 0:
                            out += ["\nNo more Lives"]
                            break

        new_can = model.gen_candidate()
        if new_can is None:
            out += ["."]
            model.eval(curr_can)
            obj_scores = [x for x in curr_can.scores]
            curr_era += [obj_scores]
            continue

        model.eval(new_can)
        obj_scores = [x for x in new_can.scores]
        curr_era += [obj_scores]

        new_score = n_score(new_can)

        flag = True
        if new_score < best_score:
            best_score = new_score
            best_can = new_can
            out += ["!"]
            flag = False

        if new_score < curr_score:
            curr_score = new_score
            curr_can = new_can
            out += ["+"]
            flag = False

        elif p(curr_score, new_score, float(i) / max_tries) < random.random():
            curr_score = new_score
            curr_can = new_can
            out += ["?"]
            flag = False

        if best_score < threshold:
            out += ["\nScore satisfies Threshold"]
            break

        if flag:
            out += ["."]

    if curr_era:
        eras += [curr_era]

    print ''.join(out)
    print "\niterations:" + str(i)
    print "Score:" + str(best_score)
    return best_can, best_score
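type2 leans on a12. Going by the comments and the 0.56 cutoff used in the later variants, a12 reads as the Vargha-Delaney A measure; a sketch under that assumption:

def a12(lst1, lst2):
    # Vargha-Delaney A: probability that a value drawn from lst1 is greater
    # than one drawn from lst2, counting ties as half a win
    more = same = 0
    for x in lst1:
        for y in lst2:
            if x > y:
                more += 1
            elif x == y:
                same += 1
    return (more + 0.5 * same) / float(len(lst1) * len(lst2))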
Example #3
def mws(model, p=0.5, threshold=0.001, max_tries=100, max_changes=10, era_size=10, era0=None, lives=5):

    best_can = None

    normalize = prerun(model)
    aggregate = model.aggregate

    def n_score(can):
        return normalize(aggregate(can))

    # True if can1's energy is less than can2's, i.e. can1 is better
    def type1(can1, can2):
        return (n_score(can1) < n_score(can2))

    def type2(era1, era2):
        # a12 gives the fraction of pairings where lst1's value beats lst2's;
        # near 1 means the newer era's scores are smaller, i.e. better when minimizing
        total = 0
        n = 0
        for obj_scores1, obj_scores2 in zip(era1, era2):
            total += a12(obj_scores1, obj_scores2)
            n += 1
        return (total / float(n) >= 0.5)

    best_score = 1.0  # scores are assumed normalized into [0, 1], so 1.0 is worst

    if not era0:
        curr_era = []
        curr_lives = lives

    else:
        # shallow copy of the incoming era; eval below still mutates the candidates
        curr_lives = lives
        curr_era = []
        era0_copy = list(era0)
        for can in era0_copy:
            model.eval(can)
            obj_scores = [x for x in can.scores]
            curr_era += [obj_scores]

        best_can = era0_copy[0]
        best_score = n_score(best_can)
        for can in era0_copy:
            can_score = n_score(can)
            if can_score < best_score:
                best_can = can
                best_score = can_score

    # eras stores one entry per era, i.e. a list of [objective scores for every
    # candidate in that era]. With 2 objectives and a 5-candidate era:
    # one era entry: [[0.5, 0.5], [0.2, 0.3], [0.5, 0.5], [0.2, 0.3], [0.5, 0.5]]
    # two eras:      [[[0.5, 0.5], [0.2, 0.3], [0.5, 0.5], [0.2, 0.3], [0.5, 0.5]],
    #                 [[0.5, 0.5], [0.2, 0.3], [0.5, 0.5], [0.2, 0.3], [0.5, 0.5]]]
    eras = []

    out = []

    candidate = None

    i = -1
    thresh_flag = False
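    # i counts outer retries, j counts mutations within a retry; together they
    # give the iteration figure printed at the end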

    while not thresh_flag:
        i += 1
        j = 0
        if i == max_tries:
            if curr_era:
                eras += [curr_era]
                curr_era = []
            out += ["\nReached Max Tries"]
            break

        if i % era_size == 0:
            out += ["\n" + str(best_score) + " "]
            if curr_era:
                eras += [curr_era]
                curr_era = []
                if len(eras) > 1:
                    last_index = len(eras) - 1
                    # If there is improvement reset lives, else decrement
                    if type2(eras[last_index - 1], eras[last_index]):
                        curr_lives = lives
                    else:
                        curr_lives -= 1
                        if curr_lives == 0:
                            out += ["\nNo more Lives"]
                            break

        if candidate is not None:
            prev_candidate = candidate

        if i == 0:
            while candidate is None:
                candidate = model.gen_candidate()
        else:
            candidate = model.gen_candidate()

        # could not generate a valid candidate after patience tries
        if candidate is None:
            out += ["."]
            model.eval(prev_candidate)
            obj_scores = [x for x in prev_candidate.scores]
            curr_era += [obj_scores]
            continue

        if best_can is None:
            best_can = candidate
            best_score = n_score(candidate)

        for j in range(0, max_changes):
            model.eval(candidate)
            score = n_score(candidate)

            obj_scores = [x for x in candidate.scores]
            curr_era += [obj_scores]

            if score < best_score:
                out += ["!"]
                best_can = candidate
                best_score = score

            if best_score < threshold:
                if curr_era:
                    eras += [curr_era]
                    curr_era = []
                out += ["\nScore satisfies threshold"]
                thresh_flag = True
                break

            # choose a random decision
            c = random.randrange(0, len(model.decs))

            if p < random.random():
                # change the decision randomly
                # ensure it is valid
                patience = model.patience
                while patience > 0:
                    new_can = Candidate(dec_vals=list(candidate.dec_vals))
                    new_can.dec_vals[c] = model.decs[c].generate_valid_val()
                    if model.ok(new_can):
                        candidate = new_can
                        out += ["?"]
                        break
                    patience -= 1
                if patience == 0:
                    out += ["."]

            else:
                orig_score = n_score(candidate)
                candidate = mws_optimize(model, candidate, c)
                new_score = normalize(model.aggregate(candidate))
                if orig_score != new_score:
                    out += ["+"]
                else:
                    out += ["."]

    print ''.join(out)
    print "\niterations:" + str(max_changes * i + j)
    print "Best Score:" + str(normalize(model.aggregate(best_can)))

    return best_can, normalize(model.aggregate(best_can))
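This variant also assumes a Candidate container and an mws_optimize local-search helper. Hypothetical stand-ins consistent with how they are called here (the fourth argument to mws_optimize is a 'low'/'high' flag in one variant and a comparator in the era-based ones, so the sketch accepts both):

class Candidate(object):
    # bare container matching the keyword arguments used above
    def __init__(self, dec_vals=None, scores=None):
        self.dec_vals = dec_vals or []  # one value per model decision
        self.scores = scores or []      # one score per model objective

def mws_optimize(model, candidate, c, better=None):
    # greedy sketch: try a few valid values for decision c, keep the winner
    if better in (None, 'low'):
        better = lambda a, b: model.aggregate(a) < model.aggregate(b)
    elif better == 'high':
        better = lambda a, b: model.aggregate(a) > model.aggregate(b)
    best = candidate
    for _ in range(10):
        trial = Candidate(dec_vals=list(candidate.dec_vals))
        trial.dec_vals[c] = model.decs[c].generate_valid_val()
        if model.ok(trial) and better(trial, best):
            best = trial
    return best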
Example #4
def mws(model, p=0.5, threshold=0.001, max_tries=100, max_changes=10, optimal='low'):

    best_can = None

    normalize = prerun(model)

    out = []

    if optimal == 'low':
        best_score = 1.0
    else:
        best_score = 0.0

    for i in range(0, max_tries):
        candidate = model.gen_candidate()

        if i % 10 == 0:
            out += ["\n" + str(best_score) + " "]

        # could not generate a valid candidate after patience tries
        if candidate is None:
            out += ["."]
            continue

        if best_can is None:
            best_can = candidate
            best_score = normalize(model.aggregate(candidate))

        for j in range(0, max_changes):
            model.eval(candidate)
            score = normalize(model.aggregate(candidate))

            if optimal == 'low':
                if score < threshold:
                    out += ["\niterations:" + str(i * max_changes + j)]
                    out += ["Score:" + str(score)]
                    print ''.join(out)
                    return candidate, score

                if score < best_score:
                    out += ["!"]
                    best_can = candidate
                    best_score = score

            else:
                if score > threshold:
                    out += ["iterations:" + str(i * max_changes + j)]
                    out += ["Score:" + str(score)]
                    print ''.join(out)
                    return candidate, score

                if score > best_score:
                    out += ["!"]
                    best_can = candidate
                    best_score = score

            # choose a random decision
            c = random.randrange(0, len(model.decs))

            if p < random.random():
                # change the decision randomly
                # ensure it is valid
                patience = model.patience
                while patience > 0:
                    new_can = Candidate(dec_vals=list(candidate.dec_vals))
                    new_can.dec_vals[c] = model.decs[c].generate_valid_val()
                    if model.ok(new_can):
                        candidate = new_can
                        out += ["?"]
                        break
                    patience -= 1
                if patience == 0:
                    out += ["."]

            else:
                orig_score = normalize(model.aggregate(candidate))
                candidate = mws_optimize(model, candidate, c, optimal)
                new_score = normalize(model.aggregate(candidate))
                if orig_score != new_score:
                    out += ["+"]
                else:
                    out += ["."]

    print ''.join(out)

    print "\niterations:" + str(max_changes * max_tries)
    print "Best Score:" + str(normalize(model.aggregate(best_can)))

    return best_can, normalize(model.aggregate(best_can))
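The optimal switch flips every comparison, so a maximizing run wants a threshold near the top of the score range rather than the bottom. A sketch of both calls (model construction elided):

best, score = mws(model, optimal='low', threshold=0.001)   # minimize
best, score = mws(model, optimal='high', threshold=0.999)  # maximize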
Example #5
def mws(model, p=0.5, threshold=0.001, max_tries=500, max_changes=10, era_size=100, era0=None, lives=5):

    best_can = None
    best_score = None  # set once the first candidate is evaluated

    # max_tries budgets individual changes; convert it to outer tries
    max_tries //= max_changes

    normalize = prerun(model)
    aggregate = model.aggregate

    def n_score(can):
        # raw aggregate on purpose; normalization is skipped in this variant
        return aggregate(can)

    # True if can1's energy is less than can2's, i.e. can1 is better
    def type1(can1, can2):
        return (n_score(can1) < n_score(can2))

    def type2(era1, era2):
        # improvement test between consecutive eras: True if at least one
        # objective shows a non-trivial win for the newer era
        # (a12 >= 0.56, the usual "small effect" boundary)
        for index, objective in enumerate(era2):
            if a12(era1[index], objective) >= 0.56:
                return True
        return False

    if not era0:
        # no seed era was supplied; start cold
        curr_era = [[] for _ in model.objectives()]
        curr_lives = lives

    else:
        # copy the inner lists so evaluation here cannot mutate the caller's era
        curr_lives = lives
        era0_copy = []
        for can in era0:
            new_can = Candidate(dec_vals=list(can.dec_vals), scores=list(can.scores))
            era0_copy += [new_can]
        curr_era = [[] for _ in model.objectives()]
        for can in era0_copy:
            model.eval(can)
            obj_scores = [x for x in can.scores]
            for index, score in enumerate(obj_scores):
                curr_era[index] += [score]

        best_can = era0_copy[0]
        best_score = n_score(best_can)
        for can in era0_copy:
            if type1(can, best_can):
                best_can = can
                best_score = n_score(best_can)

    # eras stores one entry per era; each entry holds one list per objective,
    # giving that objective's score for every candidate seen during the era.
    # For a model with 2 objectives and a 3-candidate era:
    #     era entry = [[o1_c1, o1_c2, o1_c3], [o2_c1, o2_c2, o2_c3]]
    eras = []

    out = []

    candidate = None

    i = -1
    thresh_flag = False

    while not thresh_flag:
        i += 1
        j = 0
        if i == max_tries:
            if any(curr_era):
                eras += [curr_era]
            out += ["\nReached Max Tries"]
            break

        if (i * max_changes) % era_size == 0:
            out += ["\n" + str(best_score) + " "]
            if any(curr_era):
                eras += [curr_era]
                curr_era = [[] for _ in model.objectives()]
                if len(eras) > 1:
                    last_index = len(eras) - 1
                    # grant a fresh batch of lives on improvement, else decrement
                    if type2(eras[last_index - 1], eras[last_index]):
                        curr_lives += lives
                    else:
                        curr_lives -= 1
                        if curr_lives == 0:
                            out += ["\nNo more Lives"]
                            break

        if candidate is not None:
            prev_candidate = candidate

        if i == 0:
            while candidate is None:
                candidate = model.gen_candidate()
        else:
            candidate = model.gen_candidate()

        # could not generate a valid candidate after patience tries
        if candidate is None:
            out += ["."]
            model.eval(prev_candidate)
            obj_scores = [x for x in prev_candidate.scores]
            for index, score in enumerate(obj_scores):
                curr_era[index] += [score]
            continue

        if best_can is None:
            best_can = candidate
            best_score = n_score(candidate)

        for j in range(0, max_changes):
            model.eval(candidate)
            score = n_score(candidate)

            obj_scores = [x for x in candidate.scores]
            for index, obj_score in enumerate(obj_scores):
                curr_era[index] += [obj_score]

            if type1(candidate, best_can):
                out += ["!"]
                best_can = candidate
                best_score = score

            # NOTE: the early exit on threshold is disabled in this variant, so
            # thresh_flag never flips; the loop ends on max_tries or lives

            # choose a random decision
            c = random.randrange(0, len(model.decs))

            if p < random.random():
                # change the decision randomly
                # ensure it is valid
                patience = model.patience
                while patience > 0:
                    new_can = Candidate(dec_vals=list(candidate.dec_vals))
                    new_can.dec_vals[c] = model.decs[c].generate_valid_val()
                    if model.ok(new_can):
                        candidate = new_can
                        out += ["?"]
                        break
                    patience -= 1
                if patience == 0:
                    out += ["."]

            else:
                orig_score = n_score(candidate)
                candidate = mws_optimize(model, candidate, c, type1)
                new_score = n_score(candidate)  # compare on the same scale
                if orig_score != new_score:
                    out += ["+"]
                else:
                    out += ["."]

    return best_can, model.aggregate(best_can), eras[-1]
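A note on the era plumbing: era0 seeds the first era with previously evaluated Candidate objects, while the third return value is the final era's raw per-objective score lists, so the two are not interchangeable. A hypothetical seeding call:

seed = [model.gen_candidate() for _ in range(20)]
seed = [can for can in seed if can is not None]
best, final_agg, last_era = mws(model, era0=seed)
# last_era[k] lists objective k's score for every candidate in the final era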
Example #6
def sa(model, p=sa_default_prob, threshold=0.001, max_tries=100000, lives=10, era_size=2000, era0=None):

    normalize = prerun(model)

    aggregate = model.aggregate

    def actual_n_score(can):
        # normalized energy; used only for the jump probability
        return normalize(aggregate(can))

    def n_score(can):
        # raw aggregate; comparisons in this variant skip normalization
        return aggregate(can)

    # True if can1's energy is less than can2's, i.e. can1 is better
    def type1(can1, can2):
        return (n_score(can1) < n_score(can2))

    def type2(era1, era2):
        # improvement test between consecutive eras: True if at least one
        # objective shows a non-trivial win for the newer era
        # (a12 >= 0.56, the usual "small effect" boundary)
        for index, objective in enumerate(era2):
            if a12(era1[index], objective) >= 0.56:
                return True
        return False

    # One era holds one list per objective, each with that objective's value
    # for every candidate seen during the era:
    #     era = [[can1.obj1_score, can2.obj1_score],
    #            [can1.obj2_score, can2.obj2_score]]

    if not era0:
        best_can = model.gen_candidate()
        while best_can is None:
            best_can = model.gen_candidate()

        best_score = n_score(best_can)
        curr_can = best_can
        curr_score = best_score
        curr_era = [[] for _ in model.objectives()]

    else:
        # copy the inner lists so evaluation here cannot mutate the caller's era
        era0_copy = []
        for can in era0:
            new_can = Candidate(dec_vals=list(can.dec_vals), scores=list(can.scores))
            era0_copy += [new_can]
        curr_era = [[] for _ in model.objectives()]
        for can in era0_copy:
            model.eval(can)
            obj_scores = [x for x in can.scores]
            for index, score in enumerate(obj_scores):
                curr_era[index] += [score]

        best_can = era0_copy[0]
        for can in era0_copy:
            if type1(can, best_can):
                best_can = can
        curr_can = era0_copy[-1]
        curr_score = n_score(curr_can)

    best_score = n_score(best_can)

    out = []

    eras = []
    curr_lives = lives
    i = -1

    # If `lives` eras pass with no improvement, EXIT
    # If iterations exceed max_tries, EXIT
    # If best_score < threshold, EXIT

    while True:
        i += 1

        if i == max_tries:
            out += ["\nReached max tries"]
            if any(curr_era):
                eras += [curr_era]
                curr_era = [[] for _ in model.objectives()]
            break

        # Beginning of a new ERA
        if i % era_size == 0:
            out += ["\n" + str(best_score) + " "]
            if any(curr_era):
                eras += [curr_era]
                curr_era = [[] for _ in model.objectives()]
                if len(eras) > 1:
                    last_index = len(eras) - 1
                    # grant a fresh batch of lives on improvement, else decrement
                    if type2(eras[last_index - 1], eras[last_index]):
                        curr_lives += lives
                    else:
                        curr_lives -= 1
                        if curr_lives == 0:
                            print "No more lives"
                            out += ["\nNo more Lives"]
                            break

        new_can = model.gen_candidate()
        if new_can is None:
            out += ["."]
            model.eval(curr_can)
            obj_scores = [x for x in curr_can.scores]
            for index, score in enumerate(obj_scores):
                curr_era[index] += [score]
            continue

        model.eval(new_can)
        obj_scores = [x for x in new_can.scores]
        for index, score in enumerate(obj_scores):
            curr_era[index] += [score]

        new_score = n_score(new_can)

        flag = True  # cleared once any status symbol is emitted this step
        if type1(new_can, best_can):
            best_score = new_score
            best_can = new_can
            out += ["!"]
            flag = False

        # jump probability from the normalized scores and the cooling ratio;
        # float() avoids Python 2 integer division truncating the ratio to 0
        prob = p(actual_n_score(curr_can), actual_n_score(new_can),
                 float(i) / max_tries)
        if type1(new_can, curr_can):
            curr_score = new_score
            curr_can = new_can
            out += ["+"]
            flag = False

        elif prob < random.random():
            curr_score = new_score
            curr_can = new_can
            out += ["?"]
            flag = False

        if best_score < threshold:
            out += ["\nScore satisfies Threshold"]
            break

        if flag:
            out += ["."]

    # one last flush; eras[-1] may be empty or partial after a break, so the
    # return hands back eras[-2], the most recent complete era
    if curr_era:
        eras += [curr_era]

    return best_can, best_score, eras[-2]
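With the a12 sketch from earlier, the per-objective improvement test can be checked by hand; 0.56 is the conventional lower bound for a "small" Vargha-Delaney effect:

prev_era = [[0.9, 0.8, 0.9], [0.5, 0.6, 0.5]]  # one list per objective
next_era = [[0.4, 0.3, 0.5], [0.5, 0.6, 0.5]]  # objective 0 improved
print a12(prev_era[0], next_era[0])            # 1.0, so type2 reports improvement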