コード例 #1
0
ファイル: optimizers.py プロジェクト: rchakra3/x9115rc3
def mws_optimize(model, candidate, dec_index, optimization, tries=50):
    """Greedy local search on a single decision.

    Resamples decision `dec_index` up to `tries` times and keeps the
    candidate with the best aggregate score. `optimization` == 'low'
    minimizes the score; any other value maximizes it.
    Returns the best Candidate found (possibly the input itself).
    """
    best_can = candidate
    model.eval(best_can)
    best_score = model.aggregate(best_can)

    minimizing = (optimization == 'low')

    for _ in range(tries):
        trial = Candidate(dec_vals=list(candidate.dec_vals))
        # Only the chosen decision is resampled; the rest stay fixed.
        trial.dec_vals[dec_index] = model.decs[dec_index].generate_valid_val()
        model.eval(trial)
        trial_score = model.aggregate(trial)

        improved = (trial_score < best_score) if minimizing else (trial_score > best_score)
        if improved:
            best_score = trial_score
            best_can = trial

    return best_can
コード例 #2
0
def de_extrapolate(frontier, can_index, cop, ea, decision_objs):
    """Differential-evolution extrapolation with value wrapping.

    For each decision, with probability `cop`, replaces the value with
    x + ea * (y - z) taken from three other frontier members, wrapped
    into a valid range via decision_objs[d].wrap(). If no decision was
    changed, copies one random decision from the first donor instead.
    Returns the new Candidate.
    """
    can = frontier[can_index]
    new_can = Candidate(dec_vals=list(can.dec_vals))
    two, three, four = get_any_other_three(frontier, can_index)

    changed = False

    for d in range(len(can.dec_vals)):
        print d
        x, y, z = two.dec_vals[d], three.dec_vals[d], four.dec_vals[d]

        if random.random() < cop:
            changed = True
            new_can.dec_vals[d] = decision_objs[d].wrap(x + ea * (y - z))

    if not changed:
        d = random.randint(0, len(can.dec_vals) - 1)
        new_can.dec_vals[d] = two.dec_vals[d]

    # Debug dump for out-of-range values.
    # NOTE(review): `d`, `x`, `y`, `z` here are leftovers from the loop
    # (or the fallback branch), so only the last-touched decision is
    # checked -- verify this is intentional.
    if(new_can.dec_vals[d]<=0 or new_can.dec_vals[d]>=1):
        print "###########3"
        print "x,y,z :" + str(x)+","+str(y)+","+str(z)
        print new_can.dec_vals[d]
        print "##########3"

    return new_can
コード例 #3
0
ファイル: optimizers.py プロジェクト: rchakra3/x9115rc3
def mws_optimize(model, candidate, dec_index, optimization, tries=50):
    """Hill-climb one decision by repeated random resampling.

    Tries `tries` random replacement values for the decision at
    `dec_index`, scoring each trial with model.aggregate after
    model.eval. Keeps the lowest score when `optimization` is 'low',
    the highest otherwise, and returns the winning Candidate.
    """
    incumbent = candidate
    model.eval(incumbent)
    incumbent_score = model.aggregate(incumbent)

    for _ in range(0, tries):
        challenger = Candidate(dec_vals=list(candidate.dec_vals))
        # A single decision is mutated per trial; this could be
        # extended to enumerate every legal value instead.
        challenger.dec_vals[dec_index] = model.decs[dec_index].generate_valid_val(
        )
        model.eval(challenger)
        challenger_score = model.aggregate(challenger)

        if optimization == 'low':
            # Minimizing: accept strictly smaller scores.
            if challenger_score < incumbent_score:
                incumbent_score = challenger_score
                incumbent = challenger
        else:
            # Maximizing: accept strictly larger scores.
            if challenger_score > incumbent_score:
                incumbent_score = challenger_score
                incumbent = challenger

    return incumbent
コード例 #4
0
ファイル: de.py プロジェクト: rchakra3/x9115rc3
def de_extrapolate(frontier, can_index, cop, ea):
    """DE/rand/1-style extrapolation for one frontier member.

    Builds a trial candidate from frontier[can_index]: each decision
    is, with probability `cop`, replaced by x + ea * (y - z) where
    x, y, z come from three other frontier members. If no decision was
    mutated, one random decision is copied from the first donor so the
    trial always differs from its base. Returns the trial Candidate.
    """
    base = frontier[can_index]
    trial = Candidate(dec_vals=list(base.dec_vals))
    donor_a, donor_b, donor_c = get_any_other_three(frontier, can_index)

    mutated = False

    for idx in range(len(base.dec_vals)):
        x = donor_a.dec_vals[idx]
        y = donor_b.dec_vals[idx]
        z = donor_c.dec_vals[idx]

        if random.random() < cop:
            mutated = True
            trial.dec_vals[idx] = x + ea * (y - z)

    if not mutated:
        # Guarantee at least one change.
        idx = random.randint(0, len(base.dec_vals) - 1)
        trial.dec_vals[idx] = donor_a.dec_vals[idx]
    return trial
コード例 #5
0
def de_extrapolate(frontier, can_index, cop, ea):
    """Extrapolate a new candidate via differential evolution.

    With probability `cop` per decision, sets the trial value to
    x + ea * (y - z) using the same decision from three other frontier
    members; falls back to copying one random decision from the first
    donor when nothing fired. Returns the trial Candidate.
    """
    source = frontier[can_index]
    offspring = Candidate(dec_vals=list(source.dec_vals))
    first, second, third = get_any_other_three(frontier, can_index)

    touched = False
    n_decs = len(source.dec_vals)

    for pos in range(n_decs):
        x, y, z = first.dec_vals[pos], second.dec_vals[pos], third.dec_vals[pos]
        if random.random() < cop:
            touched = True
            offspring.dec_vals[pos] = x + ea * (y - z)

    if not touched:
        # Ensure the offspring is not an exact copy of its source.
        pos = random.randint(0, n_decs - 1)
        offspring.dec_vals[pos] = first.dec_vals[pos]
    return offspring
コード例 #6
0
def hve(frontier, min, max, type1, sample=100000):
    """Estimate the hyper-volume of `frontier` by Monte Carlo sampling.

    Throws `sample` random points ("pebbles") uniformly into the box
    bounded per-objective by `min`/`max` and returns the fraction that
    fall inside the region covered by the frontier (per `inbox`).

    NOTE: `min`/`max` shadow the builtins; names kept so existing
    callers are unaffected.
    """
    count = 0
    # Number of objectives, taken from the first frontier member.
    m=len(frontier[0].scores)
    for i in xrange(sample):
        pebble=[random.uniform(min[k],max[k]) for k in xrange(m)]
        pebble_can = Candidate(scores=pebble)
        if inbox(pebble_can,frontier,type1):
            count=count+1
    # BUG FIX: under Python 2, `count / sample` is integer division and
    # truncates the fraction to 0 whenever count < sample. Force float
    # division so the estimate is a real ratio in [0, 1].
    return float(count)/sample
コード例 #7
0
ファイル: ga.py プロジェクト: rchakra3/x9115rc3
def crossover(indiv1, indiv2, cop):
    """k-point crossover (k = cop) producing two complementary children.

    Draws `cop` random cut indices, sorts them, and copies decisions
    index-by-index: child1 starts copying from indiv1 and child2 from
    indiv2, and the source pair swaps at every cut point.
    Returns the tuple (child1, child2).
    """
    cut_points = sorted(
        random.randint(0, len(indiv1.dec_vals)) for _ in range(cop))

    # turn == 0: child1 copies indiv1, child2 copies indiv2.
    # turn == 1: the sources are swapped.
    sources = [indiv1, indiv2]
    turn = 0
    next_cut = 0

    child1 = Candidate(dec_vals=list(indiv1.dec_vals))
    child2 = Candidate(dec_vals=list(indiv1.dec_vals))

    for i in range(len(indiv1.dec_vals)):
        if next_cut < len(cut_points) and i == cut_points[next_cut]:
            turn = 1 - turn
            next_cut += 1

        child1.dec_vals[i] = sources[turn].dec_vals[i]
        child2.dec_vals[i] = sources[1 - turn].dec_vals[i]

    return (child1, child2)
コード例 #8
0
def crossover(indiv1, indiv2, cop):
    """Multi-point crossover yielding two mirror-image children.

    Picks `cop` sorted cut indices; walking the decision vector, the
    children copy from rotating parent queues that advance at every
    cut, so child1 and child2 always draw from opposite parents.
    Returns (child1, child2).
    """
    cut_points = []
    for _ in range(cop):
        cut_points.append(random.randint(0, len(indiv1.dec_vals)))
    cut_points.sort()

    # Rotating views of the parents; element [0] is the current source.
    rotation1 = deque([indiv1, indiv2])
    rotation2 = deque([indiv2, indiv1])

    child1 = Candidate(dec_vals=list(indiv1.dec_vals))
    child2 = Candidate(dec_vals=list(indiv1.dec_vals))

    next_cut = 0
    for i in range(len(indiv1.dec_vals)):
        if next_cut < len(cut_points) and i == cut_points[next_cut]:
            # Swap sources at a cut point.
            rotation1.rotate(-1)
            rotation2.rotate(-1)
            next_cut += 1

        child1.dec_vals[i] = rotation1[0].dec_vals[i]
        child2.dec_vals[i] = rotation2[0].dec_vals[i]

    return (child1, child2)
コード例 #9
0
def ga(model, mp=0.1, cop=1, select=ga_select, population_size=100, num_generations=1000, attempts=5, initial_pop=None):
    """Multi-objective genetic algorithm over `model`.

    mp: mutation probability; cop: number of crossover points;
    select: parent-selection function; initial_pop: optional seed
    population (copied into fresh Candidates). Evolves for up to
    `num_generations` generations while maintaining a non-dominated
    frontier; every 100 generations a "life" is lost if the frontier
    did not improve, and the run ends at zero lives. Returns the
    estimated hyper-volume of the final frontier, or retries up to
    `attempts` times (ultimately returning 0) if the frontier is empty.
    """

    # normalize = prerun(model)

    # aggregate = model.aggregate

    # def n_score(indiv):
    #     return normalize(aggregate(indiv))

    # Minimizing
    # If for every objective, indiv1.obj <= indiv2.obj
    # Then indiv1 is better if it's < in at least one
    def type1(indiv1, indiv2):
        # print str(indiv1.scores) + "compared to" + str(indiv2.scores)
        better_flag = False
        for i, score in enumerate(indiv1.scores):
            # if (indiv2.scores[i] < score):
            #     # print "Breaking"
            #     return False

            if (score < indiv2.scores[i]):
                # print "Not there"
                better_flag = True

            elif (score != indiv2.scores[i]):
                return False
            # else:
                # print str(score) + "," + str(indiv2.scores[i])
        # print "Return: " + str(better_flag)
        return better_flag

    # Per-objective extremes seen while building frontiers; passed to
    # generate_frontier and finally to hve as the sampling box.
    obj_mins = [100000000 for _ in range(model.num_objs)]
    obj_maxs = [0 for _ in range(model.num_objs)]

    parents = []

    if initial_pop is None:
        parents = generate_random_population(model, population_size)
    else:
        for can in initial_pop:
            p = Candidate(dec_vals=can.dec_vals, scores=can.scores)
            parents += [p]

    frontier = []

    frontier = generate_frontier(frontier, parents, type1, obj_mins, obj_maxs)

    # Fallback so the frontier is never empty going into the loop.
    if len(frontier) == 0:
        frontier = [parents[0], parents[len(parents) - 1]]

    # count = 0

    # while len(frontier) < 1:
    #     frontier = generate_frontier(frontier, parents, type1)
    #     count += 1
    #     print "Attempt:" + str(count)

    lives = 5

    prev_era_frontier = frontier

    for gen_num in range(num_generations):

        children = []

        while len(children) < population_size:

            parent1 = select(parents, type1)
            parent2 = select(parents, type1, exclude=[parent1])

            child1, child2 = crossover(parent1, parent2, cop)
            mutate(child1, mp, model.decs, model.ok)
            mutate(child2, mp, model.decs, model.ok)

            model.eval(child1)
            model.eval(child2)

            # Keep the population size exact on the last slot.
            # NOTE(review): type1(child1, child2) True means child1
            # dominates, yet child2 (the dominated one) is kept --
            # verify this is intentional.
            if len(children) == population_size - 1:
                if type1(child1, child2):
                    children += [child2]
                else:
                    children += [child1]
            else:
                children += [child1, child2]

        parents = list(children)

        frontier = generate_frontier(frontier, parents, type1, obj_mins, obj_maxs)

        # Every 100 generations: refresh lives on improvement,
        # otherwise spend one; terminate when none remain.
        if (gen_num % 100) == 0:
            # print "here"
            if not better_frontier(frontier, prev_era_frontier, type1):
                    # print "WOAH"
                    lives -= 1
                    if lives == 0:
                        # print "Early termination"
                        break
            else:
                lives += 5
            prev_era_frontier = frontier
        # else:
        #     print gen_num
        # print len(frontier)
    # else:
    #     print num_generations

    # if len(frontier) > 50:
    #     print len(frontier)

    # print lives

    # print "Frontier Size:" + str(len(frontier))

    # last_era = [[] for _ in range(model.num_objs)]

    if len(frontier) == 0:
        print "uh oh"
        if attempts == 0:
            return 0
        return ga(model, mp=0.1, cop=1, select=ga_select, population_size=100, num_generations=1000, attempts=attempts - 1)

    return hve(frontier, obj_mins, obj_maxs, type1)
コード例 #10
0
def mws(model, p=0.5, threshold=0.001, max_tries=100, max_changes=10, era_size=10, era0=None, lives=5):
    """Era-based MaxWalkSat on the model's normalized aggregate score.

    Restarts up to `max_tries` times with `max_changes` local steps per
    restart; each step either randomly resamples one decision (with
    probability 1 - p) or greedily optimizes it via mws_optimize.
    Objective scores are grouped into eras of `era_size` restarts, and
    `lives` are lost when an era fails the a12-based type2 improvement
    test (terminating at zero). Stops early once the best normalized
    score drops below `threshold`. Prints a progress trace and returns
    (best candidate, its normalized aggregate score).
    """

    best_can = None

    normalize = prerun(model)
    aggregate = model.aggregate

    def n_score(can):
        # Normalized aggregate score of a candidate.
        return normalize(aggregate(can))

    # if energy of can1 is less than that of can2
    # can1 is better and this returns true
    def type1(can1, can2):
        return (n_score(can1) < n_score(can2))

    def type2(era1, era2):
        # a12 returns times that lst1 is greater than lst2
        total = 0
        n = 0
        for obj_scores1, obj_scores2 in zip(era1, era2):
            # If this is 1, that means new one is worse
            total += a12(obj_scores1, obj_scores2)
            n += 1
        return (total / n >= 0.5)

    best_score = 1.0

    if not era0:
        curr_era = []
        curr_lives = lives

    else:
        # List of List. Need to deepcopy internal list too
        curr_era = []
        era0_copy = list(era0)
        for can in era0_copy:
            # NOTE(review): `curr_era += []` is a no-op -- possibly a
            # leftover; the scores are appended two lines below.
            curr_era += []
            model.eval(can)
            obj_scores = [x for x in can.scores]
            curr_era += [obj_scores]

        # Seed best_can/best_score from the supplied era.
        best_can = era0_copy[0]
        for can in era0_copy:
            can_score = n_score(can)
            if can_score < n_score(best_can):
                best_can = can
                best_score = can_score

    # This stores a list of era entries, i.e a list of  [list of objective scores for every candidate in the era]
    # Assume one era is 5 candidates
    # One Era entry for a model with 2 objectives: [[0.5,0.5], [0.2,0.3], [0.5,0.5], [0.2,0.3], [0.5,0.5]]
    # All era entries will be stored in eras (Assume 2 eras): [[[0.5,0.5], [0.2,0.3], [0.5,0.5], [0.2,0.3], [0.5,0.5]],
    #                                                         [[0.5,0.5], [0.2,0.3], [0.5,0.5], [0.2,0.3], [0.5,0.5]]]
    eras = []

    out = []

    candidate = None

    i = -1
    thresh_flag = False

    while not thresh_flag:
        i += 1
        j = 0
        if i == max_tries:
            if curr_era:
                eras += [curr_era]
                curr_era = []
            out += ["\nReached Max Tries"]
            break

        # Era boundary: close the current era and run the lives check.
        if i % era_size == 0:
            out += ["\n" + str(best_score) + " "]
            if curr_era:
                eras += [curr_era]
                curr_era = []
                if len(eras) > 1:
                    last_index = len(eras) - 1
                    # If there is improvement reset lives, else decrement
                    if (type2( eras[last_index - 1], eras[last_index])):
                        curr_lives = lives
                    else:
                        curr_lives -= 1
                        if curr_lives == 0:
                            out += ["\nNo more Lives"]
                            break

        if candidate is not None:
            prev_candidate = candidate

        if i == 0:
            while candidate is None:
                candidate = model.gen_candidate()
        else:
            candidate = model.gen_candidate()

        # could not generate a valid candidate after patience tries
        if candidate is None:
            out += ["."]
            model.eval(prev_candidate)
            obj_scores = [x for x in prev_candidate.scores]
            curr_era += [obj_scores]
            continue

        if best_can is None:
            best_can = candidate
            best_score = n_score(candidate)

        for j in range(0, max_changes):
            model.eval(candidate)
            score = n_score(candidate)

            model.eval(candidate)
            obj_scores = [x for x in candidate.scores]
            curr_era += [obj_scores]

            if score < best_score:
                out += ["!"]
                best_can = candidate
                best_score = score

            if best_score < threshold:
                if curr_era:
                    eras += [curr_era]
                    curr_era = []
                out += ["\nScore satisfies threshold"]
                thresh_flag = True
                break

            # choose a random decision
            c = random.randrange(0, len(model.decs))

            if p < random.random():
                # change the decision randomly
                # ensure it is valid
                patience = model.patience
                while(patience > 0):
                    new_can = Candidate(dec_vals=list(candidate.dec_vals))
                    new_can.dec_vals[c] = model.decs[c].generate_valid_val()
                    if model.ok(new_can):
                        candidate = new_can
                        out += ["?"]
                        break
                    patience -= 1
                if patience == 0:
                    out += ["."]

            else:
                orig_score = n_score(candidate)
                # NOTE(review): mws_optimize elsewhere in this project
                # takes an optimization-direction argument -- verify
                # this 3-argument call matches its signature.
                candidate = mws_optimize(model, candidate, c)
                new_score = normalize(model.aggregate(candidate))
                if orig_score != new_score:
                    out += ["+"]
                else:
                    out += ["."]

    print ''.join(out)
    print "\niterations:" + str(max_changes * i + j)
    print "Best Score:" + str(normalize(model.aggregate(best_can)))

    return best_can, normalize(model.aggregate(best_can))
コード例 #11
0
ファイル: optimizers.py プロジェクト: rchakra3/x9115rc3
def mws(model, p=0.5, threshold=0.001, max_tries=100, max_changes=10, optimal='low'):
    """MaxWalkSat over the model's normalized aggregate score.

    Up to `max_tries` restarts of `max_changes` steps each; each step
    either randomly resamples one random decision (probability 1 - p)
    or greedily optimizes it via mws_optimize. Returns early with
    (candidate, score) once the score passes `threshold` in the
    `optimal` direction ('low' = minimize, anything else = maximize);
    otherwise returns (best candidate, best normalized score).
    """

    best_can = None

    normalize = prerun(model)

    for i in range(0, max_tries):
        candidate = model.gen_candidate()

        # could not generate a valid candidate after patience tries
        if candidate is None:
            continue

        if best_can is None:
            best_can = candidate
            best_score = normalize(model.aggregate(candidate))

        # Progress line every 10 restarts.
        if i % 10 == 0:
            print "\n" + str(best_score),

        for j in range(0, max_changes):
            model.eval(candidate)
            score = normalize(model.aggregate(candidate))

            if optimal == 'low':
                # Minimizing: threshold is an upper bound on success.
                if score < threshold:
                    print "\niterations:" + str(i * max_changes + j)
                    print "Score:" + str(score)
                    return candidate, score

                if score < best_score:
                    print "!",
                    best_can = candidate
                    best_score = score

            else:
                # Maximizing: threshold is a lower bound on success.
                if score > threshold:
                    print "iterations:" + str(i * max_changes + j)
                    print "Score:" + str(score)
                    return candidate, score

                if score > best_score:
                    print "!",
                    best_can = candidate
                    best_score = score

            # choose a random decision
            c = random.randrange(0, len(model.decs))

            if p < random.random():
                # change the decision randomly
                # ensure it is valid
                patience = model.patience
                while(patience > 0):
                    new_can = Candidate(dec_vals=list(candidate.dec_vals))
                    new_can.dec_vals[c] = model.decs[c].generate_valid_val()
                    if model.ok(new_can):
                        candidate = new_can
                        print "?",
                        break
                    patience -= 1
                if patience == 0:
                    print ".",

            else:
                orig_score = normalize(model.aggregate(candidate))
                candidate = mws_optimize(model, candidate, c, optimal)
                new_score = normalize(model.aggregate(candidate))
                if orig_score != new_score:
                    print "+",
                else:
                    print ".",

    print "\niterations:" + str(max_changes * max_tries)
    print "Best Score:" + str(normalize(model.aggregate(best_can)))

    return best_can, normalize(model.aggregate(best_can))
コード例 #12
0
ファイル: mws2.py プロジェクト: rchakra3/x9115rc3
def mws(model, p=0.5, threshold=0.001, max_tries=500, max_changes=10, era_size=100, era0=None, lives=5):
    """Era-based MaxWalkSat variant tracking per-objective score lists.

    Like the aggregate-score mws but each era is a list with one score
    list per model objective; the type2 test declares improvement when
    ANY objective's a12 score against the previous era reaches 0.56.
    `max_tries` is interpreted as a step budget (divided by
    `max_changes` to get the restart count). Returns a 3-tuple of
    (best candidate, its aggregate score, the last completed era).
    """

    best_can = None

    # Interpret max_tries as a total step budget.
    max_tries = max_tries / max_changes

    normalize = prerun(model)
    aggregate = model.aggregate

    def n_score(can):
        # Raw (un-normalized) aggregate score.
        return aggregate(can)
        # return normalize(aggregate(can))

    # if energy of can1 is less than that of can2
    # can1 is better and this returns true
    def type1(can1, can2):
        return (n_score(can1) < n_score(can2))

    # def type2(era1, era2):
    #     # a12 returns times that lst1 is greater than lst2
    #     total = 0
    #     n = 0
    #     for obj_scores1, obj_scores2 in zip(era1, era2):
    #         # If this is 1, that means new one is worse
    #         total += a12(obj_scores1, obj_scores2)
    #         n += 1
    #     return (total / n >= 0.5)
    def type2(era1, era2):
        # a12 returns times that lst1 is greater than lst2
        # total = 0
        # n = 0
        # for obj_scores1, obj_scores2 in zip(era1, era2):
        #     # If this is 1, that means era1 is greater more often
        #     # If minimizing, this means era1 is worse
        #     total += a12(obj_scores1, obj_scores2)
        #     n += 1
        # return (total / n >= 0.5)

        # Currently returns true if even one of the objectives have improved
        # print "here:" + str(len(era2))
        # print "*****#############*************"
        for index, objective in enumerate(era2):
            # print "comparing:\n" + str(era1[index])
            # print "and\n"
            # print str(objective)
            # print "******"
            a12_score = a12(era1[index], objective)
            # print "######"
            # print a12_score
            # print "######"
            if (a12_score >= 0.56):
                # print "######"
                # print objective
                # print era1[index]
                # print a12_score
                # print "######"
                return True
        # print "######"
        # print a12_score
        # print "######"
        return False

    # best_score = 1.0

    if not era0:
        print "ERRRRRRRRRRRRRRRRRRRRRRRRRRRROR"
        curr_era = [[] for _ in model.objectives()]
        curr_lives = lives

    else:
        # List of List. Need to deepcopy internal list too
        curr_lives = lives
        era0_copy = []
        for can in era0:
            new_can = Candidate(dec_vals=can.dec_vals, scores=can.scores)
            era0_copy += [new_can]
        # One score list per objective.
        curr_era = [[] for _ in model.objectives()]
        for can in era0_copy:
            model.eval(can)
            obj_scores = [x for x in can.scores]
            for index, score in enumerate(obj_scores):
                curr_era[index] += [score]

        # Seed best_can/best_score from the supplied era.
        best_can = era0_copy[0]
        best_score = n_score(best_can)
        for can in era0_copy:
            if type1(can, best_can):
                best_can = can
                best_score = n_score(best_can)
        # curr_can = era0_copy[len(era0_copy) - 1]
        # curr_score = n_score(curr_can)

    # This stores a list of era entries, i.e a list of  [list of objective scores for every candidate in the era]
    # Assume one era is 5 candidates
    # One Era entry for a model with 2 objectives: [[0.5,0.5], [0.2,0.3], [0.5,0.5], [0.2,0.3], [0.5,0.5]]
    # All era entries will be stored in eras (Assume 2 eras): [[[0.5,0.5], [0.2,0.3], [0.5,0.5], [0.2,0.3], [0.5,0.5]],
    #                                                         [[0.5,0.5], [0.2,0.3], [0.5,0.5], [0.2,0.3], [0.5,0.5]]]
    eras = []

    out = []

    candidate = None

    i = -1
    thresh_flag = False

    while not thresh_flag:
        # print "New Try"
        i += 1
        j = 0
        if i == max_tries:
            if curr_era:
                eras += [curr_era]
                # curr_era = [[] for _ in model.objectives()]
            # print "Reached max tries"
            out += ["\nReached Max Tries"]
            break

        # Era boundary (measured in steps): close the era, test lives.
        if (i * max_changes) % era_size == 0:
            out += ["\n" + str(best_score) + " "]
            if curr_era:
                eras += [curr_era]
                # print len(curr_era[0])
                curr_era = [[] for _ in model.objectives()]
                if len(eras) > 1:
                    # print str(i)+":"+str(i*max_changes)+":lives", curr_lives
                    last_index = len(eras) - 1
                    # If there is improvement reset lives, else decrement
                    if (type2(eras[last_index - 1], eras[last_index])):
                        curr_lives += lives
                    else:
                        curr_lives -= 1
                        if curr_lives == 0:
                            # print "No more"
                            out += ["\nNo more Lives"]
                            break

        if candidate is not None:
            prev_candidate = candidate

        if i == 0:
            while candidate is None:
                candidate = model.gen_candidate()
        else:
            candidate = model.gen_candidate()

        # could not generate a valid candidate after patience tries
        if candidate is None:
            out += ["."]
            model.eval(prev_candidate)
            obj_scores = [x for x in prev_candidate.scores]
            for index, score in enumerate(obj_scores):
                curr_era[index] += [score]
            continue

        if best_can is None:
            best_can = candidate
            best_score = n_score(candidate)

        for j in range(0, max_changes):
            model.eval(candidate)
            score = n_score(candidate)
            # print score

            model.eval(candidate)
            obj_scores = [x for x in candidate.scores]
            for index, score in enumerate(obj_scores):
                curr_era[index] += [score]

            if type1(candidate, best_can):  # score < best_score:
                out += ["!"]
                best_can = candidate
                best_score = score

            # if best_score < threshold:
            #     if curr_era:
            #         eras += [curr_era]
            #         curr_era = [[] for _ in model.objectives()]
            #     out += ["\nScore satisfies threshold"]
            #     thresh_flag = True
            #     break

            # choose a random decision
            c = random.randrange(0, len(model.decs))

            if p < random.random():
                # change the decision randomly
                # ensure it is valid
                patience = model.patience
                while(patience > 0):
                    new_can = Candidate(dec_vals=list(candidate.dec_vals))
                    new_can.dec_vals[c] = model.decs[c].generate_valid_val()
                    if model.ok(new_can):
                        candidate = new_can
                        out += ["?"]
                        break
                    patience -= 1
                if patience == 0:
                    out += ["."]

            else:
                orig_score = n_score(candidate)
                # NOTE(review): passes the comparator `type1` where
                # other mws variants pass an 'optimal' string -- verify
                # against the mws_optimize signature in use here.
                candidate = mws_optimize(model, candidate, c, type1)
                new_score = normalize(model.aggregate(candidate))
                if orig_score != new_score:
                    out += ["+"]
                else:
                    out += ["."]

    # print ''.join(out)
    # print "\niterations:" + str(max_changes * i + j)
    # print "Best Score:" + str(normalize(model.aggregate(best_can)))

    # print eras[len(eras)-1]

    return best_can, model.aggregate(best_can), eras[len(eras) - 1]
コード例 #13
0
def de(model,
       frontier_size=10,
       cop=0.4,
       ea=0.5,
       max_tries=100,
       threshold=0.01,
       era_size=10,
       era0=None,
       lives=5):
    """Differential evolution with era-based early termination.

    Maintains a frontier (size forced to `era_size`) updated by
    de_update with crossover probability `cop` and extrapolation
    factor `ea`. After each repeat the frontier's per-objective scores
    form an era; lives grow on a12-detected improvement and shrink
    otherwise, ending the run at zero. Also stops once the mean energy
    drops below `threshold`. Returns (_, best mean energy, last era).
    """

    # normalizers = prerun_each_obj(model, runs=10000)
    out = []

    # Number of frontier-update rounds.
    repeat = int(max_tries / era_size)
    print "Repeat:" + str(repeat)
    frontier_size = era_size

    energy = model.aggregate

    # def energy(candidate, eval_func=model.eval, normalizers=normalizers):
    #     # This evaluates the objs and stores them candidate.scores
    #     eval_func(candidate)
    #     # Just for fun
    #     normalized_scores = [normalize(x) for normalize, x in zip(normalizers, candidate.scores)]
    #     # The distance of score of each objective from hell
    #     hell_dist = [(1 - x) for x in normalized_scores]

    #     sum_of_squares = sum([x ** 2 for x in hell_dist])

    #     energy = 1 - (math.sqrt(sum_of_squares) / math.sqrt(len(hell_dist)))

    #     return energy

    def type1(can1, can2):
        # True when can1 has strictly lower energy (is better).
        return (energy(can1) < energy(can2))

    def type2(era1, era2):
        # a12 returns times that lst1 is greater than lst2
        # total = 0
        # n = 0
        # for obj_scores1, obj_scores2 in zip(era1, era2):
        #     # If this is 1, that means era1 is greater more often
        #     # If minimizing, this means era1 is worse
        #     total += a12(obj_scores1, obj_scores2)
        #     n += 1
        # return (total / n >= 0.5)

        # Currently returns true if even one of the objectives have improved
        # print "here:" + str(len(era2))
        # print "*****#############*************"
        for index, objective in enumerate(era2):
            # print "comparing:\n" + str(era1[index])
            # print "and\n"
            # print str(objective)
            # print "******"
            a12_score = a12(era1[index], objective)
            # print "######"
            # print a12_score
            # print "######"
            if (a12_score >= 0.56):
                # print "######"
                # print objective
                # print era1[index]
                # print a12_score
                # print "######"
                return True
        # print "######"
        # print a12_score
        # print "######"
        return False

    frontier = []
    total = 0
    n = 0

    if not era0:
        # Seed the frontier with fresh valid candidates.
        for i in range(frontier_size):
            can = model.gen_candidate()
            while can is None:
                can = model.gen_candidate()
            frontier += [can]
            total += energy(can)
            n += 1

    else:
        # Seed the frontier from the supplied era of candidates.
        for can in era0:
            p = Candidate(dec_vals=can.dec_vals, scores=can.scores)
            frontier += [p]
        total = sum([energy(can) for can in frontier])
        n = len(frontier)

    curr_era = [[] for _ in model.objectives()]

    # print "model_objectives_len:" + str(len(curr_era))

    for can in frontier:
        model.eval(can)
        obj_scores = [x for x in can.scores]
        for index, score in enumerate(obj_scores):
            curr_era[index] += [score]

    # Currently treating candidates as having only one objective i.e. energy
    # which we're minimizing
    eras = [curr_era]
    curr_era = [[] for _ in model.objectives()]

    best_score = total / n
    curr_lives = lives
    early_end = False

    for j in range(repeat):

        # if j % era_size == 0:
        out += ["\n" + str(best_score) + " "]

        total, n = de_update(frontier, cop, ea, energy, out, model.decisions())
        if total / n < threshold:
            best_score = total / n
            out += ["!"]
            out += ["\nScore satisfies Threshold"]
            break

        elif total / n < best_score:
            best_score = total / n
            out += ["!"]

        for can in frontier:
            model.eval(can)
            obj_scores = [x for x in can.scores]
            # print "obj_scores_len:" + str(len(obj_scores))
            for index, score in enumerate(obj_scores):
                curr_era[index] += [score]

        eras += [curr_era]
        curr_era = [[] for _ in model.objectives()]

        if len(eras) > 1:
            if type2(eras[len(eras) - 2], eras[len(eras) - 1]):
                curr_lives += lives
            else:
                curr_lives -= 1
                if curr_lives == 0:
                    # print "No more"
                    out += ["\nNo more Lives"]
                    break

    # print ''.join(out)
    # print "\nNumber of repeats:" + str(j + 1)
    # print "Best Score:" + str(best_score)
    # NOTE(review): `_` is not defined anywhere in this function, so
    # this return likely raises NameError -- verify; the other
    # optimizers return a candidate in this position.
    return _, best_score, eras[len(eras) - 1]
コード例 #14
0
ファイル: optimizers.py プロジェクト: rchakra3/x9115rc3
def mws(model,
        p=0.5,
        threshold=0.001,
        max_tries=100,
        max_changes=10,
        optimal='low'):
    """MaxWalkSat over the model's normalized aggregate score.

    Up to `max_tries` restarts of `max_changes` steps each; each step
    either randomly resamples one random decision (probability 1 - p)
    or greedily optimizes it via mws_optimize. Returns early with
    (candidate, score) once the score passes `threshold` in the
    `optimal` direction ('low' = minimize, anything else = maximize);
    otherwise returns (best candidate, best normalized score).
    """

    best_can = None

    normalize = prerun(model)

    for i in range(0, max_tries):
        candidate = model.gen_candidate()

        # could not generate a valid candidate after patience tries
        if candidate is None:
            continue

        if best_can is None:
            best_can = candidate
            best_score = normalize(model.aggregate(candidate))

        # Progress line every 10 restarts.
        if i % 10 == 0:
            print "\n" + str(best_score),

        for j in range(0, max_changes):
            model.eval(candidate)
            score = normalize(model.aggregate(candidate))

            if optimal == 'low':
                # Minimizing: threshold is an upper bound on success.
                if score < threshold:
                    print "\niterations:" + str(i * max_changes + j)
                    print "Score:" + str(score)
                    return candidate, score

                if score < best_score:
                    print "!",
                    best_can = candidate
                    best_score = score

            else:
                # Maximizing: threshold is a lower bound on success.
                if score > threshold:
                    print "iterations:" + str(i * max_changes + j)
                    print "Score:" + str(score)
                    return candidate, score

                if score > best_score:
                    print "!",
                    best_can = candidate
                    best_score = score

            # choose a random decision
            c = random.randrange(0, len(model.decs))

            if p < random.random():
                # change the decision randomly
                # ensure it is valid
                patience = model.patience
                while (patience > 0):
                    new_can = Candidate(dec_vals=list(candidate.dec_vals))
                    new_can.dec_vals[c] = model.decs[c].generate_valid_val()
                    if model.ok(new_can):
                        candidate = new_can
                        print "?",
                        break
                    patience -= 1
                if patience == 0:
                    print ".",

            else:
                orig_score = normalize(model.aggregate(candidate))
                candidate = mws_optimize(model, candidate, c, optimal)
                new_score = normalize(model.aggregate(candidate))
                if orig_score != new_score:
                    print "+",
                else:
                    print ".",

    print "\niterations:" + str(max_changes * max_tries)
    print "Best Score:" + str(normalize(model.aggregate(best_can)))

    return best_can, normalize(model.aggregate(best_can))
コード例 #15
0
ファイル: sa.py プロジェクト: rchakra3/x9115rc3
def sa(model,
       p=sa_default_prob,
       threshold=0.001,
       max_tries=100000,
       lives=10,
       era_size=2000,
       era0=None):
    """Simulated-annealing search over `model`'s decision space.

    Runs up to `max_tries` iterations, grouped into "eras" of `era_size`
    iterations each.  After every era, era-vs-era improvement is tested with
    the a12 effect-size statistic; `lives` eras without improvement ends the
    run early.  `p` is a callable (current score, new score, ratio) -> jump
    probability.  `era0`, if given, is a list of Candidate objects used to
    seed the search and the first era's statistics.

    Returns (best_can, best_score, eras[len(eras) - 2]) -- note the
    SECOND-to-last era is returned, presumably because the last one may be
    only partially filled.
    """

    # prerun() evaluates a baseline sample and returns a score-normalizing
    # function for this model (definition not visible here).
    normalize = prerun(model)

    aggregate = model.aggregate

    # Normalized aggregate score -- used only for the jump probability.
    def actual_n_score(can):
        return normalize(aggregate(can))

    # Raw (un-normalized) aggregate score; the normalized version was
    # deliberately disabled (kept below for reference).
    def n_score(can):
        return aggregate(can)
        # return normalize(aggregate(can))

    # if energy of can1 is less than that of can2
    # can1 is better and this returns true
    def type1(can1, can2):
        # res = cdom(can1.dec_vals, can2.dec_vals)
        # # print "*************************************"
        # # print res
        # # print "*************************************"
        # if res == can1.dec_vals:
        #     # print "TrueTrueTrueTrueTrueTrueTrueTrueTrueTrueTrueTrueTrueTrueTrueTrueTrueTrueTrueTrueTrueTrueTrue"
        #     return True
        # else:
        #     return False
        return (n_score(can1) < n_score(can2))

    # Era-vs-era comparison: returns True if era2 shows improvement over
    # era1 on ANY single objective (a12 >= 0.56, i.e. a "small" effect).
    def type2(era1, era2):
        # a12 returns times that lst1 is greater than lst2
        # total = 0
        # n = 0
        # for obj_scores1, obj_scores2 in zip(era1, era2):
        #     # If this is 1, that means era1 is greater more often
        #     # If minimizing, this means era1 is worse
        #     total += a12(obj_scores1, obj_scores2)
        #     n += 1
        # return (total / n >= 0.5)

        # Currently returns true if even one of the objectives have improved
        # print "here:" + str(len(era2))
        # print "*****#############*************"
        for index, objective in enumerate(era2):
            # print "comparing:\n" + str(era1[index])
            # print "and\n"
            # print str(objective)
            # print "******"
            a12_score = a12(era1[index], objective)
            # print "######"
            # print a12_score
            # print "######"
            if (a12_score >= 0.56):
                # print "######"
                # print objective
                # print era1[index]
                # print a12_score
                # print "######"
                return True
        # print "######"
        # print a12_score
        # print "######"
        return False

    # One era is a list of size era_size
    # Each element is a list with all the values of an objective in that era
    # So basically: era = [[can1.obj1_score, can2.obj1_score],
    #                      [can1.obj2.score, can2.obj2.score]]

    if not era0:
        # No seed era: generate a first valid candidate (gen_candidate may
        # return None, so retry until it succeeds).
        best_can = model.gen_candidate()
        while best_can is None:
            best_can = model.gen_candidate()

        best_score = n_score(best_can)
        curr_can = best_can
        curr_score = best_score
        # One empty score-list per objective.
        curr_era = [[] for _ in model.objectives()]

    else:
        # List of List. Need to deepcopy internal list too
        era0_copy = []
        for can in era0:
            new_can = Candidate(dec_vals=can.dec_vals, scores=can.scores)
            era0_copy += [new_can]
        curr_era = [[] for _ in model.objectives()]
        # Re-evaluate the seed candidates and record their per-objective
        # scores into the current era.
        for can in era0_copy:
            model.eval(can)
            obj_scores = [x for x in can.scores]
            for index, score in enumerate(obj_scores):
                curr_era[index] += [score]

        # Seed best_can with the best candidate from era0, and curr_can
        # with the last (most recent) one.
        best_can = era0_copy[0]
        for can in era0_copy:
            if type1(can, best_can):
                best_can = can
        curr_can = era0_copy[len(era0_copy) - 1]
        curr_score = n_score(curr_can)

    best_score = n_score(best_can)

    # `out` accumulates the progress-trace characters (!, +, ?, .).
    out = []

    eras = []
    curr_lives = lives
    i = -1

    # If in `lives` eras there is no improvement, EXIT
    # If iterations > max_tries, EXIT
    # If n_score <threshold, EXIT

    # Diagnostics counters (how often gen_candidate ran / returned None).
    gen_count = 0
    none_count = 0

    early_termination = False

    random_jumps = 0
    gt_50 = 0

    while True:
        i += 1

        if i == max_tries:
            out += ["\nReached max tries"]
            # Flush the in-progress era before exiting.
            if curr_era:
                eras += [curr_era]
                curr_era = [[] for _ in model.objectives()]
            break

        # Beginning of a new ERA
        if i % era_size == 0:
            # print "Random Jumps this era:" + str(random_jumps)
            # print "gt_50 this era:" + str(gt_50)
            gt_50 = 0
            random_jumps = 0
            out += ["\n" + str(best_score) + " "]
            if curr_era:
                eras += [curr_era]
                curr_era = [[] for _ in model.objectives()]
                if len(eras) > 1:
                    last_index = len(eras) - 1
                    # If there is improvement reset lives, else decrement
                    # NOTE(review): "reset" actually ADDS `lives` to the
                    # remaining count rather than resetting to `lives`.
                    if (type2(eras[last_index - 1], eras[last_index])):
                        curr_lives += lives
                    else:
                        curr_lives -= 1
                        if curr_lives == 0:
                            print "No more lives"
                            out += ["\nNo more Lives"]
                            early_termination = True
                            break

        new_can = model.gen_candidate()
        # new_can = model.gen_can_from_prev(curr_can)

        gen_count += 1
        if new_can is None:
            # Could not generate a valid candidate: record the CURRENT
            # candidate's scores so every era entry stays the same length.
            none_count += 1
            out += ["."]
            model.eval(curr_can)
            obj_scores = [x for x in curr_can.scores]
            for index, score in enumerate(obj_scores):
                curr_era[index] += [score]
            continue

        model.eval(new_can)
        obj_scores = [x for x in new_can.scores]
        for index, score in enumerate(obj_scores):
            curr_era[index] += [score]

        new_score = n_score(new_can)

        # flag stays True only if nothing happened this iteration
        # (no best, no move, no jump) -> prints "." below.
        flag = True
        # if new_score < best_score:
        if type1(new_can, best_can):
            best_score = new_score
            best_can = new_can
            # print "!"
            out += ["!"]
            flag = False

        # if new_score < curr_score:
        norm_curr_score = actual_n_score(curr_can)
        norm_new_score = actual_n_score(new_can)
        # NOTE(review): in Python 2, `i / max_tries` is INTEGER division and
        # is 0 for all i < max_tries, so the temperature ratio passed to p
        # is always 0 -- likely intended float(i) / max_tries; confirm.
        prob = p(norm_curr_score, norm_new_score, ((i / max_tries)))
        # print "****************************"
        # print (norm_curr_score - norm_new_score)
        # print (i/max_tries)
        if (prob > 0.5):
            gt_50 += 1
            # print str(prob)
        if type1(new_can, curr_can):
            # print "new can won"
            curr_score = new_score
            curr_can = new_can
            out += ["+"]
            flag = False

        elif prob < random.random():
            # NOTE(review): classic SA jumps when random() < prob; this is
            # the opposite inequality (jump when prob is SMALL) -- confirm
            # against the definition of sa_default_prob.
            # print "random jump"
            curr_score = new_score
            curr_can = new_can
            out += ["?"]
            random_jumps += 1
            flag = False

        # else:
        #     print "_!_"

        if best_score < threshold:
            early_termination = True
            out += ["\nScore satisfies Threshold"]
            break

        if flag is True:
            # NOTE: `out += "."` extends the list with the characters of the
            # string -- equivalent to out += ["."] for a 1-char string.
            out += "."

    # Flush any partially-filled final era.
    if curr_era:
        eras += [curr_era]

    # for era in eras:
    #     print era
    #     print "******"
    # break
    # print eras[len(eras) - 2]

    # print len(eras[0])
    # print len(eras[len(eras) - 2])

    # print ''.join(out)
    # print "gen_count:" + str(gen_count)
    # print "none_count" + str(none_count)
    # print "\nLen (eras):" + str(len(eras))
    # print "\niterations:" + str(i)
    # print "Score:" + str(best_score)
    # NOTE(review): returns the second-to-last era (the last may be partial);
    # raises IndexError if fewer than 2 eras completed -- confirm callers
    # guarantee max_tries >= 2 * era_size.
    return best_can, best_score, eras[len(eras) - 2]
コード例 #16
0
ファイル: mws2.py プロジェクト: rchakra3/x9115rc3
def mws(model,
        p=0.5,
        threshold=0.001,
        max_tries=500,
        max_changes=10,
        era_size=100,
        era0=None,
        lives=5):
    """MaxWalkSat-style local search over `model`'s decision space.

    Each outer try generates a fresh candidate and applies `max_changes`
    local moves to it: with probability (1 - p) a random decision is reset
    to a random valid value, otherwise that decision is locally optimized
    via mws_optimize.  Iterations are grouped into eras of `era_size`
    evaluations; era-vs-era improvement is tested with the a12 statistic
    and `lives` non-improving eras ends the run early.

    Returns (best_can, model.aggregate(best_can), eras[len(eras) - 1]).
    """

    best_can = None

    # Budget is expressed in total evaluations; convert to outer tries.
    # NOTE: Python 2 integer division -- intentional here.
    max_tries = max_tries / max_changes

    normalize = prerun(model)
    aggregate = model.aggregate

    # Raw (un-normalized) aggregate score; normalized version disabled.
    def n_score(can):
        return aggregate(can)
        # return normalize(aggregate(can))

    # if energy of can1 is less than that of can2
    # can1 is better and this returns true
    def type1(can1, can2):
        return (n_score(can1) < n_score(can2))

    # def type2(era1, era2):
    #     # a12 returns times that lst1 is greater than lst2
    #     total = 0
    #     n = 0
    #     for obj_scores1, obj_scores2 in zip(era1, era2):
    #         # If this is 1, that means new one is worse
    #         total += a12(obj_scores1, obj_scores2)
    #         n += 1
    #     return (total / n >= 0.5)
    # Era-vs-era comparison: True if era2 improved over era1 on ANY single
    # objective (a12 >= 0.56, a "small" effect).  Duplicated from sa() --
    # candidate for extraction into a shared helper.
    def type2(era1, era2):
        # a12 returns times that lst1 is greater than lst2
        # total = 0
        # n = 0
        # for obj_scores1, obj_scores2 in zip(era1, era2):
        #     # If this is 1, that means era1 is greater more often
        #     # If minimizing, this means era1 is worse
        #     total += a12(obj_scores1, obj_scores2)
        #     n += 1
        # return (total / n >= 0.5)

        # Currently returns true if even one of the objectives have improved
        # print "here:" + str(len(era2))
        # print "*****#############*************"
        for index, objective in enumerate(era2):
            # print "comparing:\n" + str(era1[index])
            # print "and\n"
            # print str(objective)
            # print "******"
            a12_score = a12(era1[index], objective)
            # print "######"
            # print a12_score
            # print "######"
            if (a12_score >= 0.56):
                # print "######"
                # print objective
                # print era1[index]
                # print a12_score
                # print "######"
                return True
        # print "######"
        # print a12_score
        # print "######"
        return False

    # best_score = 1.0

    if not era0:
        # NOTE(review): this branch appears to be unexpected (hence the
        # shouted print) and leaves `best_score` UNDEFINED -- the
        # str(best_score) at the era boundary below would raise NameError
        # on the first iteration.  Confirm callers always pass era0.
        print "ERRRRRRRRRRRRRRRRRRRRRRRRRRRROR"
        curr_era = [[] for _ in model.objectives()]
        curr_lives = lives

    else:
        # List of List. Need to deepcopy internal list too
        curr_lives = lives
        era0_copy = []
        for can in era0:
            new_can = Candidate(dec_vals=can.dec_vals, scores=can.scores)
            era0_copy += [new_can]
        curr_era = [[] for _ in model.objectives()]
        # Re-evaluate seed candidates and record their per-objective scores.
        for can in era0_copy:
            model.eval(can)
            obj_scores = [x for x in can.scores]
            for index, score in enumerate(obj_scores):
                curr_era[index] += [score]

        # Seed best_can/best_score with the best candidate from era0.
        best_can = era0_copy[0]
        best_score = n_score(best_can)
        for can in era0_copy:
            if type1(can, best_can):
                best_can = can
                best_score = n_score(best_can)
        # curr_can = era0_copy[len(era0_copy) - 1]
        # curr_score = n_score(curr_can)

    # This stores a list of era entries, i.e a list of  [list of objective scores for every candidate in the era]
    # Assume one era is 5 candidates
    # One Era entry for a model with 2 objectives: [[0.5,0.5], [0.2,0.3], [0.5,0.5], [0.2,0.3], [0.5,0.5]]
    # All era entries will be stored in eras (Assume 2 eras): [[[0.5,0.5], [0.2,0.3], [0.5,0.5], [0.2,0.3], [0.5,0.5]],
    #                                                         [[0.5,0.5], [0.2,0.3], [0.5,0.5], [0.2,0.3], [0.5,0.5]]]
    eras = []

    # Progress-trace characters (!, +, ?, .).
    out = []

    candidate = None

    i = -1
    # Set when the (currently disabled) threshold check fires; also the
    # loop-exit condition.
    thresh_flag = False

    while not thresh_flag:
        # print "New Try"
        i += 1
        j = 0
        if i == max_tries:
            # Flush the in-progress era before exiting.
            if curr_era:
                eras += [curr_era]
                # curr_era = [[] for _ in model.objectives()]
            # print "Reached max tries"
            out += ["\nReached Max Tries"]
            break

        # Era boundary: measured in evaluations (i * max_changes).
        if (i * max_changes) % era_size == 0:
            out += ["\n" + str(best_score) + " "]
            if curr_era:
                eras += [curr_era]
                # print len(curr_era[0])
                curr_era = [[] for _ in model.objectives()]
                if len(eras) > 1:
                    # print str(i)+":"+str(i*max_changes)+":lives", curr_lives
                    last_index = len(eras) - 1
                    # If there is improvement reset lives, else decrement
                    # NOTE(review): "reset" actually ADDS `lives` rather
                    # than resetting to `lives`.
                    if (type2(eras[last_index - 1], eras[last_index])):
                        curr_lives += lives
                    else:
                        curr_lives -= 1
                        if curr_lives == 0:
                            # print "No more"
                            out += ["\nNo more Lives"]
                            break

        # Remember the last valid candidate so its scores can be recorded
        # if generation fails below.
        if candidate is not None:
            prev_candidate = candidate

        if i == 0:
            # First try MUST produce a valid candidate; retry until it does.
            while candidate is None:
                candidate = model.gen_candidate()
        else:
            candidate = model.gen_candidate()

        # could not generate a valid candidate after patience tries
        if candidate is None:
            out += ["."]
            model.eval(prev_candidate)
            obj_scores = [x for x in prev_candidate.scores]
            for index, score in enumerate(obj_scores):
                curr_era[index] += [score]
            continue

        if best_can is None:
            best_can = candidate
            best_score = n_score(candidate)

        # Inner loop: apply max_changes local moves to this candidate.
        for j in range(0, max_changes):
            model.eval(candidate)
            score = n_score(candidate)
            # print score

            # NOTE(review): second eval of the same candidate looks
            # redundant; also `score` is immediately shadowed by the
            # per-objective loop variable below -- confirm intent.
            model.eval(candidate)
            obj_scores = [x for x in candidate.scores]
            for index, score in enumerate(obj_scores):
                curr_era[index] += [score]

            if type1(candidate, best_can):  # score < best_score:
                out += ["!"]
                best_can = candidate
                # NOTE(review): `score` here is the LAST objective's score
                # (shadowed above), not the aggregate -- confirm.
                best_score = score

            # if best_score < threshold:
            #     if curr_era:
            #         eras += [curr_era]
            #         curr_era = [[] for _ in model.objectives()]
            #     out += ["\nScore satisfies threshold"]
            #     thresh_flag = True
            #     break

            # choose a random decision
            c = random.randrange(0, len(model.decs))

            if p < random.random():
                # change the decision randomly
                # ensure it is valid
                patience = model.patience
                while (patience > 0):
                    new_can = Candidate(dec_vals=list(candidate.dec_vals))
                    new_can.dec_vals[c] = model.decs[c].generate_valid_val()
                    if model.ok(new_can):
                        candidate = new_can
                        out += ["?"]
                        break
                    patience -= 1
                if patience == 0:
                    out += ["."]

            else:
                # Locally optimize decision c; "+" if the (normalized)
                # aggregate actually moved, "." otherwise.
                # NOTE(review): type1 is passed where other mws variants
                # pass an 'optimization' string ('low'/'high') -- confirm
                # mws_optimize accepts a comparator here.
                orig_score = n_score(candidate)
                candidate = mws_optimize(model, candidate, c, type1)
                new_score = normalize(model.aggregate(candidate))
                if orig_score != new_score:
                    out += ["+"]
                else:
                    out += ["."]

    # print ''.join(out)
    # print "\niterations:" + str(max_changes * i + j)
    # print "Best Score:" + str(normalize(model.aggregate(best_can)))

    # print eras[len(eras)-1]

    # NOTE(review): raises IndexError if no era was ever flushed -- confirm
    # era_size/max_tries always allow at least one full era.
    return best_can, model.aggregate(best_can), eras[len(eras) - 1]