Beispiel #1
0
    def append_project(self, _group, _project, _tech, _isUnion):
        """Load a project's result files, evaluate them, and fill the
        summary, bug-data and raw-data sheets.

        :param _group: group name used to locate result files
        :param _project: project key into self.S.bugs / answers / sources
        :param _tech: technique name forwarded to the Evaluator
        :param _isUnion: if True, evaluate the single 'all' result file;
            otherwise one result file per concrete version ('all' skipped)
        """
        resultFiles = []
        if _isUnion is False:
            # One result file per concrete version; 'all' is the union entry.
            for version in self.S.bugs[_project].keys():
                if version == 'all': continue
                versionName = u'%s' % version
                resultFiles.append(
                    self.S.getPath_results(self.TYPE, _tech, _group, _project,
                                           versionName))
        else:
            resultFiles.append(
                self.S.getPath_results(self.TYPE, _tech, _group, _project,
                                       'all'))

        ev = Evaluator(_tech, _project)
        ev.load(resultFiles)
        # Evaluate against the union answer set, sized by the union bug set.
        ev.evaluate(self.S.answers[_project]['all'],
                    len(self.S.bugs[_project]['all']))

        self.fill_SummarySheet(self.summarySheet, _group, _tech, _project,
                               ev.projectSummary,
                               self.S.sources[_project]['max'],
                               len(self.S.bugs[_project]['all']),
                               len(ev.bugSummaries))
        self.fill_bugDataSheet(self.bugSheet, _tech, _group, _project,
                               ev.bugSummaries,
                               self.S.answers[_project]['all'])
        self.fill_DataSheet(self.dataSheet, _tech, _group, _project,
                            ev.bugSummaries, ev.rawData,
                            self.S.sources[_project],
                            self.S.answers[_project]['all'])
Beispiel #2
0
def main():
    """Entry point: parse CLI arguments, build the agents, run the
    time-based simulation, and report wall-clock timing."""
    t0 = time.time()

    argparser.parseArguments()

    logging.basicConfig(stream=sys.stderr,
                        format='[%(asctime)s] %(name)s:%(message)s',
                        datefmt='%H:%M:%S',
                        level=logging.INFO)
    log = logging.getLogger("main")

    log.info("Init and config agent builder...")
    builder = AgentBuilder(UserAgentModel=SimpleUserAgent,
                           ObjectAgentModel=SimpleObjectAgent)
    users, objects = builder.build()

    log.info("Init and config simulation setting...")
    sim = TimeBasedSimulator(userAgents=users,
                             objectAgents=objects,
                             startTime=0, endTime=24, unitTime=1)

    log.info("Start simulation...")
    sim.run()
    t1 = time.time()
    log.info("Simulation time: %f s" % (t1 - t0))
    # sim.showLog()

    # Optional post-run evaluation, gated by the CLI flag.
    if argparser.sargs.evaluation:
        evaluator.evaluate(sim)
Beispiel #3
0
def evaluate(goldfile, autofile, joint, model):
    # Compare gold-standard tags against an automatic tagger's output and
    # return (correct, total, [(token_id, auto_tag), ...]).
    # NOTE(review): Python 2 code (print statements).
    print "goldfile -- ", goldfile
    print "autofile -- ", autofile
    f = codecs.open(goldfile, "r", "utf-8")
    gold = f.readlines()
    f.close()
    f = codecs.open(autofile, "r", "utf-8")
    auto = f.readlines()
    f.close()

    if model == "maxent":
        # Gold: column 1 is the tag. Auto: columns 1+ are candidate tags,
        # chooseMax picks the best; column 0 carries the token id.
        goldTags = [item.split()[1] for item in gold if item.split() != []]
        autoTags = [chooseMax(item.split()[1:]) for item in auto if item.split() != []]
        idTags = [item.split()[0] for item in auto if item.split() != []]
        print "koala !!! len of gold tags --", len(goldTags), "len of auto tags --", len(autoTags)
    elif model == "crfpp":
        # NOTE(review): unimplemented — goldTags/autoTags/idTags stay
        # undefined, so the code below raises NameError for this model.
        pass
    elif model == "crfsuite":
        # NOTE(review): unimplemented; same caveat as "crfpp".
        pass

    if joint == "joint":
        # Joint tags look like "A_B"; evaluate each component separately.
        # Only the second evaluation's (correct, total) is returned.
        print "\nfirst tag in joint tags --"
        evaluator = Evaluator([tag.split("_")[0] for tag in goldTags], [tag.split("_")[0] for tag in autoTags], gold)
        correct, total = evaluator.evaluate()
        print "\nsecond tag in joint tags --"
        evaluator = Evaluator([tag.split("_")[1] for tag in goldTags], [tag.split("_")[1] for tag in autoTags], gold)
        correct, total = evaluator.evaluate()
    else:
        evaluator = Evaluator(goldTags, autoTags, gold)
        correct, total = evaluator.evaluate()
    # NOTE(review): idTags is only defined by the "maxent" branch.
    return correct, total, zip(idTags, autoTags)
Beispiel #4
0
    def generate(self, quantity=1, operators=7, enable_power=False):
        """Generate up to *quantity* random arithmetic expression trees and
        append them to self.output_list.

        :param quantity: number of expressions to produce
        :param operators: number of operator nodes per expression
        :param enable_power: widen the random operator range by one to
            include the power operator
        """
        count = 0
        # At most quantity*2 attempts so duplicates/invalid trees cannot
        # loop forever.
        for loop in range(quantity * 2):
            if count >= quantity:
                break
            # operators+1 random operand leaves.
            nums = [
                BiTree(0, random.randint(0, self.UPPERCAP))
                for _ in range(1, operators + 2)
            ]
            # Random operator nodes; power only reachable when enabled.
            ops = [
                BiTree(
                    1,
                    random.randint(0,
                                   len(BiTree.operators) - 2 + enable_power))
                for _ in range(operators)
            ]
            unfilled = ops[:]
            filled = nums[:]
            ev = Evaluator()

            # Link the nodes into a tree
            while len(unfilled):
                i = random.randint(0, len(filled) - 1)
                unfilled[0].set_lchild(filled[i])
                filled.pop(i)

                # Special handling for division and exponentiation
                i = random.randint(0, len(filled) - 1)
                if unfilled[0].node_type == 1 and unfilled[0].val == 3:
                    # Right operand evaluates to 0 — replace it to avoid
                    # division by zero.
                    if ev.evaluate(filled[i]) == 0:
                        unfilled[0].set_rchild(
                            BiTree(0, random.randint(1, self.UPPERCAP)))
                    else:
                        unfilled[0].set_rchild(filled[i])
                elif unfilled[0].node_type == 1 and unfilled[0].val == 4:
                    # Exponent too large, or a fraction — clamp to 1..2.
                    if abs(ev.evaluate(filled[i])) > 2 or ev.evaluate(
                            filled[i]).denominator != 1:
                        unfilled[0].set_rchild(BiTree(0, random.randint(1, 2)))
                    else:
                        unfilled[0].set_rchild(filled[i])
                else:
                    unfilled[0].set_rchild(filled[i])
                filled.pop(i)

                filled.append(unfilled[0])
                unfilled.pop(0)

            # Skip duplicates
            if not self.deduplicate(filled[-1]):
                continue
            # Discard trees whose full evaluation still divides by zero
            try:
                ev.evaluate(filled[-1])
            except ZeroDivisionError:
                continue
            self.output_list.append(filled[-1])
            count += 1
Beispiel #5
0
def test_experiment(experiment):
    """Evaluate a trained classifier on the GlaS testA and testB splits and
    save precision/recall/MCC metrics to text and .npy files.

    experiment is a dictionary with three required entries:
        - datagen -> the DataGenerator class
        - model -> the Model class
        - name -> classifier name, used for the saved file names
    """
    try:
        DataGenerator = experiment['datagen']
        Model = experiment['model']
        clf_name = experiment['name']
    except KeyError:
        print(
            "Experiment dictionary must contain datagen, model and name keys."
        )
        # The function cannot proceed without these entries; previously the
        # error was swallowed and a NameError followed.
        raise

    # optional parameters (training-only options such as lr/eps are unused here)
    overlap = experiment.get('overlap', 'minimum')
    directory = experiment.get('directory', "D:/Adrien/dataset/GlaS/train")
    image_size = experiment.get('image_size', (256, 384))
    batch_size = experiment.get('batch_size', 5)
    validation_size = experiment.get('validation_size', 10)
    min_area = experiment.get('min_area', 4000)

    tf.keras.backend.clear_session()

    # Reload the weights saved by the training run.
    model = Model(image_size, clf_name, loadFrom=f'{clf_name}.hdf5')

    # Test A: compute & save metrics
    generator = DataGenerator(batch_size,
                              validation_size,
                              os.path.join(directory, 'testA'),
                              image_size,
                              train_test='testA')
    metrics = Evaluator.evaluate(model, generator, 'train', overlap, min_area)

    with open(f"{clf_name}_testA_metrics.txt", 'w') as fp:
        print("Test A", file=fp)  # header now goes to the file like the rest
        print("Precision\tRecall\tMCC", file=fp)
        print(metrics.mean(axis=0), file=fp)
        print(np.median(metrics, axis=0), file=fp)

    np.save(f"{clf_name}_testA_metrics.npy", metrics)

    # Test B: same pipeline on the second test split
    generator = DataGenerator(batch_size,
                              validation_size,
                              os.path.join(directory, 'testB'),
                              image_size,
                              train_test='testB')
    metrics = Evaluator.evaluate(model, generator, 'train', overlap, min_area)

    with open(f"{clf_name}_testB_metrics.txt", 'w') as fp:
        print("Test B", file=fp)
        print("Precision\tRecall\tMCC", file=fp)
        print(metrics.mean(axis=0), file=fp)
        print(np.median(metrics, axis=0), file=fp)

    np.save(f"{clf_name}_testB_metrics.npy", metrics)
class TestGame():
    """Drives the evaluator over a hand-picked board and asks the AI to move."""

    def testFourBoards(self):
        """Score each prepared GameState, print its utility, then request a
        move for the first state from AiMove2."""
        self.evalutor_state = Evaluator()

        # Single active test board (further boards can be appended here).
        self.game_states = [
            GameState(1, 1, 2, 4, 1),
        ]

        for state in self.game_states:
            rating = self.evalutor_state.evaluate(state, 1)
            print("utility : ", rating, end='\t state: ')
            state.print()

        ai = AiMove2()
        ai.predictMove(self.game_states[0])
Beispiel #7
0
def render_function(f):
    """ draws a function passed as a string """
    evaluator = Evaluator(f)

    # Last drawn pygame point for the real and imaginary traces; None means
    # the trace was broken by a failed evaluation and must restart.
    last_real, last_imag = None, None
    for x in drange(-8, 8.001, 0.001):
        try:
            value = complex(evaluator.evaluate(x=x))
            # Scale factors map math coordinates onto the pygame surface;
            # they should be precalculated and stored in a var.
            point_real = apply(x * (341 / 8), value.real * (197 / 6))
            point_imag = apply(x * (341 / 8), value.imag * (197 / 6))
            if last_real is not None:
                pygame.draw.line(screen, (0, 128, 255), last_real,
                                 point_real, 2)
            if last_imag is not None:
                pygame.draw.line(screen, (255, 128, 0), last_imag,
                                 point_imag, 2)
            last_real, last_imag = point_real, point_imag
        except (ValueError, OverflowError, EvaluationError):
            last_real, last_imag = None, None

    pygame.display.flip()
    return
Beispiel #8
0
def train(config_name, nparallel):
    """Train n_repeat models in parallel, evaluate each on the test set, and
    write the mean/variance of every metric into "<config>.res.json".

    :param config_name: path to a JSON config file (must end in ".json")
    :param nparallel: worker-process count for dataset building and training
    """
    with open(config_name, "r") as f:
        config = json.load(f)
    train_csv_root = config["learning_param"]["train_csv_root"]
    train_target_table = config["learning_param"].get("train_target_table_file", "seishitu_codeblue_wo_future.csv")
    train_loader = DataLoader(config, train_csv_root)
    train_handler = DataHandler(train_loader, config, train_target_table)
    test_loader = DataLoader(config, config["learning_param"]["test_csv_root"])
    test_target_table = config["learning_param"].get("test_target_table_file", "seishitu_codeblue_wo_future.csv")
    test_handler = DataHandler(test_loader, config, test_target_table)
    evaluator = Evaluator(config, test_loader, test_handler)
    print("building_test_data")
    evaluator.build_dataset(100.0, nparallel)

    n_repeat = config["learning_param"]["n_repeat"]
    with Pool(processes=nparallel) as p:
        models = [p.apply_async(train_one, (config, train_loader, train_handler, i)) for i in range(n_repeat)]
        # res has one row per repeat; columns are the metric vector below.
        res = np.array([evaluator.evaluate(model.get()) for model in models])

    averages = np.mean(res, axis=0, keepdims=False)
    variances = np.var(res, axis=0, keepdims=False)

    # Column order must match the metric vector returned by
    # evaluator.evaluate; replaces 30 copy-pasted assignments.
    metric_names = ["nData", "nCPA", "n_nonCPA", "accuracy", "PPV", "NPV",
                    "sensitivity", "specificity", "f_val", "auc_score",
                    "youden_index", "tp", "fp", "fn", "tn"]
    config["results"] = dict()
    for col, name in enumerate(metric_names):
        config["results"][name + "_mean"] = list(averages[:, col])
        config["results"][name + "_var"] = list(variances[:, col])

    # config_name ends in ".json"; strip it and append ".res.json".
    with open(config_name[:-5] + ".res.json", "w") as f:
        json.dump(config, f)
Beispiel #9
0
class Engine(object):
    """Rule engine: loads a JSON knowledge base (descriptions + rules) and
    evaluates expressions against it."""

    def __init__(self, knowledge_file_path):
        self.knowledge_base = []   # every Rule loaded from the file
        self.values_table = {}     # value name -> Value object
        self.evaluator = Evaluator(self.values_table)
        self.load_knowledge(knowledge_file_path)

    def __load_file_content(self, file):
        """Parse the open JSON file into (descriptions, rules)."""
        parsed = json.load(file)
        return parsed['description'], parsed['rules']

    def __get_description_info(self, item):
        """Normalize a description entry into a (value, description) pair.

        Accepted entry shapes: bool, str, or [bool, str]."""
        if type(item) == bool:
            return item, ''
        if type(item) == str:
            return '', item
        if type(item) == list:
            if type(item[0]) == bool and type(item[1]) == str:
                return item[0], item[1]
            raise Exception(f'{item} should be [bool, str].')
        raise Exception(f'Invalid description {item}.Acepted formats: bool, str, [bool, str].')

    def load_descriptions(self, desc):
        """Create a Value for every described name."""
        print('Loading descriptions...')
        for key in desc.keys():
            value, description = self.__get_description_info(desc[key])
            self.values_table[key] = Value(key, value=value,
                                           description=description)

    def load_rules(self, rules):
        """Build Rule objects and register each with the value appearing on
        its right-hand side."""
        print('Loading rules...')
        for raw in rules:
            rule = Rule(raw)
            self.knowledge_base.append(rule)
            self.values_table[rule.right].add_rule(rule)

    def load_knowledge(self, file_path):
        """Load descriptions then rules from the JSON file at *file_path*."""
        with open(file_path, 'r') as file:
            desc, rules = self.__load_file_content(file)
            self.load_descriptions(desc)
            self.load_rules(rules)

    def evaluate(self, expression):
        """Delegate expression evaluation to the wrapped Evaluator."""
        return self.evaluator.evaluate(expression)
Beispiel #10
0
 def test_evaluate_all_dataset(self):
     """Evaluate the whole dataset with the JDeodorant tool and check every
     counter and precision/recall figure against the expected values stored
     on the test fixture."""
     evaluation_result = Evaluator.evaluate(ComplexTest.dataset_path,
                                            "JDeodorant")
     # Raw counts: good/bad totals and how many of each were found.
     self.assertEqual(self.numberOfGood_all, evaluation_result.numberOfGood)
     self.assertEqual(self.numberOfGood_all_found,
                      evaluation_result.numberOfFoundGood)
     self.assertEqual(self.numberOfBad_all, evaluation_result.numberOfBad)
     self.assertEqual(self.numberOfBad_all_found,
                      evaluation_result.numberOfFoundBad)
     self.assertEqual(self.numberOfOthers_all_found,
                      evaluation_result.numberOfFoundOthers)
     # Derived precision/recall metrics.
     self.assertEqual(self.goodPrecision,
                      evaluation_result.get_good_precision())
     self.assertEqual(self.goodRecall, evaluation_result.get_good_recall())
     self.assertEqual(self.badPrecision,
                      evaluation_result.get_bad_precision())
     self.assertEqual(self.badRecall, evaluation_result.get_bad_recall())
Beispiel #11
0
def render_function(f,
                    x_axis,
                    f_axis,
                    point_color=(255, 0, 0),
                    color=(0, 0, 0)):
    """ draws a function map between two number lines """
    # Drain the event queue so the window stays responsive while drawing.
    pygame.event.get()
    evaluator = Evaluator(f)
    screen.blit(create_text_surface("f(x) = " + f, 32), (10, 10))

    # Sample twice the axis range so connections entering from outside the
    # visible interval are still drawn.
    for x in drange(2 * x_axis.start, 2 * x_axis.end + 1, STEP):
        try:
            src = x_axis.as_pygame(x)
            dst = f_axis.as_pygame(evaluator.evaluate(x=x))
            in_range = x_axis.start <= x <= x_axis.end
            render_connection(src, dst, point_color, color, in_range)
        except (EvaluationError, TypeError):
            # This x is not drawable; skip it.
            pass
    return
Beispiel #12
0
def render_function(window, f, color, settings, deriv=False):
    """Draw a function (or its derivative) onto *window*.

    :param window: pygame surface to draw on
    :param f: the function (expression consumed by Evaluator / derive_tree)
    :param color: line colour
    :param settings: object carrying start_x/stop_x/precision, x/y scale and
        offset, and line weight
    :param deriv: if True, plot the derivative of f instead of f itself
    """
    if deriv:
        ev = Evaluator("0")
        try:
            # Prefer the simplified derivative tree; fall back to the raw
            # tree if simplification fails (was a bare except:).
            ev.parser.tree = simplify(derive_tree(f))
        except Exception:
            ev.parser.tree = derive_tree(f)
    else:
        ev = Evaluator(f)

    prev_point = None
    for x in drange(settings.start_x, settings.stop_x + settings.precision,
                    settings.precision):
        try:
            new_point = apply(x * settings.x_scale,
                              ev.evaluate(x=x) * settings.y_scale,
                              settings.x_offset, settings.y_offset)
            # Only connect vertically-close points so asymptotes do not
            # produce spurious vertical lines.
            if prev_point is not None and abs(prev_point[1] - new_point[1]) <= 200:
                pygame.draw.line(window, color, prev_point, new_point,
                                 settings.weight)
            prev_point = new_point
        except (EvaluationError, TypeError):
            prev_point = None
    return
Beispiel #13
0
def plot_function(function, rectangles=True):
    """ plot a function and its rectangles (if enabled); returns the
    accumulated Riemann-sum area, or None when rectangles are disabled """
    ev = Evaluator(function)
    prev_point, area = None, 0
    for x in drange(x_start, x_stop + precision, precision):
        try:
            y = ev.evaluate(x=x)
            new_point = to_pygame(*scale(x, y))
            if prev_point is not None:
                # Connect successive samples only while the curve is
                # continuous (small screen-space jump).
                if math.hypot(new_point[0] - prev_point[0],
                              new_point[1] - prev_point[1]) < continuity_threshold:
                    pygame.draw.line(screen, function_color, prev_point,
                                     new_point, function_weight)
            if rectangles and not round(x, 2) % rectangle_frequency:
                # Riemann rectangle from the x-axis up to the sample,
                # plus its border (same geometry, hoisted into one tuple).
                rect = (new_point[0], min(new_point[1], HHEIGHT),
                        rectangle_width, abs(y * y_scale))
                pygame.draw.rect(screen, rectangle_color, rect)
                pygame.draw.rect(screen, rectangle_border_color, rect, 1)
                area += y * (rectangle_width / x_scale)
            prev_point = new_point
        except EvaluationError:
            # Break the curve where evaluation fails.
            prev_point = None
    return area if rectangles else None
Beispiel #14
0
def render_function(radius,
                    theta,
                    color=(0, 0, 0),
                    start=-8 * math.pi,
                    stop=8 * math.pi,
                    precision=0.001):
    """Draw a 'parametric polar' function: both radius and theta are
    expressions in the parameter u, sampled over [start, stop).

    :param radius: expression string for r(u)
    :param theta: expression string for theta(u)
    :param color: line colour
    :param start, stop, precision: parameter range and step
    """
    radius, theta = Evaluator(radius), Evaluator(theta)

    prev_point = None
    for j in drange(start, stop, precision):
        try:
            r_j, t_j = radius.evaluate(u=j), theta.evaluate(u=j)
            x, y = vector_by_direction(t_j, r_j)
            new_point = apply(x * 25, y * 25)
            # Skip joins across large vertical jumps (discontinuities).
            if prev_point is not None and abs(prev_point[1] - new_point[1]) <= 200:
                pygame.display.update(
                    pygame.draw.line(screen, color, prev_point, new_point, 2))
            prev_point = new_point
        except (EvaluationError, TypeError):
            prev_point = None
        # Keep the window responsive during the long drawing loop.
        pygame.event.get()
    return
Beispiel #15
0
def train_experiment(experiment):
    '''Train for an experiment, and compute metrics on training and validation set.

    experiment is a dictionary with three required entries:
        - datagen -> the DataGenerator class
        - model -> the Model class
        - name -> name of the classifier, which will be used to set the saved file names'''
    try:
        DataGenerator = experiment['datagen']
        Model = experiment['model']
        clf_name = experiment['name']
    except KeyError:
        print(
            "Experiment dictionary must contain datagen, model and name keys."
        )
        # The function cannot continue without these entries; previously the
        # error was swallowed and a NameError followed.
        raise

    # optional parameters
    lr = experiment.get('lr', 1e-4)
    eps = experiment.get('eps', 1e-8)
    overlap = experiment.get('overlap', 'minimum')
    max_epochs = experiment.get('max_epochs', 200)
    patience = experiment.get('patience', 15)
    directory = experiment.get('directory', "D:/Adrien/dataset/GlaS/train")
    image_size = experiment.get('image_size', (256, 384))
    batch_size = experiment.get('batch_size', 5)
    validation_size = experiment.get('validation_size', 10)
    min_area = experiment.get('min_area', 4000)

    tf.keras.backend.clear_session()

    # Training
    generator = DataGenerator(batch_size, validation_size, directory,
                              image_size)
    model = Model(image_size, clf_name, lr=lr, eps=eps)
    history = model.fit(max_epochs, generator, patience=patience)

    # Learning curves: loss (top) and accuracy (bottom); validation in red,
    # training in blue.
    plt.figure()
    plt.subplot(2, 1, 1)
    plt.plot(history.history['val_loss'], 'r-')
    plt.plot(history.history['loss'], 'b-')
    plt.subplot(2, 1, 2)
    plt.plot(history.history['val_accuracy'], 'r-')
    plt.plot(history.history['accuracy'], 'b-')
    plt.savefig(f'{clf_name}_history.png')

    # Compute & save metrics (unused 'tile' flag removed)
    train_metrics = Evaluator.evaluate(model, generator, 'train', overlap,
                                       min_area)
    val_metrics = Evaluator.evaluate(model, generator, 'val', overlap,
                                     min_area)

    with open(f"{clf_name}_metrics.txt", 'w') as fp:
        print("Training perfomance:", file=fp)
        print("Precision\tRecall\tMCC", file=fp)
        print(train_metrics.mean(axis=0), file=fp)
        print(np.median(train_metrics, axis=0), file=fp)
        print(" ---- ", file=fp)
        print("Validation perfomance:", file=fp)
        print("Precision\tRecall\tMCC", file=fp)
        print(val_metrics.mean(axis=0), file=fp)
        print(np.median(val_metrics, axis=0), file=fp)

    np.save(f"{clf_name}_metrics_train.npy", train_metrics)
    np.save(f"{clf_name}_metrics_val.npy", val_metrics)
#
# Min Lee
# [email protected]
# MacOS
# Python
#
# In accordance with the class policies and Georgetown's Honor Code,
# I certify that, with the exceptions of the class resources and those
# items noted below, I have neither given nor received any assistance
# on this project.
#


import sys
from Classifier import backprop
from Evaluator import Evaluator

# Build the classifier and evaluate it; both consume the raw CLI arguments.
classifier = backprop(sys.argv)
evaluator = Evaluator(sys.argv)
performance = evaluator.evaluate(classifier, sys.argv)

# NOTE(review): Python 2 print statement.
print performance
Beispiel #17
0
 def _evaluate(self, project, modelName, resultPath, numClass):
     """Run the title-based evaluation for one project/model result set,
     using the manuals and issues shared on this instance."""
     evaluator = Evaluator(project, modelName, resultPath, self.manuals,
                           self.issues, numClass)
     # tp='title' selects the title field for evaluation.
     evaluator.evaluate(tp='title')
class AiMove2():
    """Minimax move chooser for the finger game.

    Player 1 is the maximizer (the AI). minimax() records the chosen move
    index in self.finger and a guaranteed-winning move (utility >= 10000)
    in self.is_win.
    """

    def __init__(self):
        self.evaluator = Evaluator()  # scores a GameState for minimax
        self.all_depth = 3            # search depth
        self.finger = 0               # chosen move index at the root
        self.is_win = None            # move index that wins outright, if any

    def predictMove(self, game_state: GameState):
        """Run minimax from *game_state* and return the chosen move index
        (the winning move index, when one was found)."""
        # util_value, prob_state = self.tapToAll(game_state)
        # prob_state_iter = prob_state.copy()
        # best_util = None
        # best_state = []
        # finger = 0
        # best_util, all_next_state, finger = self.findMove(game_state, 0, best_util, finger)
        # print("FIRST ANSWER  : ", finger, " util: ",best_util, " ",end='')
        # all_next_state[finger].print()
        # print("-------------------------------")
        # self.findAllMove(game_state,1)
        print("HASIL : ",self.minimax(game_state,self.all_depth))
        print("FINGER : ",self.finger)
        print("FINGER WIN : ", self.is_win)
        if self.is_win is None:
            return self.finger
        else:
            return self.is_win
        pass

    def minimax(self, game_state: GameState, depth):
        """Plain minimax to *depth*; player 1 maximizes, player 0 minimizes.

        Side effect: at the root depth, stores the best move index in
        self.finger (and self.is_win when its utility >= 10000).
        """
        # print("USING RECURSIVE")
        now_util = self.evaluator.evaluate(game_state,1)
        if depth == 0:
            return now_util
        finger = 0
        if game_state.player == 1: #MAX
            print("MAXING ",depth)
            max_utility = -111111
            result_util, probability_move, result_finger, utility_value = self.findMove(game_state, 0, None, 0)
            # Recurse only into legal moves (utility_value[i] is None for
            # moves ruled out by findMove).
            for i in range(len(probability_move)):
                if utility_value[i] is not None:
                    print("FROM ", max_utility)
                    util = self.minimax(probability_move[i], depth-1)
                    print("IS ", util)
                    max_utility = max(max_utility, util)
                    print("To ", max_utility)
                    if max_utility == util and depth == self.all_depth:
                        self.finger = i
                        if max_utility >= 10000 and self.is_win is None:
                            self.is_win = i
                        print("change finger to ",i, "from dept ",depth)

            return max_utility

        else:
            print("MINIM ",depth)
            min_utility = 111111
            result_util, probability_move, result_finger, utility_value = self.findMove(game_state, 0, None,0)
            for i in range(len(probability_move)):
                if utility_value[i] is not None:
                    util = self.minimax(probability_move[i], depth - 1)
                    min_utility = min(min_utility, util)
                    # print("CHANGE MINIM FROM ",min)
                    # if min_utility == util and (self.all_depth == depth + 1):
                    #     self.finger = i
                    #     print("change finger to ",i)
            return min_utility



    def findAllMove(self, game_state: GameState, iter_i):
        """Debug helper: enumerate moves, optionally look one ply ahead
        (iter_i == 1) and print the best move found. Returns None."""
        best_util = None
        all_next_state = []
        all_util = []
        finger = 0
        best_util, all_next_state, finger, all_util = self.findMove(game_state, 0, best_util, finger)
        print("--FAM ANSWER  : ", finger, " util: ", best_util, " ", end='')
        all_next_state[finger].print()
        print("-------------------------------")

        if iter_i == 0:
            return
        elif iter_i == 1:
            # One-ply lookahead: tighten each move's utility with the
            # opponent's best reply.
            for i in range(len(all_next_state)):
                if all_util[i] is not None:
                    temp_util = self.findMove(all_next_state[i],1,None,0)
                    if temp_util is not None:
                        if all_next_state[i].player == 1:
                            if all_util[i] < temp_util:
                                all_util[i] = temp_util
                        else:
                            if all_util[i] > temp_util:
                                all_util[i] = temp_util
                        # else:
        else:
            pass

        best_util = None
        for i in range(len(all_util)):

            if all_util[i] is not None:
                if best_util is None:
                    best_util = all_util[i]
                if all_util[i] > best_util:
                    finger = i
                    best_util = all_util[i]
        print("--FAM ANSWER 2 : ", finger, " util: ", best_util, " ", end='')
        all_next_state[finger].print()
        print(all_util)
        print("-------------------------------")

    def findMove(self, game_state: GameState, iter_i, result_util, result_finger):
        """Enumerate the up-to-5 legal successor states of *game_state*
        (4 tap moves + optional split) and score them.

        Move indexing: 0..3 are left/right hand taps, 4 is the split move.
        A None in utility_value marks an illegal move. With iter_i == 0
        returns (result_util, probability_move, result_finger,
        utility_value); with iter_i == 1 returns just result_util.
        """

        self.eval = Evaluator()

        player_left = game_state.values[0][0]
        player_right = game_state.values[0][1]
        ai_left = game_state.values[1][0]
        ai_right = game_state.values[1][1]

        probability_move = []
        utility_value = [0, 0, 0, 0, 0]

        if (game_state.player == 1):  # Ai turn
            probability_move.append(
                GameState(0, (player_left + ai_left) % 5, player_right, ai_left, ai_right))
            probability_move.append(
                GameState(0, player_left, (player_right + ai_left) % 5, ai_left, ai_right))
            probability_move.append(
                GameState(0, (player_left + ai_right) % 5, player_right, ai_left, ai_right))
            probability_move.append(
                GameState(0, player_left, (player_right + ai_right) % 5, ai_left, ai_right))
            # Split move: only legal when the hand total is even.
            if ((ai_left + ai_right) % 2 == 0):
                probability_move.append(
                    GameState(0, player_left, player_right, int((ai_left + ai_right) / 2),
                              int((ai_left + ai_right) / 2)))
        else:  # Player turn
            probability_move.append(
                GameState(1, player_left, player_right, (player_left + ai_left) % 5, ai_right))
            probability_move.append(
                GameState(1, player_left, player_right, (player_right + ai_left) % 5, ai_right))
            probability_move.append(
                GameState(1, player_left, player_right, ai_left, (player_left + ai_right) % 5))
            probability_move.append(
                GameState(1, player_left, player_right, ai_left, (player_right + ai_right) % 5))
            if ((player_left + player_right) % 2 == 0):
                probability_move.append(
                    GameState(1, int((player_left + player_right) / 2), int((player_left + player_right) / 2), ai_left,
                              ai_right))
        pass

        # Moves using a dead (zero) hand are illegal.
        if ai_left == 0:
            utility_value[0] = utility_value[1] = None
        if ai_right == 0:
            utility_value[2] = utility_value[3] = None
        if player_left == 0:
            utility_value[0] = utility_value[2] = None
        if player_right == 0:
            utility_value[1] = utility_value[3] = None

        for i in range(len(probability_move)):
            print("i:",i ,end=' \t')
            probability_move[i].print()

            # Split is illegal when hands are already equal or the sum is odd.
            if ((ai_left == ai_right) or (((ai_left+ai_right) %2)!=0))  and probability_move[i].player == 0:
                utility_value[4] = None
            if ((player_left == player_right) or ((player_left+player_right) %2) != 0) and probability_move[i].player == 1:
                utility_value[4] = None
            if utility_value[i] is not None:
                utility_value[i] = self.eval.evaluate(probability_move[i],1)

        print("UTILITY ", utility_value)

        # Pick the best legal move: max for the AI (player 0 successors
        # mean the AI just moved), min for the player.
        for i in range(len(utility_value)):
            if utility_value[i] is not None:
                if result_util is None:
                    result_util = utility_value[i]
                    result_finger = i
                elif probability_move[i].player == 0:
                    if result_util < utility_value[i]:
                        result_util = utility_value[i]
                        result_finger = i
                else:
                    if result_util > utility_value[i]:
                        result_util = utility_value[i]
                        result_finger = i

        print("MAX UTIL ",result_util, " iter:", iter_i)
        # return  result_util, probability_move[result_finger], result_finger
        if iter_i == 0:
            return result_util, probability_move, result_finger, utility_value
        elif iter_i == 1:
            print("REUSL UTIL : ",result_util)
            return result_util
        else:
            # NOTE(review): this branch recurses for side effects only and
            # returns None.
            for i in range(len(utility_value)):
                if utility_value[i] is not None:
                    self.findAllMove(probability_move[i],iter_i-1)
class AiMove():
    """Greedy one-ply move chooser for the Chopsticks hand game.

    NOTE(review): best_state / best_utility / which_finger are *class*
    attributes shared across instances and mutated by tapToAll(), so the
    search is not reentrant — confirm only one search runs at a time.
    """

    layer_tree = 3    # intended search depth (not used by the current search)
    best_state = 0    # best successor GameState seen by the last tapToAll()
    best_utility = 0  # utility of best_state
    which_finger = 0  # move index of best_state (see mapping in predictMove)

    def predictMove(self, game_state: GameState):
        """Return the AI's best immediate move from ``game_state``.

        Expands the position one ply with tapToAll(), scores every successor
        with the evaluator, and greedily picks the highest-scoring one.

        Returns:
            (best_finger, best_util, best_state): the chosen move index, its
            utility, and the resulting GameState.

        Move index mapping (AI to move):
            0: AI left hand taps opponent's left
            1: AI left hand taps opponent's right
            2: AI right hand taps opponent's left
            3: AI right hand taps opponent's right
            4: AI divides its fingers evenly over both hands
        """
        self.eval = Evaluator()

        game_state.print()  # debug: show the position being searched

        # One-ply expansion into every legal successor.
        successors, _, _ = self.tapToAll(game_state)

        # Rate each successor from the AI's point of view.
        utilities = [self.eval.evaluate(state, 1) for state in successors]

        # Greedy arg-max over the rated successors.
        best_finger = 0
        best_util = 0
        for idx, utility in enumerate(utilities):
            if idx == 0 or utility > best_util:
                best_util = utility
                best_finger = idx

        return best_finger, best_util, successors[best_finger]

    def tapToAll(self, game_state: GameState):
        """Generate every legal successor of ``game_state`` for the side to move.

        ``game_state.values`` holds ((player_left, player_right),
        (ai_left, ai_right)); ``game_state.player == 1`` means it is the AI's
        turn.  A tap adds the tapping hand onto the target hand modulo 5, and
        a divide move is generated when the mover's finger total is even.

        As a side effect, rates every generated move and records the best one
        in the AiMove class attributes (maximising when the generated state's
        ``player`` is 0, i.e. the AI just moved; minimising otherwise).

        Returns:
            (probability_move, AiMove.best_utility, AiMove.best_state)
        """
        probability_move = []

        player_left = game_state.values[0][0]
        player_right = game_state.values[0][1]

        ai_left = game_state.values[1][0]
        ai_right = game_state.values[1][1]

        if (game_state.player == 1):  # AI turn: successors hand the move to the player (0)
            probability_move.append(
                GameState(0, (player_left + ai_left) % 5, player_right,
                          ai_left, ai_right))
            probability_move.append(
                GameState(0, player_left, (player_right + ai_left) % 5,
                          ai_left, ai_right))
            probability_move.append(
                GameState(0, (player_left + ai_right) % 5, player_right,
                          ai_left, ai_right))
            probability_move.append(
                GameState(0, player_left, (player_right + ai_right) % 5,
                          ai_left, ai_right))
            if ((ai_left + ai_right) % 2 == 0):  # divide only legal on an even total
                probability_move.append(
                    GameState(0, player_left, player_right,
                              int((ai_left + ai_right) / 2),
                              int((ai_left + ai_right) / 2)))
        else:  # Player turn: successors hand the move to the AI (1)
            probability_move.append(
                GameState(1, player_left, player_right,
                          (player_left + ai_left) % 5, ai_right))
            probability_move.append(
                GameState(1, player_left, player_right,
                          (player_right + ai_left) % 5, ai_right))
            probability_move.append(
                GameState(1, player_left, player_right, ai_left,
                          (player_left + ai_right) % 5))
            probability_move.append(
                GameState(1, player_left, player_right, ai_left,
                          (player_right + ai_right) % 5))
            if ((player_left + player_right) % 2 == 0):
                probability_move.append(
                    GameState(1, int((player_left + player_right) / 2),
                              int((player_left + player_right) / 2), ai_left,
                              ai_right))

        # Rate each move and track the best one in the shared class state.
        for i in range(len(probability_move)):
            rating = self.eval.evaluate(probability_move[i], 1)
            if i == 0:
                AiMove.best_utility = rating
                AiMove.best_state = probability_move[i]
                AiMove.which_finger = 0  # fix: reset together with best_state
            if probability_move[i].player == 0:
                # AI just moved: maximise the rating.
                if rating > AiMove.best_utility:
                    AiMove.best_utility = rating
                    AiMove.best_state = probability_move[i]
                    AiMove.which_finger = i
            else:
                # Player just moved: minimise the rating.
                if rating < AiMove.best_utility:
                    AiMove.best_utility = rating
                    AiMove.best_state = probability_move[i]
                    AiMove.which_finger = i

        return probability_move, AiMove.best_utility, AiMove.best_state
import numpy as np
from GA.Initializer import Heuristic_Initializer
from Task_Initializer import Task
from Evaluator import Evaluator
from copy import deepcopy

# Build the task definition for this GA run.
task = Task()

# Seed the starting population with the heuristic initialiser.
init = Heuristic_Initializer()
pop = init.initialize_pop(task)

# Attach fitness scores to every individual in the population.
evaluator = Evaluator()
pop = evaluator.evaluate(pop, task)

# Report each individual's fitness, one per line.
for individual in pop:
    print(individual['fitness'])
Beispiel #21
0
                newAttribute.setName(items.name + str(index))
                newAttribute.addValue(str(0))
                newAttribute.addValue(str(1))
                newAttributes.add(newAttribute)
    else:
        newAttributes.add(items)
# Install the rebuilt (binarised) attribute set on the new dataset and mark
# the last attribute as the class attribute.  (Legacy Python 2 syntax.)
newDataSet.setAttributes(newAttributes)
newDataSet.attributes.setClassIndex(len(newAttributes.attributes)-1)
# Debug output of the transformed attribute metadata and dataset.
print newAttributes
print newAttributes.classIndex
print newDataSet



# Train an ID3 decision tree and score it; both receive the raw command-line
# arguments.  NOTE(review): presumably argv carries dataset/options — confirm
# against the ID3 and Evaluator constructors.
classifier = ID3(sys.argv)
evaluator = Evaluator(sys.argv)
performance = evaluator.evaluate(classifier, sys.argv)

print performance

# Disabled scratch experiment kept inside a string literal: prints the binary
# digits of a test value, skipping the leading '0b' prefix.  Candidate for
# removal.
"""
testDecimal = 8
print bin(testDecimal)
for index, c in enumerate(str(bin(testDecimal))):
    if index <=1:
        pass
    else:
        print c
print "LENGTH " + str(len(str(bin(testDecimal)))-2)
"""
exit(0)
Beispiel #22
0
class llvmMultiobjetiveProblem(IntegerProblem):
    """Multi-objective LLVM pass-ordering problem (jMetalPy-style).

    A solution is a vector of ``solution_length`` pass indices.  Evaluation
    applies those passes to a reference bitcode file, rebuilds the executable
    and assembly, and measures five objectives: codelines (maximised) and
    tags / jumps / function tags / calls (minimised).  Measurements are
    cached in ``self.dictionary`` keyed by the genotype string and persisted
    to ``<n>_dictionary.data`` between runs.
    """

    def __init__(self, max_epochs: int = 500, filename: str = None, solution_length: int = 100, population_size = int,
                offspring_population_size = int, verbose: bool = True, upper_bound : int = 86):
        # NOTE(review): population_size / offspring_population_size default to
        # the *type* ``int``; callers are expected to always pass real values.
        # ``filename`` is currently unused.
        from ast import literal_eval  # local import: only needed to reload the cache

        self.llvm = LlvmUtils(llvmpath='/usr/bin/', clangexe='clang-10', optexe='opt-10', llcexe='llc-10')
        self.llvmfiles = LlvmFiles(basepath='./', source_bc='polybench_small/polybench_small_original.bc',
                                jobid=f'{population_size}_{offspring_population_size}_{solution_length}')
        self.evaluator = Evaluator(runs=0)
        self.number_of_variables = solution_length
        self.lower_bound = [0 for _ in range(self.number_of_variables)]
        self.upper_bound = [upper_bound for _ in range(self.number_of_variables)]
        self.obj_labels = ['codelines', 'tags', 'jumps', 'function_tags', 'calls']
        self.obj_directions = [self.MAXIMIZE, self.MINIMIZE, self.MINIMIZE, self.MINIMIZE, self.MINIMIZE]
        self.number_of_objectives = 5
        self.number_of_constraints = 0
        self.max_epochs = max_epochs
        self.evaluations = 0
        self.epoch = 1
        self.phenotype = 0  # index of the solution within the current epoch
        self.population_size = population_size
        self.offspring_population_size = offspring_population_size
        self.dictionary = dict()  # genotype string -> objectives (fitness cache)
        self.verbose = verbose
        self.preloaded_dictionary = f"{self.number_of_variables}_dictionary.data"
        if os.path.exists(self.preloaded_dictionary):
            with open(self.preloaded_dictionary, "r") as file:
                print(f"reading '{self.preloaded_dictionary}'...")
                for line in file.readlines():
                    line = line[:-1]  # drop the trailing newline
                    keyvalue = line.split(sep=";")
                    try:
                        # fix: values were persisted via str(list); parse them
                        # back so a cache hit yields numbers, not a raw string
                        # whose indexing would return single characters.
                        self.dictionary[keyvalue[0]] = literal_eval(keyvalue[1])
                    except (ValueError, SyntaxError):
                        # Fall back to the raw string (original behaviour) if
                        # the stored value is not a Python literal.
                        self.dictionary[keyvalue[0]] = keyvalue[1]

    def get_name(self):
        """Human-readable problem name used by the framework."""
        return 'Llvm Multiobjective Problem'

    def config_to_str(self):
        """Return a compact run-configuration tag for file names and logs."""
        return f"{self.population_size}_{self.offspring_population_size}_{self.number_of_variables}_{self.max_epochs}"

    def evaluate(self, solution: IntegerSolution) -> IntegerSolution:
        """Fill ``solution.objectives``, reusing cached measurements when possible."""
        self.phenotype += 1
        # Epoch bookkeeping: epoch 1 evaluates the initial population, later
        # epochs only the offspring.
        limit = self.offspring_population_size if self.epoch != 1 else self.population_size
        if self.phenotype % (limit + 1) == 0:
            self.epoch += 1
            self.phenotype = 1
        key = f"{solution.variables}"
        value = self.dictionary.get(key)
        if value is None:  # fix: identity comparison with None
            # Decoding: map each gene to the name of an LLVM pass.
            passes = ""
            for i in range(self.number_of_variables):
                passes += f" {self.llvm.get_passes()[solution.variables[i]]}"

            # Optimize and generate the measurable artifacts.
            self.llvm.toIR(self.llvmfiles.get_original_bc(), self.llvmfiles.get_optimized_bc(), passes=passes)
            self.llvm.toExecutable(self.llvmfiles.get_optimized_bc(), self.llvmfiles.get_optimized_exe())
            self.llvm.toAssembly(self.llvmfiles.get_optimized_bc(), self.llvmfiles.get_optimized_ll())

            # Measure the five objectives from the generated assembly/binary.
            self.evaluator.evaluate(source_ll=self.llvmfiles.get_optimized_ll(), source_exe=self.llvmfiles.get_optimized_exe())
            solution.objectives[0] = self.evaluator.get_codelines()
            solution.objectives[1] = self.evaluator.get_tags()
            solution.objectives[2] = self.evaluator.get_total_jmps()
            solution.objectives[3] = self.evaluator.get_function_tags()
            solution.objectives[4] = self.evaluator.get_calls()
            # Cache a copy so later mutation of the solution cannot corrupt it.
            self.dictionary[key] = list(solution.objectives)
            self.evaluator.reset()
        else:
            # Cache hit: restore the stored objective values.
            solution.objectives[0] = value[0]
            solution.objectives[1] = value[1]
            solution.objectives[2] = value[2]
            solution.objectives[3] = value[3]
            solution.objectives[4] = value[4]

        if self.verbose:
            print("evaluated solution {:3} from epoch {:3} : variables={}, fitness={}"\
                .format(self.phenotype,self.epoch,solution.variables,solution.objectives))
        return solution

    ### FOR TERMINATION CRITERION ###
    def update(self, *args, **kwargs):
        """Observer hook: record the evaluation count reported by the algorithm."""
        self.evaluations = kwargs['EVALUATIONS']

    ### FOR TERMINATION CRITERION ###
    @property
    def is_met(self):
        """True once ``max_epochs`` is reached; also periodically persists the cache."""
        met = self.epoch >= self.max_epochs
        if self.phenotype * self.epoch % 100 == 0 or met:
            with open(self.preloaded_dictionary, "w") as file:
                for keys, values in self.dictionary.items():
                    file.write('{};{}\n'.format(keys, values))
        return met
                          optexe='opt-10',
                          llcexe='llc-10')
    llvmfiles = LlvmFiles(
        basepath='./',
        source_bc='polybench_small/polybench_small_original.bc',
        jobid='solution')
    evaluator = Evaluator(runs=10)

    plot_labels = [
        'total codelines', 'codelines', 'labels', 'conditional jumps',
        'unconditional jumps', 'function labels', 'function calls'
    ]

    llvmutils.toAssembly(llvmfiles.get_original_bc(), "original.ll")
    llvmutils.toExecutable(llvmfiles.get_original_bc(), "original.o")
    evaluator.evaluate("original.ll", "./original.o")

    original_results = []
    #original_results.append(evaluator.get_runtime())
    original_results.append(evaluator.get_total_codelines())
    original_results.append(evaluator.get_codelines())
    original_results.append(evaluator.get_tags())
    #original_results.append(evaluator.get_total_jmps())
    original_results.append(evaluator.get_conditional_jmps())
    original_results.append(evaluator.get_unconditional_jmps())
    original_results.append(evaluator.get_function_tags())
    original_results.append(evaluator.get_calls())
    evaluator.reset()

    allpasses = []
Beispiel #24
0
'''
Driver script: parse a test program, build its symbol table, and evaluate it.

@author: Joshua
'''
from Parser import Parser
from SymbolTable import TableBuilder
from Evaluator import Evaluator

# Build the parse tree from the source file.
parser = Parser('TestProgram.txt')
root = parser.parse()

# Collect the declared symbols from the tree into a symbol table.
table_builder = TableBuilder(root)
table_builder.addSymbols()

# Evaluate the program against the symbol table.  Renamed from ``eval`` so
# the builtin eval() is not shadowed at module level.
symbol_table = table_builder.returnTable()
evaluator = Evaluator(root, symbol_table)
evaluator.evaluate()
class AiMove():
    """Two-ply move chooser for the Chopsticks hand game.

    NOTE(review): best_state / best_utility / which_finger are *class*
    attributes mutated by tapToAll(); the search is therefore not reentrant —
    confirm only one search runs at a time.
    """

    layer_tree = 3    # intended search depth (not used by the current search)
    best_state = 0    # best successor found by the last tapToAll() call
    best_utility = 0  # utility of best_state
    which_finger = 0  # move index of best_state

    def predictMove(self, game_state: GameState):
        """Pick the AI's move by a two-ply min-max over successor states.

        Returns:
            (best_finger, best_util, best_state) — the move index, its
            worst-case utility after the opponent's reply, and the resulting
            GameState.  Returns (-1, 10000, None) immediately when the
            evaluator already scores the position as an AI win.

        Move index mapping (AI to move):
            0: AI left hand taps opponent's left
            1: AI left hand taps opponent's right
            2: AI right hand taps opponent's left
            3: AI right hand taps opponent's right
            4: AI divides its fingers evenly over both hands
        """
        self.eval = Evaluator()

        # Short-circuit: the position is already a win for the AI.
        if self.eval.evaluate(game_state, 1) == 10000:
            print("AI WINNING")
            return -1, 10000, None

        game_state.print()  # debug: show the position being searched

        # Ply 1: expand the position and rate every AI move.
        successors, _, _ = self.tapToAll(game_state)
        utilities = [self.eval.evaluate(s, 1) for s in successors]

        # Ply 2: expand each successor in turn; keep the worst-case
        # (minimised) utility the opponent can force against that move.
        for i in range(len(successors)):
            _, reply_util, _ = self.tapToAll(successors[i])
            if utilities[i] > reply_util:
                utilities[i] = reply_util

        # Choose the move with the best worst-case utility.
        best_finger = 0
        best_util = 0
        for i in range(len(utilities)):
            if i == 0:
                best_util = utilities[i]
            # fix: the original compared against the stale ply-2 variable
            # ``be_util`` instead of the running maximum ``best_util``, so
            # the first move was almost always returned.
            elif utilities[i] > best_util:
                best_util = utilities[i]
                best_finger = i

        return best_finger, best_util, successors[best_finger]

    def tapToAll(self, game_state: GameState):
        """Generate all successors of ``game_state`` for the side to move.

        ``game_state.values`` holds ((player_left, player_right),
        (ai_left, ai_right)); ``player == 1`` means the AI moves.  A tap adds
        the tapping hand onto the target hand modulo 5, and a divide move is
        generated when the mover's finger total is even.

        Rates every generated move and records the best non-blocked one in
        the AiMove class attributes (maximising when the AI just moved,
        minimising otherwise).

        Returns:
            (probability_move, AiMove.best_utility, AiMove.best_state)
        """
        probability_move = []

        player_left = game_state.values[0][0]
        player_right = game_state.values[0][1]

        ai_left = game_state.values[1][0]
        ai_right = game_state.values[1][1]

        # Masks over move indices: a 1 marks a move involving a dead hand
        # (0 fingers); index 4 (divide) is never blocked.
        # NOTE(review): the masks pair hand/index the same way for both turn
        # orders, but the two generation branches below order their moves
        # differently — confirm the mapping matches the intended moves.
        blocked_finger = [0, 0, 0, 0, 0]     # AI hands
        blocked_finger_pl = [0, 0, 0, 0, 0]  # player hands

        if player_left == 0:
            blocked_finger_pl[0] = 1
            blocked_finger_pl[1] = 1
        if player_right == 0:
            blocked_finger_pl[2] = 1
            blocked_finger_pl[3] = 1
        if ai_left == 0:
            blocked_finger[0] = 1
            blocked_finger[1] = 1
        if ai_right == 0:
            blocked_finger[2] = 1
            blocked_finger[3] = 1

        if (game_state.player == 1):  # AI turn: successors hand the move to the player (0)
            probability_move.append(
                GameState(0, (player_left + ai_left) % 5, player_right, ai_left, ai_right))
            probability_move.append(
                GameState(0, player_left, (player_right + ai_left) % 5, ai_left, ai_right))
            probability_move.append(
                GameState(0, (player_left + ai_right) % 5, player_right, ai_left, ai_right))
            probability_move.append(
                GameState(0, player_left, (player_right + ai_right) % 5, ai_left, ai_right))
            if ((ai_left + ai_right) % 2 == 0):  # divide only legal on an even total
                probability_move.append(
                    GameState(0, player_left, player_right, int((ai_left + ai_right)/2), int((ai_left + ai_right)/2)))
        else:  # Player turn: successors hand the move to the AI (1)
            probability_move.append(
                GameState(1, player_left, player_right, (player_left + ai_left) % 5, ai_right))
            probability_move.append(
                GameState(1, player_left, player_right, ai_left, (player_left + ai_right) % 5))
            probability_move.append(
                GameState(1, player_left, player_right, (player_right + ai_left) % 5, ai_right))
            probability_move.append(
                GameState(1, player_left, player_right, ai_left, (player_right + ai_right) % 5))
            if ((player_left + player_right) % 2 == 0):
                probability_move.append(
                    GameState(1, int((player_left + player_right)/2), int((player_left + player_right)/2), ai_left, ai_right))

        # Rate each move; track the best non-blocked one in shared class state.
        for i in range(len(probability_move)):
            rating = self.eval.evaluate(probability_move[i], 1)
            if probability_move[i].player == 0:
                if i == 0:
                    AiMove.best_utility = -11111  # sentinel below any real rating
                if rating > AiMove.best_utility and (blocked_finger[i] != 1 and blocked_finger_pl[i] != 1):
                    AiMove.best_utility = rating
                    AiMove.best_state = probability_move[i]
                    AiMove.which_finger = i
            else:
                if i == 0:
                    AiMove.best_utility = 11111  # sentinel above any real rating
                if rating < AiMove.best_utility and (blocked_finger_pl[i] != 1 and blocked_finger[i] != 1):
                    AiMove.best_utility = rating
                    AiMove.best_state = probability_move[i]
                    AiMove.which_finger = i
            probability_move[i].print()  # debug: dump every candidate state

        return probability_move, AiMove.best_utility, AiMove.best_state
Beispiel #26
0
            heuristicName = sys.argv[4].strip()
            print 
        else:
            raise Exception('Invalid Input! Usage: >> python main.py <domainfile> <problemfile> -h <heuristic_name>')
    except:
        heuristicName = 'equality'
        print bcolors.OKGREEN + "--> Default heuristic 'equality'" + bcolors.ENDC

    # parse SAS/PDDL data #
    listOfPredicates, initialState, goalState, listOfActions, compliantConditions, goalCompliantConditions = grounded_data.returnParsedData()

    # generate transformation #
    Mrref, M = compute_transform(listOfPredicates, listOfActions, goalCompliantConditions, debug_flag)

    # evaluate #
    evaluation_object = Evaluator(listOfPredicates, listOfActions, initialState, goalState, compliantConditions, goalCompliantConditions, Mrref, M, cost_flag)
    print bcolors.HEADER + "\n>> Initial state evaluation = " + bcolors.OKBLUE + str(float(evaluation_object.evaluate(initialState, heuristicName))) + bcolors.ENDC    
    sys.exit(0)
    
    # solve #
    plan_object = Planner(listOfPredicates, listOfActions, initialState, goalState, compliantConditions, goalCompliantConditions, Mrref, M, cost_flag)
    plan, cost  = plan_object.aStarSearch(heuristicName)

    if plan:    
        print bcolors.HEADER + "\n>> FINAL PLAN\n--> " + bcolors.OKBLUE + '\n--> '.join(plan) + "\n" + bcolors.OKGREEN + "\nCost of Plan: " + str(cost) + '\n' + bcolors.ENDC
    else:
        if cost == 0.0:
            print bcolors.HEADER + "*** NO PLAN REQUIRED ***" + bcolors.ENDC
        else:
            print bcolors.HEADER + "*** NO PLAN FOUND ***" + bcolors.ENDC    
Beispiel #27
0
def cotrain(dataset,
            source_language,
            target_language,
            translation_dict_path,
            seed_words_path,
            vocab_path,
            teacher_args,
            student_args,
            train_size=10000,
            num_seeds=30,
            remove_stopwords=True,
            use_seed_weights=True,
            manual_seed=0,
            sw_train_size=10000,
            logger=None,
            metric='acc',
            alignment_method='google_translate',
            tokenizer_method='spacy',
            num_iter=2):
    """Cross-lingual teacher/student co-training.

    Extracts seed words from labeled source-language data, translates them
    to the target language, trains a seed-word Teacher on unlabeled
    target-language data, then trains a Student on the Teacher's
    pseudo-labels.  With ``num_iter == 2`` the Student is re-trained once on
    its own predictions (first iteration uses a logreg Student regardless of
    ``student_args['model_name']``).

    NOTE(review): ``use_seed_weights`` is accepted but never read here —
    confirm whether it should be forwarded to the seed-word extractor.

    Returns:
        dict with the teacher/student args and evaluation results for the
        target-language train/dev/test splits (all-zero defaultdicts when no
        target seed words could be extracted).

    Raises:
        NotImplementedError: for ``num_iter`` values other than 1 or 2.
    """
    def printstr(s):
        # Route progress messages through the logger when one is supplied.
        if logger:
            logger.info(s)
        else:
            print(s)

    dh = DataHandler(dataset=dataset, logger=logger)
    num_labels = len(dh.label2ind)

    # Load training data in target language
    printstr('Loading train {} data'.format(target_language))
    train_df = dh.load_df(method='unlabeled',
                          language=target_language,
                          train_size=train_size)

    # Load tokenizer for target language
    tokenizer = Tokenizer(language=target_language,
                          tokenizer_method=tokenizer_method,
                          vocab_loadfolder=vocab_path,
                          remove_stopwords=remove_stopwords,
                          ngram_range=(1, 1),
                          min_freq=1,
                          max_freq_perc=1.0)
    word2ind = tokenizer.word2ind

    # Seed Word Extraction: learn weighted seed words on the source
    # language, then map them into the target language.
    printstr("Extracting Seed words")
    missing_translation_strategy = 'keep' if dataset == 'twittersent' else 'delete'
    seedword_extractor = SeedwordExtractor(
        source_language=source_language,
        target_language=target_language,
        num_seeds=num_seeds,
        label2ind=dh.label2ind,
        alignment_method=alignment_method,
        translation_dict_path=translation_dict_path,
        missing_translation_strategy=missing_translation_strategy,
        tokenizer=tokenizer.tokenizer,
        word2ind=word2ind,
        logger=logger)
    source_df = dh.load_df(method='train',
                           language=source_language,
                           train_size=sw_train_size)
    source_tokenizer = Tokenizer(language=source_language,
                                 vocab_loadfolder=vocab_path,
                                 tokenizer_method=tokenizer_method)
    source_weight_dict, intercept = seedword_extractor.extract_seedwords(
        df=source_df,
        language=source_language,
        seedword_savefolder=seed_words_path,
        tokenizer=source_tokenizer.tokenizer,
        tokenizer_method=tokenizer_method,
        stopwords=source_tokenizer.stopwords)
    seed_word_dict = seedword_extractor.get_target_seedwords(
        source_seedword_dict=source_weight_dict)

    if len(seed_word_dict) == 0:
        # Without target seed words the Teacher cannot be built; return
        # zeroed results so sweep scripts can continue.
        printstr("ERROR: There are 0 target seed words. Skipping experiment")
        return {
            'teacher_args': teacher_args,
            'student_args': student_args,
            'teacher_train_res': defaultdict(int),
            'teacher_dev_res': defaultdict(int),
            'teacher_test_res': defaultdict(int),
            'student_dev_res': defaultdict(int),
            'student_test_res': defaultdict(int),
            'student_train_res': defaultdict(int),
        }

    # Initialize Teacher in Target Language
    teacher = Teacher(word2ind=word2ind,
                      aspect2ind=dh.label2ind,
                      seed_word_dict=seed_word_dict,
                      verbose=True,
                      teacher_args=teacher_args,
                      tokenizer=tokenizer.tokenizer,
                      stopwords=tokenizer.stopwords)
    teacher.train(df=train_df,
                  teacher_weight_dict=seed_word_dict,
                  teacher_bias=intercept,
                  num_classes=num_labels)

    # Initialize Student in Target Language:
    if num_iter > 1:
        # For the first iteration, use LogReg
        actual_student_name = student_args['model_name']
        student_args['model_name'] = 'logreg'
    student = Student(language=target_language,
                      label2ind=dh.label2ind,
                      student_args=student_args,
                      tokenizer=tokenizer.tokenizer,
                      manual_seed=manual_seed)

    ev = Evaluator(aspect2ind=dh.label2ind, logger=logger, metric=metric)

    # Test Teacher in Target Language
    test_df = dh.load_df(method='test', language=target_language)
    test_df['teacher_pred_id'] = teacher.predict(test_df)
    teacher_test_res = ev.evaluate(
        test_df,
        pred_col='teacher_pred_id',
        true_col='label',
        descr='Teacher Test {}'.format(target_language))

    printstr('Loading dev {} data'.format(target_language))
    dev_df = dh.load_df(method='dev', language=target_language)
    dev_df['teacher_pred_id'] = teacher.predict(dev_df)
    teacher_dev_res = ev.evaluate(
        dev_df,
        pred_col='teacher_pred_id',
        true_col='label',
        descr='Teacher Dev {}'.format(target_language))

    # Apply Teacher on unlabeled Train examples (Target Language)
    printstr("Applying Teacher on {} train data ({})".format(
        train_df.shape[0], target_language))
    train_df['teacher_pred_id'] = teacher.predict(train_df)
    teacher_train_res = ev.evaluate(
        train_df,
        pred_col='teacher_pred_id',
        true_col='label',
        descr='Teacher Train {}'.format(target_language))

    # Ignore "bad" Teacher's predictions...
    # fix: .copy() so the column assignment below modifies a real frame
    # instead of a view of train_df (pandas SettingWithCopy).
    train_df_teacher = train_df[train_df['teacher_pred_id'] != -1].copy()
    train_df_teacher['teacher_label'] = train_df_teacher[
        'teacher_pred_id'].map(lambda x: dh.ind2label[x])

    # balance classes
    min_class_support = train_df_teacher['teacher_label'].value_counts().min()
    train_df_teacher = train_df_teacher.groupby(
        'teacher_label', group_keys=False).apply(
            lambda x: x.sample(min_class_support, random_state=42))

    # Train Student on Target Language
    printstr(
        "Training Student using Teacher's predictions on {} documents ({})".
        format(train_df_teacher.shape[0], target_language))
    student.train(train_df_teacher,
                  eval_df=dev_df,
                  label_name='teacher_label',
                  eval_label_name='label')

    # Apply Student as Teacher on target language
    if num_iter == 2:
        train_df['student_pred_id'] = student.eval(train_df,
                                                   label_name='label')
        printstr("Applying Student on train data...")
        student_train_res = ev.evaluate(
            train_df,
            pred_col='student_pred_id',
            true_col='label',
            descr='Student Train {}'.format(target_language))

        # Retrain Student on target language
        if actual_student_name != 'logreg':
            student_args['model_name'] = actual_student_name
            student = Student(language=target_language,
                              label2ind=dh.label2ind,
                              student_args=student_args,
                              tokenizer=tokenizer.tokenizer,
                              manual_seed=manual_seed)

        printstr(
            "Re-training student on training data using student's predictions")
        train_df['student_label'] = train_df['student_pred_id'].map(
            lambda x: dh.ind2label[x])
        if dh.dataset == 'cls':
            # balance positive and negative class
            min_class_support = train_df['student_label'].value_counts().min()
            train_df = train_df.groupby(
                'student_label', group_keys=False).apply(
                    lambda x: x.sample(min_class_support, random_state=42))
        if actual_student_name == 'mbert':
            source_df['student_label'] = source_df['label']
            crosslingual_df = pd.concat([source_df, train_df])
            # fix: sample() returns a new frame — the original discarded the
            # result, so the intended shuffle never happened.
            crosslingual_df = crosslingual_df.sample(frac=1)
            student.train(crosslingual_df,
                          eval_df=dev_df,
                          label_name='student_label',
                          eval_label_name='label')
        else:
            student.train(train_df,
                          eval_df=dev_df,
                          label_name='student_label',
                          eval_label_name='label')
    elif num_iter == 1:
        student_train_res = {}
    else:
        # fix: raise a specific exception instead of bare BaseException.
        raise NotImplementedError('not implemented: num_iter={}'.format(num_iter))

    # Evaluate Student on Target Language
    printstr("Evaluating Student on Dev set ({})".format(target_language))
    dev_df['student_pred_id'] = student.eval(dev_df, label_name='label')
    student_dev_res = ev.evaluate(
        dev_df,
        pred_col='student_pred_id',
        true_col='label',
        descr='Student Dev {}'.format(target_language))

    printstr("Evaluating Student on Test set ({})".format(target_language))
    test_df['student_pred_id'] = student.eval(test_df, label_name='label')
    student_test_res = ev.evaluate(
        test_df,
        pred_col='student_pred_id',
        true_col='label',
        descr='Student Test {}'.format(target_language))

    # Save student
    student.save(seed_words_path)
    return {
        'teacher_args': teacher_args,
        'student_args': student_args,
        'teacher_train_res': teacher_train_res,
        'teacher_dev_res': teacher_dev_res,
        'teacher_test_res': teacher_test_res,
        'student_dev_res': student_dev_res,
        'student_test_res': student_test_res,
        'student_train_res': student_train_res,
    }
Beispiel #28
0
def run_model(args, dataset):
    """Train a binary NN classifier inside an active-learning loop.

    Compiles the model, draws an initial random train/dev/test split, then
    runs up to 200 active-learning rounds.  From round 2 on, the best saved
    weights are reloaded and new samples are queried via active learning;
    queried samples grow dev/test until ``args.test_amount_limit`` is
    reached, after which all new samples go to the training set.  The loop
    stops early once the test set is full and accuracy exceeds 0.98, or the
    training set reaches ``args.train_amount_limit``.

    Args:
        args: parsed CLI namespace (optimizer settings, epochs, batch_size,
            size limits, ``out_dir_path``, distribution flags, ...).
        dataset: the full pool of labelled samples to draw from.
    """
    ############################################################################################
    ## Set optimizers and compile NN model
    #
    # NOTE: the original unused locals (clipvalue/clipnorm) and the unused
    # `import keras.optimizers as opt` were removed; clipping is handled
    # inside get_optimizer(args).
    optimizer = get_optimizer(args)
    loss = 'binary_crossentropy'
    metric = 'accuracy'

    model = create_nn_model(args)
    model.compile(loss=loss, optimizer=optimizer, metrics=[metric])
    logger.info('model compilation completed!')

    ###############################################################################################################################
    ## Training
    #

    # Split dataset into positive and negative subset
    dataset_pos, dataset_neg = H.splitDatasetClass(dataset)

    # Initial random split; training portion is three times the dev/test size.
    train_x, train_y, dev_x, dev_y, test_x, test_y = H.getDatasetRandomSingleClass(
        dataset, args.test_size * 3, args.test_size, args.test_size)
    if (args.is_equal_distribution):
        # Re-draw the split with balanced classes when requested.
        train_x, train_y, dev_x, dev_y, test_x, test_y = H.getDatasetRandom(
            dataset_pos, dataset_neg, args.test_size * 3, args.test_size,
            args.test_size)

    ##############################################
    ## Active learning Loop
    #

    counter = 0
    curr_best_acc = 0
    best_acc, best_active_counter = 0, 0
    best_acc_full_len, best_active_counter_full_len = 0, 0
    # Stop the active learning if the test set is larger than the specified amount
    while counter < 200:
        if (len(test_y) >= args.test_amount_limit and curr_best_acc > 0.98):
            break
        if (len(train_y) >= args.train_amount_limit): break
        counter += 1
        if counter > 1:
            logger.info(
                "================ Active Loop %i ====================" %
                counter)
            # Reload the best weights from the previous round before querying
            # new samples, then re-compile with the same optimizer settings.
            model = load_model_architecture_and_weights(
                args.out_dir_path + '/models/model_arch.json',
                args.out_dir_path + '/models/best_model_weights.h5')
            model.compile(loss=loss, optimizer=optimizer, metrics=[metric])

            # Query new samples via active learning (balanced or not).
            if (args.is_equal_distribution):
                (train_active_x, train_active_y, dev_active_x, dev_active_y,
                 test_active_x, test_active_y
                 ) = AL.obtain_data_active_learning_equal_distribution(
                     args, model, dataset_pos, dataset_neg)
            else:
                (train_active_x, train_active_y, dev_active_x, dev_active_y,
                 test_active_x,
                 test_active_y) = AL.obtain_data_active_learning(
                     args, model, dataset)

            # Concatenate additional dataset from active learning with the real dataset
            train_x = np.concatenate((train_x, train_active_x), axis=0)
            train_y = np.concatenate((train_y, train_active_y), axis=0)
            if (len(test_y) < args.test_amount_limit):
                dev_x = np.concatenate((dev_x, dev_active_x), axis=0)
                dev_y = np.concatenate((dev_y, dev_active_y), axis=0)
                test_x = np.concatenate((test_x, test_active_x), axis=0)
                test_y = np.concatenate((test_y, test_active_y), axis=0)
            else:
                # If already exceed the desired test samples, add all to training set
                train_x = np.concatenate((train_x, dev_active_x), axis=0)
                train_y = np.concatenate((train_y, dev_active_y), axis=0)
                train_x = np.concatenate((train_x, test_active_x), axis=0)
                train_y = np.concatenate((train_y, test_active_y), axis=0)

        ############################################################################################
        ## Compute class weight (where data is usually imbalanced)
        #
        class_weight = H.compute_class_weight(
            np.array(train_y, dtype='float32'))

        ###############################################
        ## Real Training Starts
        #
        AL.print_shape(train_x, train_y, dev_x, dev_y, test_x, test_y)

        str_train_x = np.array(train_x)
        str_train_y = np.array(train_y)
        str_dev_x = np.array(dev_x)
        str_dev_y = np.array(dev_y)
        str_test_x = np.array(test_x)
        str_test_y = np.array(test_y)
        if (args.num_str_parameter > 0):
            str_train_x, str_train_y, str_dev_x, str_dev_y, str_test_x, str_test_y = H.convertDataWithStrArgs(
                train_x, train_y, dev_x, dev_y, test_x, test_y)

        evl = Evaluator(args.out_dir_path, (str_train_x, str_train_y),
                        (str_dev_x, str_dev_y), (str_test_x, str_test_y),
                        no_threshold=True)

        logger.info(
            '---------------------------------------------------------------------------------------'
        )
        logger.info('Initial Evaluation:')
        # Epoch index -1 marks the evaluation of the untrained/reloaded model.
        evl.evaluate(model, -1)

        # Print the initial evaluation summary.
        evl.print_info()

        total_train_time = 0
        total_eval_time = 0

        for ii in range(args.epochs):
            t0 = time()
            history = model.fit(str_train_x,
                                str_train_y,
                                batch_size=args.batch_size,
                                class_weight=class_weight,
                                nb_epoch=1,
                                shuffle=True,
                                verbose=0)
            tr_time = time() - t0
            total_train_time += tr_time

            # Evaluate
            t0 = time()
            curr_best_acc = evl.evaluate(model, ii)
            evl_time = time() - t0
            total_eval_time += evl_time

            logger.info(
                'Epoch %d, train: %is (%.1fm), evaluation: %is (%.1fm)' %
                (ii, tr_time, tr_time / 60.0, evl_time, evl_time / 60.0))
            logger.info(
                '[Train] loss: %.4f , metric: %.4f' %
                (history.history['loss'][0], history.history['acc'][0]))
            # Print the per-epoch evaluation summary.
            evl.print_info()

        # Track the best accuracy seen over all active-learning rounds.
        if best_acc < curr_best_acc:
            best_acc = curr_best_acc
            best_active_counter = counter
        logger.info('Best accuracy @%d : %.4f' %
                    (best_active_counter, best_acc))

        # Separately track the best accuracy achieved after the test set
        # reached its full target size.
        if best_acc_full_len < curr_best_acc and len(
                test_y) >= args.test_amount_limit:
            best_acc_full_len = curr_best_acc
            best_active_counter_full_len = counter
        logger.info('Best accuracy with full test set @%d : %.4f' %
                    (best_active_counter_full_len, best_acc_full_len))
Beispiel #29
0
def LoadMovieLensData():
    """Load the MovieLens ratings and popularity rankings.

    Returns:
        A (MovieLens helper, ratings dataset, popularity ranks) tuple.
    """
    lens = MovieLens()
    print("Loading movie ratings...")
    ratings = lens.loadMovieLensLatestSmall()
    print(
        "\nComputing movie popularity ranks so we can measure novelty later..."
    )
    popularity = lens.getPopularityRanks()
    return lens, ratings, popularity


# Fix random seeds so evaluation runs are reproducible.
np.random.seed(0)
random.seed(0)

# load up common data set for the recommender algorithms
(ml, evaluationData, rankings) = LoadMovieLensData()

# construct an evaluator to compare the algorithms against each other
evaluator = Evaluator(evaluationData, rankings)

# content-based KNN recommender
contentKNN = ContentKNNAlgorithm()
evaluator.addAlgorithm(contentKNN, "ContentKNN")

# just make random recommendations
Random = NormalPredictor()
evaluator.addAlgorithm(Random, "Random")

# run the comparison across all registered algorithms
# NOTE(review): confirm the meaning of the boolean flag against Evaluator.evaluate
evaluator.evaluate(False)

# show sample top-N recommendations from each algorithm
evaluator.sampleTopNRecs(ml)
Beispiel #30
0
class Client:
    """Stdin/stdout game client that selects its moves with an Evaluator.

    The protocol (inferred from run()): the server sends our color, then
    moves alternate; "done" ends a game.  Board state lives in a GameState
    that is rotated after every move.
    """

    def __init__(self, width, height, evaluator="default", name="JJF"):
        self.color = ""
        self.width = width
        self.height = height
        self.name = name
        self.evaluator = Evaluator(evaluator)
        self.innerstate = GameState(width, height)
        self.innerstate.populate_bauern()

    def find_best_move(self):
        """Return the evaluator's best move for the current position."""
        _rating, best = self.evaluator.evaluate(self.innerstate, -100, 100)
        return best

    def connect(self):
        """Announce ourselves to the server by printing our name."""
        print(self.name)

    def start_game(self):
        """Read our color from stdin, acknowledge, and reset the board."""
        self.color = input()
        print("ok")
        board = GameState(self.width, self.height)
        board.populate_bauern()
        self.innerstate = board

    def end_game(self):
        """Consume the terminating message; anything but "done" is an error."""
        if input() == "done":
            return
        return  #error!

    def run(self):
        """Main loop: play games until the server stops sending them."""
        self.connect()
        while True:
            turn = "white"
            self.start_game()
            while True:
                mirrored = turn == "black"
                if turn == self.color:
                    # Our turn: compute and announce a move.
                    move = self.find_best_move()
                    print(Move.write_move(move, mirrored, self.innerstate))
                else:
                    # Opponent's turn: read their move (or end of game).
                    line = input()
                    if line == "done":
                        break
                    move = Move.parse_move(line, mirrored, self.innerstate)

                if not self.innerstate.checkIfLegal(move):
                    print("uups")  #error
                self.innerstate.applyMove(move)
                self.innerstate.rotateBoard()
                turn = self.invert_turn(turn)

                if self.innerstate.game_is_finished() != None:
                    self.end_game()
                    break

    @staticmethod
    def invert_turn(turn):
        """Return the opposing color ("white" <-> "black")."""
        return "white" if turn != "white" else "black"
Beispiel #31
0
class AiMove():
    """Move search for a two-player hand/finger tapping game.

    Inferred board encoding (from the arithmetic in tapToAll):
    game_state.values[0] is the human player's (left, right) hand values,
    game_state.values[1] the AI's; hand values wrap mod 5 and 0 means the
    hand is dead; game_state.player == 1 means it is the AI's turn.
    NOTE(review): these semantics are inferred — confirm against GameState.
    """

    def predictMove(self, game_state: GameState):
        """Pick the successor state with the highest utility.

        Returns a (best_finger_index, best_utility, resulting_state) tuple,
        where best_finger_index indexes the successor list from tapToAll.
        """
        # Utilities and states of every immediate successor.
        util_value, prob_state = self.tapToAll(game_state)
        prob_state_iter = prob_state.copy()
        best_util = None

        # NOTE(review): range(0) never iterates, so this deeper-lookahead
        # refinement pass is currently disabled.  If re-enabled, prob_state2
        # could be referenced before assignment when no slot expands.
        for iterr in range(0):
            for i in range(len(prob_state_iter)):
                best_util = None
                if util_value[i] is not None:
                    # Expand successor i and take the minimum utility among
                    # its children (i.e. assume the opponent replies best).
                    util_value2, prob_state2 = self.tapToAll(
                        prob_state_iter[i])
                    print("iter ke i", i)
                    for y in range(len(util_value2)):
                        if util_value2[y] is not None:
                            if best_util is None:
                                best_util = util_value2[y]
                            elif best_util > util_value2[y]:
                                best_util = util_value2[y]
                    print("Minim UTIL kecil", best_util)

                    # Lower the stored utility if the opponent can force a
                    # worse outcome from this successor.
                    if best_util is not None and best_util < util_value[i]:
                        print("pindah dari ", util_value[i], " util ke ",
                              best_util)
                        util_value[i] = best_util
            prob_state_iter = prob_state2.copy()

        # for i

        # Select the legal successor with the maximum utility.
        best_finger = None
        best_util = None
        for i in range(len(util_value)):
            if util_value[i] is not None:
                if best_util is None:
                    best_util = util_value[i]
                    best_finger = i
                elif best_util < util_value[i]:
                    best_util = util_value[i]
                    best_finger = i

        print(util_value)
        print("BEST UTIL ",
              best_util,
              " finger ",
              best_finger,
              " state ",
              end='')
        prob_state[best_finger].print()
        return best_finger, best_util, prob_state[best_finger]

    def tapToAll(self, game_state: GameState):
        """Enumerate all successor states of game_state with their utilities.

        Returns (utility_value, probability_move).  utility_value has five
        fixed slots — indices 0-3 are the four tap moves, index 4 the split
        move — with None marking an illegal move.  probability_move holds
        the matching successor GameStates; the split entry is only appended
        when the mover's hand sum is even, so the lists may differ in length.
        """
        self.eval = Evaluator()

        player_left = game_state.values[0][0]
        player_right = game_state.values[0][1]
        ai_left = game_state.values[1][0]
        ai_right = game_state.values[1][1]

        probability_move = []
        utility_value = [0, 0, 0, 0, 0]

        if (game_state.player == 1):  #Ai turn
            # AI taps: (my-left -> their-left), (my-left -> their-right),
            # (my-right -> their-left), (my-right -> their-right), mod 5.
            probability_move.append(
                GameState(0, (player_left + ai_left) % 5, player_right,
                          ai_left, ai_right))
            probability_move.append(
                GameState(0, player_left, (player_right + ai_left) % 5,
                          ai_left, ai_right))
            probability_move.append(
                GameState(0, (player_left + ai_right) % 5, player_right,
                          ai_left, ai_right))
            probability_move.append(
                GameState(0, player_left, (player_right + ai_right) % 5,
                          ai_left, ai_right))
            # Split move: only possible when the hand sum is even.
            if ((ai_left + ai_right) % 2 == 0):
                probability_move.append(
                    GameState(0, player_left, player_right,
                              int((ai_left + ai_right) / 2),
                              int((ai_left + ai_right) / 2)))
        else:  #Player turn
            probability_move.append(
                GameState(1, player_left, player_right,
                          (player_left + ai_left) % 5, ai_right))
            probability_move.append(
                GameState(1, player_left, player_right,
                          (player_right + ai_left) % 5, ai_right))
            probability_move.append(
                GameState(1, player_left, player_right, ai_left,
                          (player_left + ai_right) % 5))
            probability_move.append(
                GameState(1, player_left, player_right, ai_left,
                          (player_right + ai_right) % 5))
            # Split move for the human player, same even-sum condition.
            if ((player_left + player_right) % 2 == 0):
                probability_move.append(
                    GameState(1, int((player_left + player_right) / 2),
                              int((player_left + player_right) / 2), ai_left,
                              ai_right))

        for i in range(len(probability_move)):
            print("i:", i, end=' \t')
            probability_move[i].print()
            # Mark moves that use a dead (zero) hand as illegal.  These
            # conditions do not depend on i, so they set the same slots on
            # every pass of the loop.
            if ai_left == 0:
                utility_value[0] = utility_value[1] = None
            if ai_right == 0:
                utility_value[2] = utility_value[3] = None
            if player_left == 0:
                utility_value[0] = utility_value[2] = None
            if player_right == 0:
                utility_value[1] = utility_value[3] = None

            # The split move is illegal when both hands are already equal or
            # the hand sum is odd.
            if ((ai_left == ai_right) or (((ai_left + ai_right) % 2) != 0)
                ) and probability_move[i].player == 0:
                utility_value[4] = None
            if ((player_left == player_right) or
                ((player_left + player_right) % 2) != 0
                ) and probability_move[i].player == 1:
                utility_value[4] = None

            # Score the remaining legal successors with the evaluator.
            if utility_value[i] is not None:
                utility_value[i] = self.eval.evaluate(probability_move[i], 1)

        print("UTILITY ", utility_value)

        return utility_value, probability_move
Beispiel #32
0
logger.info(
    '---------------------------------------------------------------------------------------'
)

###############################################################################################################################
## Training
#

logger.info('Initial Evaluation:')
# Evaluator tracks train/dev/test metrics across epochs for this model type.
evl = Evaluator(logger,
                out_dir, (train_qn_x, train_ans_x, train_y),
                (dev_qn_x, dev_ans_x, dev_y), (test_qn_x, test_ans_x, test_y),
                model_type,
                batch_size_eval=batch_size_eval,
                print_info=True)
# Epoch index -1 marks the evaluation of the untrained model.
evl.evaluate(model, -1)

evl.print_info()

# Cumulative wall-clock totals accumulated over the epochs below.
total_train_time = 0
total_eval_time = 0
for ii in range(nb_epoch):
    # Training
    train_input = [train_qn_x, train_ans_x]
    if model_type == 'cnnwang2016':
        train_input = [train_qn_x, train_qn_x, train_ans_x, train_ans_x]

    t0 = time()
    # this model.fit function is the neuralnet training
    train_history = model.fit(train_input,