def plot_runs_mean(runs_results):
    graph_name = shared.getConst("graph_name")
    max_duration = shared.getConst("max_duration")
    nb_runs, generations, mean_fit_mins, mean_fit_avg, mean_duration_mins, mean_duration_maxs = runs_results
    fig, ax1 = plt.subplots()
    fig.suptitle(f"Mean results over {nb_runs} runs, graph: '{graph_name}', duration constraint: {max_duration}")
    line1 = ax1.plot(generations, mean_fit_mins, "b-", label="Minimum Cost")
    line3 = ax1.plot(generations, mean_fit_avg, "g-", label="Average Cost")
    ax1.set_xlabel("Generation")
    ax1.set_ylabel("Cost", color="b")
    for tl in ax1.get_yticklabels():
        tl.set_color("b")

    ax2 = ax1.twinx()
    line2 = ax2.plot(generations, mean_duration_mins, "r-", label="Minimum Duration")
    line4 = ax2.plot(generations, mean_duration_maxs, "y-", label="Maximum Duration")
    ax2.set_ylabel("Duration", color="r")
    for tl in ax2.get_yticklabels():
        tl.set_color("r")

    lns = line1 + line2 + line3 + line4
    labs = [l.get_label() for l in lns]
    ax1.legend(lns, labs, loc="upper right")

    OUTPUT_DIR = os.environ.get("AZ_BATCH_TASK_DIR", ".")
    OUTPUT_FILE = os.path.join(OUTPUT_DIR, f"{graph_name}_{nb_runs}_runs.png")
    plt.savefig(OUTPUT_FILE)
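plot_runs_mean only reads the shared constants, so a launching script has to broadcast them before any worker asks for them. A minimal driver sketch, launched with `python -m scoop`; the load_graph, run_ga, and aggregate_runs helpers and their arguments are hypothetical:

from scoop import futures, shared

if __name__ == "__main__":
    # Broadcast once from the root process; workers read these via shared.getConst.
    shared.setConst(graph_name="example_graph",         # hypothetical value
                    graph=load_graph("example_graph"),  # load_graph is hypothetical
                    max_duration=120)
    runs_results = aggregate_runs(futures.map(run_ga, range(10)))  # both helpers hypothetical
    plot_runs_mean(runs_results)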
def extractComProFea(compro):
    com, pro = compro
    comFea = sh.getConst('krDict')[com]
    proFea = sh.getConst('aacDict')[pro]
    fea = np.append(comFea, proFea)
    fea = fea.tolist()
    return fea
Example #3
def _cluster(params):
    cls = None
    method = sh.getConst('method')
    if method == 'kmedoid':
        assert False
        # from kmedoid import kmedoid
        # cls = kmedoid
    elif method == 'dbscan':
        from sklearn.cluster import DBSCAN
        cls = DBSCAN(eps=params['eps'], min_samples=params['min_samples'],
                     metric='precomputed')
    else:
        assert False, 'FATAL: unknown cluster method'

    ##
    mat = sh.getConst('mat')
    labels = cls.fit_predict(mat)
    nLabels = len(set(labels))

    ##
    sil = None
    cal = None
    if 2 <= nLabels <= len(labels) - 1:
        sil = met.silhouette_score(mat, labels, metric='precomputed')
        # calinski_harabaz_score was renamed to calinski_harabasz_score in newer scikit-learn
        cal = met.calinski_harabasz_score(mat, labels)
    perf = dict(silhouette_score=sil, calinski_harabaz_score=cal)

    return (labels,perf)
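Both the clustering method and the distance matrix travel through the shared store, so each task only carries its small parameter dict. A sketch of the matching parameter sweep, assuming `_cluster` above is importable by the workers; the data and grid values are illustrative:

import numpy as np
from scoop import futures
from scoop import shared as sh

if __name__ == "__main__":
    X = np.random.rand(100, 2)
    # Illustrative precomputed pairwise Euclidean distance matrix
    mat = np.linalg.norm(X[:, None, :] - X[None, :, :], axis=-1)
    sh.setConst(method='dbscan', mat=mat)  # broadcast once
    grid = [dict(eps=e, min_samples=m) for e in (0.3, 0.5, 0.7) for m in (3, 5)]
    for labels, perf in futures.map(_cluster, grid):
        print(perf)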
Example #5
def funcUseSharedConstant():
    # Tries on a mutable and an immutable object
    assert shared.getConst('myVar') == {
        1: 'Example 1',
        2: 'Example 2',
        3: 'Example 3',
    }
    assert shared.getConst('secondVar') == "Hello World!"
    return True
def genetic_algo():
    # Shared constants
    graph_name = shared.getConst("graph_name")
    graph = shared.getConst("graph")
    max_duration = shared.getConst("max_duration")
    # Extra toolbox registers
    toolbox.register("population_guess", initPopulation, list, toolbox.individual_guess)
    toolbox.register("evaluate", evaluate, graph, max_duration=max_duration)
    # Creating the population
    pop = toolbox.population_guess()

    # Creating a logbook for recording statistics
    logbook = tools.Logbook()

    # The heart of the genetic algorithm: the generational loop

    for g in range(NGEN):
        # Select the next generation individuals
        offspring = toolbox.select(pop, len(pop))
        # Clone the selected individuals
        offspring = [toolbox.clone(ind) for ind in offspring]

        # Apply crossover on the offspring
        for child1, child2 in zip(offspring[::2], offspring[1::2]):
            if random.random() < CXPB:
                toolbox.mate(child1, child2)
                del child1.fitness.values
                del child2.fitness.values

        # Apply mutation on the offspring
        for mutant in offspring:
            if random.random() < MUTPB:
                toolbox.mutate(mutant)
                del mutant.fitness.values

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        # The population is entirely replaced by the offspring
        pop[:] = offspring

        # Recording statistics
        record = mstats.compile(pop)
        logbook.record(gen=g, evals=len(invalid_ind), **record)

    gen = logbook.select("gen")
    fit_mins = logbook.chapters["cost"].select("min")
    duration_mins = logbook.chapters["duration"].select("min")
    duration_maxs = logbook.chapters["duration"].select("max")
    fit_avg = logbook.chapters["cost"].select("avg")

    return gen, fit_mins, fit_avg, duration_mins, duration_maxs
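genetic_algo hands the fitness evaluations to toolbox.map, so registering scoop.futures.map there is what makes them parallel, and the constants must be published before the workers need them. A launch-side sketch; load_graph and the constant values are hypothetical:

from scoop import futures, shared

toolbox.register("map", futures.map)  # distribute toolbox.evaluate across SCOOP workers

if __name__ == "__main__":
    shared.setConst(graph_name="example_graph",
                    graph=load_graph("example_graph"),  # hypothetical loader
                    max_duration=100)
    gen, fit_mins, fit_avg, duration_mins, duration_maxs = genetic_algo()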
Example #8
def funcUseDeletedSharedConstant():
    # Tries to retrieve deleted variables; getConst should time out and return None
    shared.deleteConst('myVar')
    shared.deleteConst('secondVar')
    timeout = 0.01
    assert shared.getConst('myVar', timeout=timeout) is None
    assert shared.getConst('secondVar', timeout=timeout) is None
    try:
        # You should not be able to reset a deleted variable
        shared.setConst(myVar=42)
        return False
    except TypeError:
        pass
    return True
Example #9
def evaluate(compile_valtree, compile_condtree,
             individual: DoubleTreeBasedIndividual):
    compiled_conditions = [
        compile_condtree(cond_tree) for cond_tree in individual.cond_trees
    ]
    # compiled_conditions = [compile_condtree(cond_tree) for cond_tree in individual["CONDITION_TREES"]]
    compiled_values = [
        compile_valtree(val_tree) for val_tree in individual.value_trees
    ]
    # compiled_values = [compile_valtree(val_tree) for val_tree in individual["VALUE_TREES"]]
    results = []
    num_of_solved = 0
    # run with Bomb only
    # nonogram_solved = utils.load_solved_bomb_nonogram_from_file()
    # nonogram_unsolved = utils.load_unsolved_bomb_nonogram_from_file()
    # results.append(evaluate_single_nonogram(compiled_conditions, compiled_values, nonogram_solved, nonogram_unsolved))

    # run on all solved nonograms
    train_nonograms_sh = shared.getConst('train_nonograms')
    for nonogram_unsolved, nonogram_solved in train_nonograms_sh:
        result = round(
            evaluate_single_nonogram(compiled_conditions, compiled_values,
                                     nonogram_solved, nonogram_unsolved), 4)
        results.append(result)
        if result == 5:
            num_of_solved += 1
    if print_individual_fitness:
        print("Fitness:", results)  # entries were already rounded to 4 digits above
    # print('-------------------')

    # if num_of_solved > 0:
    #     print("Solved:", num_of_solved, "Nonograms")
    distance = _calc_distance(results)
    return distance,
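evaluate fetches the training set from the shared store instead of shipping it with every future. The corresponding setup would broadcast it once; load_nonograms is a hypothetical loader returning (unsolved, solved) pairs, matching the iteration order above:

from scoop import shared

train_nonograms = load_nonograms("train/")  # hypothetical loader
shared.setConst(train_nonograms=train_nonograms)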
Example #10
def maxTreeDepthDivide(rootValue, currentDepth=0, parallelLevel=2):
    """Finds a tree node that represents rootValue and computes the max depth
       of this tree branch.
       This function will emit new futures until currentDepth reaches parallelLevel."""
    thisRoot = shared.getConst('myTree').search(rootValue)
    if currentDepth >= parallelLevel:
        return thisRoot.maxDepth(currentDepth)
    else:
        # Base case
        if not any([thisRoot.left, thisRoot.right]):
            return currentDepth
        if not all([thisRoot.left, thisRoot.right]):
            return thisRoot.maxDepth(currentDepth)

        # Parallel recursion
        return max(
            futures.map(
                maxTreeDepthDivide,
                [
                    thisRoot.left.payload,
                    thisRoot.right.payload,
                ],
                cycle([currentDepth + 1]),
                cycle([parallelLevel]),
            )
        )
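The recursion looks nodes up by value in the shared tree rather than pickling subtrees into each future, so only payload values cross the wire. A driver sketch, assuming a binary-tree class exposing the insert, search, and maxDepth methods used above:

from scoop import futures, shared

if __name__ == "__main__":
    tree = BinaryTree()                 # assumed tree class
    for value in (8, 4, 12, 2, 6, 10, 14):
        tree.insert(value)
    shared.setConst(myTree=tree)        # broadcast the whole tree once
    print(maxTreeDepthDivide(8))        # parallel depth of the branch rooted at 8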
Example #12
def process_doc(filepath):
    print "filepath", filepath
    journal_title, article_subjects, article_kwds, article_title, article_abstract, article_contents = get_fields(filepath)
    doc_id = filepath.strip().split("/")[-1].split(".")[0]

    s  = "<DOC>\n"
    s += "<DOCNO> "+ doc_id  +" </DOCNO>\n "
    if journal_title:
        s += "<JTITLE> "+ journal_title  +" </JTITLE>\n "
    if article_subjects:
        for sub in article_subjects:
            if sub:
                s += "<SUB> "+ sub  +" </SUB>\n "
    if article_kwds:
        for kwd in article_kwds:
            if kwd:
                s += "<KWD> "+ kwd  +" </KWD>\n "
    if article_title:
        s += "<ATITLE> "+ article_title  +" </ATITLE>\n "
    if article_abstract:
        s += "<ABSTRACT> "+ article_abstract  +" </ABSTRACT>\n "
    s += "<TEXT> "+ article_contents  +" </TEXT>\n"
    s += "</DOC>\n"

    destination = shared.getConst('destination')
    with codecs.open(destination + "/" + doc_id, encoding="utf-8", mode="w") as f:
        f.write(s)
    return True
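All tasks write into the same directory, so only destination is broadcast and the file paths are mapped out to the workers. A sketch of the launch side; the glob pattern is illustrative:

import glob
from scoop import futures, shared

if __name__ == "__main__":
    shared.setConst(destination="converted_docs")
    paths = glob.glob("corpus/*.xml")   # illustrative input pattern
    assert all(futures.map(process_doc, paths))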
Example #13
def assess_reaction(rxn, reactionSystems, tolerance, data):
    """
    Returns whether the reaction is important or not in the reactions.

    It iterates over the reaction systems, and loads the concentration profile 
    of each reaction system into memory.

    It iterates over a number of samples in profile and 
    evaluates the importance of the reaction at every sample.


    """
    
    logging.debug('Assessing reaction {}'.format(rxn))

    reactions = shared.getConst('reactions')
    

    # read in the intermediate state variables

    for datum, reactionSystem in zip(data, reactionSystems):    
        T, P = reactionSystem.T.value_si, reactionSystem.P.value_si
        
        species_names, profile = datum

        # Take N evenly spaced indices from the table with simulation results:
        # these are the time steps between the start and end of the batch
        # reactor simulation at which the importance of the reaction is
        # evaluated. The more timesteps, the less chance of removing an
        # important reaction, but the more simulations need to be carried out.
        timesteps = len(profile) // 2
        logging.debug('Evaluating the importance of a reaction at {} time samples.'.format(timesteps))

        assert timesteps <= len(profile)
        indices = map(int, np.linspace(0, len(profile) - 1, num=timesteps))
        for index in indices:
            assert profile[index] is not None
            timepoint, coreSpeciesConcentrations = profile[index]

            coreSpeciesConcentrations = {key: float(value) for (key, value) in zip(species_names, coreSpeciesConcentrations)}
            
            # print 'Species concentrations at {}: {}'.format(timepoint, reactionSystem.coreSpeciesConcentrations)
            for species_i in rxn.reactants:
                if isImportant(rxn, species_i, reactions, 'reactant', tolerance, T, P, coreSpeciesConcentrations):
                    return True

            #only continue if the reaction is not important yet.
            for species_i in rxn.products:
                if isImportant(rxn, species_i, reactions, 'product', tolerance, T, P, coreSpeciesConcentrations):
                    return True

    return False
Example #14
def _calc_max_possible_fitness():
    train_nonograms = shared.getConst('train_nonograms')
    compares = [
        _compare_to_solution(solved, solved)
        for unsolved, solved in train_nonograms
    ]
    res = np.mean(compares)
    print('max possible fitness is:', res)
    return res
def getValue(words):
    """Computes the sum of the values of the words."""
    value = 0
    for word in words:
        for letter in word:
            # shared.getConst will evaluate to the dictionary broadcasted by
            # the root Future
            value += shared.getConst("lettersValue")[letter]
    return value
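The letter-value dictionary is broadcast by the root future and looked up by every worker, so the word groups themselves stay tiny. A driver sketch in the same spirit; the dictionary values and word groups are illustrative:

from scoop import futures, shared

if __name__ == "__main__":
    shared.setConst(lettersValue={letter: ord(letter) - ord('a')
                                  for letter in 'abcdefghijklmnopqrstuvwxyz'})
    groups = [["scoop", "shared"], ["constant", "future"]]
    print(list(futures.map(getValue, groups)))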
Example #17
def generate(size, pmin, pmax, smin, smax):
    part = creator.Particle(random.uniform(pmin, pmax) for _ in range(size))
    part.speed = [random.uniform(smin, smax) for _ in range(size)]
    part.smin = smin
    part.smax = smax
    part.pmax = pmax
    part.pmin = pmin
    factory = getattr(parameterSetsAdaption, shared.getConst('adaptionParams'))
    parameterListIndex = shared.getConst('parameterListIndex')
    if parameterListIndex is not None:
        factory = factory[int(parameterListIndex)]
    vfile_name = shared.getConst('vesselInputFile')
    grp_name = shared.getConst('vessel_grp')
    factory['adaption'].update(
        vesselFileName=vfile_name,
        vesselGroupName=grp_name,
    )
    part.adaptionParameters = factory
    return part
Example #18
def myFunc(parameter):
    """This function will be executed on the remote host even if it was not
       available at launch."""
    print('Hello World from {0}!'.format(scoop.worker))

    # It is possible to get a constant anywhere
    print(shared.getConst('myVar')[2])

    # Parameters are handled as usual
    return parameter + 1
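Because getConst blocks until the constant arrives (or its timeout elapses), myFunc works even on workers that join after the broadcast. A driver sketch:

from scoop import futures, shared

if __name__ == "__main__":
    shared.setConst(myVar={1: 'one', 2: 'two', 3: 'three'})
    print(list(futures.map(myFunc, range(5))))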
def initPopulation(pcls, ind_init):
    """
    pcls : class of the population (i.e. a toolbox attribute of an individual)
    ind_init : function to initialize individuals

    return : a list of individuals
    """
    graph = shared.getConst("graph")
    contents = init_generation(NB_POP, MAX_MACH, graph)
    return pcls(ind_init(c) for c in contents)
Example #20
def get(key):
    """
    Retrieves the shared variable identified by the parameter key.
    """
    try:
        return shared.getConst(key)
    except KeyError:
        logging.error('An object with the key {} could not be found.'.format(key))
        raise
Example #22
def get(key):
    """
    Retrieves the shared variable identified by the parameter key.
    """
    try:
        return shared.getConst(key, timeout=1e-9)
    except NameError:
        # NameError is raised when the SCOOP library is not imported properly.
        logger.debug('SCOOP not loaded. Not retrieving the shared object with key {}'.format(key))
Example #24
def broadcast(obj, key):
    """
    Broadcasts the object across the workers using the key parameter as the key.
    """
    kwargs = {key: obj}
    try:
        if shared.getConst(key):
            logger.debug('An object with the key {} was already broadcast.'.format(key))
        else:
            shared.setConst(**kwargs)
    except NameError:
        # NameError is raised when the SCOOP library is not imported properly.
        logger.debug('SCOOP not loaded. Not broadcasting the object {}'.format(obj))
def funcBroadcast():
    """
    Broadcast the data with the given key, 
    and retrieve it again by querying the key again.
    """
    data = 'foo'
    key = 'bar'

    broadcast(data, key)

    try:
        assert data == shared.getConst(key)
    except AssertionError:
        return False
    
    return True
def configureScoop():
    global CONFIGURED
    if CONFIGURED or not SCOOP_IS_RUNNING:
        return
    global MYSYSTEM, N_RANDOM_PROBLEMS, RANDOM_SEED, USE_RECAST, BASELINE, OUTPUT_FOLDER
    MYSYSTEM = shared.getConst('MYSYSTEM')
    N_RANDOM_PROBLEMS = shared.getConst('N_RANDOM_PROBLEMS')
    RANDOM_SEED = shared.getConst('RANDOM_SEED')
    USE_RECAST = shared.getConst('USE_RECAST')
    BASELINE = shared.getConst('BASELINE')
    OUTPUT_FOLDER = shared.getConst('OUTPUT_FOLDER')
    # finish
    configureEvaluator()
    CONFIGURED = True
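configureScoop copies shared constants into module globals on each worker; the launching process has to publish them first. A sketch of that side, with placeholder values:

from scoop import shared

def publishConfig():
    # Constants are write-once in SCOOP, so broadcast exactly once from the root.
    shared.setConst(MYSYSTEM='linux', N_RANDOM_PROBLEMS=100, RANDOM_SEED=42,
                    USE_RECAST=False, BASELINE='baseline', OUTPUT_FOLDER='results')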
Example #29
    def start_experiment(self):
        train_nonograms = shared.getConst('train_nonograms')
        nonogram_names = [
            unsolved.title for unsolved, solved in train_nonograms
        ]
        print('running experiment on', train_size, 'nonograms. names:',
              nonogram_names)
        # max_possible_fitness = _calc_max_possible_fitness()

        start = time.time()
        # mu = len(self.pop)
        # lambda_ = len(self.pop)
        # pop, log = algorithms.eaMuPlusLambda(self.pop, self.toolbox, mu, lambda_, prob_crossover_global, prob_mutate_global, num_gen,
        #                                      halloffame=self.hof, verbose=True, stats=self.stats)
        pop, log = eaSimple_new(self.pop,
                                self.toolbox,
                                prob_crossover_global,
                                prob_mutate_global,
                                num_gen,
                                halloffame=self.hof,
                                verbose=True,
                                stats=self.stats)
        end = time.time()
        return pop, log, self.hof, self.stats, end - start
Example #30
def process(sub_parameters):
    V = shared.getConst("V")
    sequence = eval("lambda x:{}".format(V['sequence']))
    weight_function = eval(V['weight_function'])
    extrema = V['extrema']
    i_ip = V['i_ip']
    i_i = V['i_i']
    
    sub_values = {V['subs_symbols'][a]: b[0] for a, b in sub_parameters.items()}
    subs_choice = matrix_map(V['choice'],lambda x,i,j:lambdify(V['pop_symbols'],x.subs(sub_values)))
    pops = [a.copy() for a in V['origional_pops']]
    for i in range(V['iterations']):
        for p in range(len(pops)):
            flattened_pop = [a[0,0] for a in pops[p]]
            weighted_choice = numpy.matrix(matrix_map(subs_choice,lambda x,i,j:x(*flattened_pop)))
            extrema_eigen_pairs = [eigen(weighted_choice*e,i_ip,i_i) for e in extrema]
            random.shuffle(extrema_eigen_pairs)
            extrema_eigen_pairs = sorted(extrema_eigen_pairs, key=operator.itemgetter(0), reverse=True)
            growths = [a[0] for a in extrema_eigen_pairs]
            weights = [weight_function(index,growths) for index in range(len(growths))]
            weights = [a/sum(weights) for a in weights]
            z = sequence(i)
            pops[p] = pops[p]*(1-z) + sum([weights[index]*extrema_eigen_pairs[index][1] for index in range(len(growths))])*z
    return {"parameters":sub_parameters,"pops":[[ppp.sum() for ppp in p] for p in pops]}
Example #31
def funcUseSharedFunction():
    assert shared.getConst('myRemoteFunc')(5) == 5 * 5
    assert shared.getConst('myRemoteFunc')(25) == 25 * 25
    return True
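Shared constants are not limited to data: any picklable callable can be published the same way, which is what this test exercises. A minimal sketch of the broadcasting side:

from scoop import shared

def funcSquare(x):
    return x * x

shared.setConst(myRemoteFunc=funcSquare)  # workers retrieve and call it via getConst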
Example #34
def ensembleSmote(xydev):
    xdevf, ydev = xydev
    # Pre-0.4 imbalanced-learn API; newer releases use SVMSMOTE and fit_resample
    sm = SMOTE(kind='svm', random_state=sh.getConst('smoteSeed'))
    xdevfr, ydevr = sm.fit_sample(xdevf, ydev)
    return (xdevfr, ydevr)
Example #35
def myParallelFunc(inV):
    myV = shared.getConst("myValue")
    return inV + myV
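For reference, a complete minimal run of this last pattern, launched with `python -m scoop`:

from scoop import futures, shared

if __name__ == "__main__":
    shared.setConst(myValue=100)
    print(list(futures.map(myParallelFunc, range(5))))  # [100, 101, 102, 103, 104]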