예제 #1
0
 def operate(self):
     """
     Operate the controller, i.e. run one evolutionary step on all natures.

     Spawns one NatureProcess per nature, waits for all of them to finish,
     reloads the evolved natures from their pickle dumps, re-attaches the
     controller's global map to every loaded object, migrates chromosomes
     between natures, and dumps the updated natures back to disk.
     :return: None
     """
     # Clear the list in place; natures are re-read from disk below.
     self.nature_list[:] = []
     process_list = []
     for i in range(0, self.nature_num):
         # NOTE(review): chromo_num is fed from self.chromo_sum -- confirm
         # this is intentional and not a typo for self.chromo_num.
         process_list.append(NatureProcess(idx=i, save_dir=self.save_dir, read_dir=self.read_dir,
                                           chromo_num=self.chromo_sum, new_chromo_num=self.new_chromo_num,
                                           punish=self.punish))
     # start and join all nature process
     for nature_process in process_list:
         nature_process.start()
     for nature_process in process_list:
         nature_process.join()
     # Reload each nature from '<save_dir>/nature<i>.pkl' -- presumably
     # written by the worker processes; verify against NatureProcess.run.
     for i in range(0, self.nature_num):
         nature = pickle_load(self.save_dir + '/nature' + str(i) + '.pkl')
         self.nature_list.append(nature)
     # set all g_map to the g_map in the controller
     # (unpickling produced fresh objects, so the shared map must be
     # re-attached at nature, chromo and route level)
     for nature in self.nature_list:
         nature.g_map = self.g_map
         for chromo in nature.chromo_list:
             chromo.g_map = self.g_map
             for route in chromo.sequence:
                 route.g_map = self.g_map
     self.__migrate__()
     # Persist the migrated natures so the next generation's workers see them.
     for i, nature in enumerate(self.nature_list):
         pickle_dump(nature, self.save_dir + '/nature' + str(i) + '.pkl')
예제 #2
0
 def run(self) -> None:
     """
     Worker entry point: load (or create) a Nature, evolve it once via
     operate(), and pickle the result back to self.save_dir.
     :return: None
     """
     try:
         nature: Nature = pickle_load(self.save_dir)
     except FileNotFoundError:
         # No dump exists yet for this index -- start a fresh population.
         print('No "nature{}" in given direction. New "nature" will be created.'.format(self.idx))
         nature = Nature(chromo_list=[], chromo_num=self.chromo_num,
                         g_map=GlobalMap(self.read_dir),
                         new_chromo_num=self.new_chromo_num,
                         punish=self.punish)
     else:
         # A saved nature was found: refresh its punish parameter first.
         nature.set_punish_para(punish=self.punish)
     nature.operate()
     pickle_dump(nature, self.save_dir)
예제 #3
0
파일: main.py 프로젝트: dj-boy/GOC-VRPTW
def main():
    """Build or restore a Controller, evolve it, and print the best routes."""
    g_map = GlobalMap(read_dir=read_dir)

    def _new_controller():
        # Fresh Controller wired to the module-level configuration.
        return Controller(nature_num=nature_num,
                          chromo_num=chromo_num,
                          g_map=g_map,
                          punish=_punish,
                          read_dir=read_dir,
                          save_dir=save_dir)

    if load:
        try:
            controller: Controller = pickle_load(save_dir + '/controller.pkl')
            controller.set_punish(punish=_punish)
        except FileNotFoundError:
            print(
                'No "controller" in given direction. New "controller" will be created.'
            )
            controller = _new_controller()
    else:
        controller = _new_controller()

    for generation in range(generation_num):
        print('Generation {} start.'.format(generation))
        controller.operate()
        best: Chromo = controller.get_best()
        print('Best Cost: {}\tRoute Num: {}\tPunish Num: {}'.format(
            best.cost, len(best.sequence), best.has_punish_num()))
        # Every 10th generation: optionally checkpoint, then raise the penalty.
        if generation % 10 == 9:
            if save:
                pickle_dump(controller, file_path=save_dir + '/controller.pkl')
            controller.set_punish(punish=controller.punish * punish_increase)

    best_chromo: Chromo = controller.get_best()
    for route in best_chromo.sequence:
        print(route.sequence)
예제 #4
0
def fit_Model(Model, imgs, params=None, load=True, save=True, tag=None):
    """Fit a model to images.

    Args:
        Model(function): One of the interface functions of clustering.py file.
        imgs(nibabel.Nifti1Image): 4D image on which to fit the model.
        params(dict): Additional parameters passed to each parcellation
            method. Default arguments are replaced by newer ones.
            Defaults to an empty dict when None is given.
        load(bool): Whether to load the previous results of a fit.
        save(bool): Whether to save the results of the fit.
        tag(string): Suffix to add to the saved file.

    Returns:
        Fitted model. Same return type as given Model functions.

    """
    # A mutable default (params=dict()) is shared across calls and would
    # leak state if any Model mutated it; use a None sentinel instead.
    if params is None:
        params = {}

    filepath = save_dir + get_dump_token(Model.__name__ + '_', tag=tag)

    # Reuse a previous fit when available -- presumably pickle_load returns
    # None when load is False or no dump exists; verify against tools.
    model = pickle_load(filepath, load=load)
    if model is not None:
        return model

    model = Model(imgs, params)
    return pickle_dump(model.fit(imgs), filepath, save=save)
예제 #5
0
from tools import GlobalMap, pickle_dump, pickle_load
from PGA import Controller
from PGA import Nature
from PGA import Chromo
from PGA import Route

import numpy as np

generation_num = 500
chromo_num = 50
_punish = 99999
save_dir = 'data/controller.pkl'

# Restore the previously evolved controller and inspect its best solution.
controller: Controller = pickle_load(save_dir)

best = controller.get_best()
_cost = best.cost
print('Big Car Cost:{}'.format(_cost))

for route in best.sequence:
    # A route with enough spare capacity and a light enough load can be
    # re-assigned to the smaller vehicle, which lowers the total cost.
    fits_small_vehicle = (route.capacity_remain >= 20000
                          and route.capacity_waste >= 20000
                          and route.served_w <= 2
                          and route.served_v <= 12)
    if fits_small_vehicle:
        _cost -= route.travel_d / 1000 * 2
        if route.start_time <= 9.5:
            _cost -= 100
    print(
        "served_w:{0:.5f}, served_v:{1:.5f}, start_tm:{2:.5f}, capacity_waste:{3:.5f}"
        .format(route.served_w, route.served_v, route.start_time,
                max(route.capacity_waste, route.capacity_remain)),
        route.sequence)
print('Customer Number:{}'.format(best.get_custom_num()))
예제 #6
0
# Collect tag names, then load and resample one pickled DataFrame per tag.
tags=[]
#del df
k=0

try:
    # Preferred input: the winter-specific tag list.
    with open(path+"tags_winter.txt",'r') as f:
        count = 0
        j=0
        for line in f:
            # NOTE(review): the last character is stripped here AND again in
            # the append below, so the stored tag loses one real character
            # besides the newline -- confirm the double strip is intended.
            line= line[:-1]
            tags.append(line[:-1])
            count+=1
            if count==25:
                # After 25 tags, force one fixed sentinel tag and arm the
                # exit flag (j == 100 breaks out of the loop below).
                j=100
                line='VIK_PDT2002.vY'
            df = pickle_load(path+line+".pickle")

            try:
                # Resample to the configured interval, forward-filling gaps.
                df=df.resample('%ds'%time_interval).fillna(method='ffill')
            except:
                # NOTE(review): bare except -- presumably hit when plain
                # resample needs an aggregation step first; falls back to
                # mean() plus forward- then back-fill. j counts these cases.
                j+=1
                df=df.resample('%ds'%time_interval).mean().fillna(method='ffill').fillna(method='bfill')
            arrays[line]=df
            if j==100:
                break
except:
    # NOTE(review): bare except -- presumably meant to catch a missing
    # tags_winter.txt; falls back to the generic tags file.
    print('k')
    with open(path+"tags.txt",'r') as f:
        j = 0
        for line in f:
            line=line[:-1]
예제 #7
0
#this script takes a pickled dataframe and pickles each dataframe within the dataframe
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import subprocess as sub
from tools import pickle_load, tags_of_place, remove_time_aspect, load_weather

#import matplotlib.pyplot as plt
# Resampling interval in seconds and the site whose pickles are processed.
time_interval = 120
place = "VIK"
# Ensure the per-place output directory exists before writing into it.
sub.call("mkdir %s_pickles" % place, shell=True)
path = "/home/josephkn/Documents/Fortum/master/%s_pickles/" % place
path2 = "/home/josephkn/Documents/Fortum/master/pickle6/"
# Load the combined DataFrame and prepare to split it per tag.
df = pickle_load(path2 + place + '6.pickle')
grp = df.groupby('tag', sort=False, as_index=False)
arrays = dict()
tags = []
del df
k = 0
try:
    with open(path + "tags_winter.txt", 'r') as f:
        for line in f:
            # Strip the trailing newline before storing the tag name.
            tags.append(line[:-1])

except:
    # NOTE(review): bare except, and `df` was deleted above -- this
    # fallback would raise NameError on tags_of_place(df); confirm.
    print('k')
    tags = list(tags_of_place(df))
    k = 1
for tag, slicee in grp:
    # One sub-DataFrame per tag: drop the grouping column, parse dates.
    slicee = slicee.drop(columns=['tag'])
    slicee['Date'] = pd.to_datetime(slicee['Date'])
예제 #8
0
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 19 16:09:05 2015

@author: emg
"""

from tools import pickle_load, make_reverse_dict


def _load_with_reverse(pickle_name):
    """Load a person->films mapping and build its film->persons inverse."""
    forward = pickle_load(pickle_name)
    return forward, make_reverse_dict(forward)


writer_films, film_writers = _load_with_reverse('writers_file')
director_films, film_directors = _load_with_reverse('directors_file')
actress_films, film_actresses = _load_with_reverse('actresses_file')

예제 #9
0
파일: extract.py 프로젝트: alexprz/meta_ALE
def extract_from_paths(path_dict, data=None,
                       threshold=1.96, tag=None, load=True):
    """
    Extract data from given images.

    Extracts data (coordinates, paths...) from the data and put it in a
        dictionnary using Nimare structure.

    Args:
        path_dict (dict): Dict which keys are study names and values
            are another dict which keys are map names ('z', 'con', 'se', 't')
            and values are absolute paths (string) to these maps.
        data (list): Data to extract. 'coord' and 'path' available.
            Defaults to ['coord', 'path'] when None is given.
        threshold (float): value below threshold are ignored. Used for
            peak detection.
        tag (str): Name of the file to load/dump.
        load (bool): If True, load a potential existing result.
            If False or not found, compute again.

    Returns:
        (dict): Dictionnary storing the coordinates using the Nimare
            structure.

    """
    # A mutable default argument (data=['coord', 'path']) would be shared
    # across calls; use a None sentinel and normalize here instead.
    if data is None:
        data = ['coord', 'path']

    if tag is not None:
        # Loading previously computed dict if any
        ds_dict = pickle_load(save_dir+tag, load=load)
        if ds_dict is not None:
            return ds_dict

    # Computing a new dataset dictionary
    def extract_pool(name, map_dict):
        """Extract activation for multiprocessing."""
        print(f'Extracting {name}...')

        XYZ = None
        if 'coord' in data:
            XYZ = get_activations(map_dict['z'], threshold)
            if XYZ is None:
                # No peak survived the threshold: skip this study.
                return None

        if 'path' in data:
            return get_sub_dict(XYZ, map_dict)

        if XYZ is not None:
            return get_sub_dict(XYZ, None)

        return None

    # Fan the per-study extraction out over a thread pool.
    n_jobs = multiprocessing.cpu_count()
    res = Parallel(n_jobs=n_jobs, backend='threading')(
        delayed(extract_pool)(name, maps) for name, maps in path_dict.items())

    # Drop studies that yielded nothing, then index the rest 0..n-1.
    ds_dict = dict(enumerate(filter(None, res)))

    if tag is not None:
        pickle_dump(ds_dict, save_dir+tag)  # Dumping
    return ds_dict
예제 #10
0
    '''creates dict of film names : rating from ratings_file'''
    results = {}
    for i, line in enumerate(open(filename)):
        if header < i <= footer:       
            name = line.split('  ')[-1].strip()
            rating = line.split('  ')[-2].strip()
            results[name] = rating
            #if i%1000 == 0:
                #print i, 'rating', rating, name
    return results

def dump_ratings_list():
    """Parse the ratings file and pickle the resulting name->rating dict."""
    pickle_dump(read_ratings(ratings_file))

# Load the pre-built ratings pickle (produced by dump_ratings_list above).
ratings_list = pickle_load('ratings_list')

def get_ratings(name, filmography_dict):
    '''get the ratings for every film by the writer of director
    filmography_dict = director_films or writer_films'''
    ratings = []
    nums = []
    films = filmography_dict[name]
    for film in films:
        if film in set(ratings_list):
            rating = float(ratings_list[film])
        else:
            rating = sp.nan # figure out why some ratings not found
        ratings.append([film, rating])
        nums.append(rating)
    ratings.append(['Average Rating', sp.nanmean(nums)])