Code Example #1
File: controller.py Project: dj-boy/GOC-VRPTW
 def operate(self):
     """
     operate the controller, i.e. run every nature in its own process, then reload the results and migrate
     :return: None
     """
     self.nature_list[:] = []
     process_list = []
     for i in range(0, self.nature_num):
         process_list.append(NatureProcess(idx=i, save_dir=self.save_dir, read_dir=self.read_dir,
                                           chromo_num=self.chromo_sum, new_chromo_num=self.new_chromo_num,
                                           punish=self.punish))
     # start and join all nature process
     for nature_process in process_list:
         nature_process.start()
     for nature_process in process_list:
         nature_process.join()
     for i in range(0, self.nature_num):
         nature = pickle_load(self.save_dir + '/nature' + str(i) + '.pkl')
         self.nature_list.append(nature)
     # set all g_map to the g_map in the controller
     for nature in self.nature_list:
         nature.g_map = self.g_map
         for chromo in nature.chromo_list:
             chromo.g_map = self.g_map
             for route in chromo.sequence:
                 route.g_map = self.g_map
     self.__migrate__()
     for i, nature in enumerate(self.nature_list):
         pickle_dump(nature, self.save_dir + '/nature' + str(i) + '.pkl')
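
The `pickle_load` / `pickle_dump` helpers used throughout these examples are not shown on this page. Below is a minimal sketch of what the GOC-VRPTW variants might look like, inferred from how they are called; this is an assumption, not the project's actual code:

import pickle

def pickle_load(file_path):
    # Raises FileNotFoundError when the file is missing, which run() below
    # catches in order to create a fresh Nature instead.
    with open(file_path, 'rb') as f:
        return pickle.load(f)

def pickle_dump(obj, file_path):
    # Serialize obj to file_path, overwriting any previous checkpoint.
    with open(file_path, 'wb') as f:
        pickle.dump(obj, f)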
Code Example #2
File: controller.py Project: dj-boy/GOC-VRPTW
 def run(self) -> None:
     try:
         nature: Nature = pickle_load(self.save_dir)
         nature.set_punish_para(punish=self.punish)
     except FileNotFoundError:
         print('No "nature{}" in given direction. New "nature" will be created.'.format(self.idx))
         nature: Nature = Nature(chromo_list=[], chromo_num=self.chromo_num, g_map=GlobalMap(self.read_dir),
                                 new_chromo_num=self.new_chromo_num, punish=self.punish)
     nature.operate()
     pickle_dump(nature, self.save_dir)
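
For context, `NatureProcess` is started and joined like a `multiprocessing.Process` in `operate()` above, and `run()` loads from `self.save_dir` directly, so the constructor presumably appends the per-nature file name to the save directory. A guessed skeleton follows; the parameter names come from the call in `operate()`, everything else is an assumption:

import multiprocessing

class NatureProcess(multiprocessing.Process):
    def __init__(self, idx, save_dir, read_dir, chromo_num, new_chromo_num, punish):
        super().__init__()
        self.idx = idx
        # Assumed: the per-nature pickle path is built once here.
        self.save_dir = save_dir + '/nature' + str(idx) + '.pkl'
        self.read_dir = read_dir
        self.chromo_num = chromo_num
        self.new_chromo_num = new_chromo_num
        self.punish = punish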
Code Example #3
File: main.py Project: dj-boy/GOC-VRPTW
def main():
    g_map = GlobalMap(read_dir=read_dir)
    if not load:
        controller = Controller(nature_num=nature_num,
                                chromo_num=chromo_num,
                                g_map=g_map,
                                punish=_punish,
                                read_dir=read_dir,
                                save_dir=save_dir)
    else:
        try:
            controller: Controller = pickle_load(save_dir + '/controller.pkl')
            controller.set_punish(punish=_punish)
        except FileNotFoundError:
            print(
                'No "controller" in given directory. New "controller" will be created.'
            )
            controller = Controller(nature_num=nature_num,
                                    chromo_num=chromo_num,
                                    g_map=g_map,
                                    punish=_punish,
                                    read_dir=read_dir,
                                    save_dir=save_dir)

    for generation in range(0, generation_num):
        print('Generation {} start.'.format(generation))
        controller.operate()
        best: Chromo = controller.get_best()
        print('Best Cost: {}\tRoute Num: {}\tPunish Num: {}'.format(
            best.cost, len(best.sequence), best.has_punish_num()))
        if generation % 10 == 9:
            if save:
                pickle_dump(controller, file_path=save_dir + '/controller.pkl')
            controller.set_punish(punish=controller.punish * punish_increase)

    best_chromo: Chromo = controller.get_best()
    for route in best_chromo.sequence:
        print(route.sequence)
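
main() relies on module-level settings (read_dir, save_dir, load, save, nature_num, chromo_num, generation_num, _punish, punish_increase) defined elsewhere in main.py. The block below only illustrates their roles; the values are placeholders, not taken from the project:

read_dir = 'data'         # input data directory for GlobalMap
save_dir = 'save'         # where controller/nature pickles are written
load, save = True, True   # resume from / persist checkpoints
nature_num = 4            # number of natures (one process each)
chromo_num = 100          # chromosomes per nature
generation_num = 200      # generations to evolve
_punish = 100.0           # initial punishment coefficient
punish_increase = 1.5     # factor applied every 10th generation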
Code Example #4
def fit_Model(Model, imgs, params=dict(), load=True, save=True, tag=None):
    """Fit a model to images.

    Args:
        Model(function): One of the interface functions of the clustering.py file.
        imgs(nibabel.Nifti1Image): 4D image on which to fit the model.
        params(dict): Additional parameters passed to each parcellation
            method, overriding the method's defaults.
        load(bool): Whether to load the previous results of a fit.
        save(bool): Whether to save the results of the fit.
        tag(string): Suffix to add to the saved file.

    Returns:
        Fitted model. Same return type as given Model functions.

    """
    filepath = save_dir + get_dump_token(Model.__name__ + '_', tag=tag)

    model = pickle_load(filepath, load=load)
    if model is not None:
        return model

    model = Model(imgs, params)
    return pickle_dump(model.fit(imgs), filepath, save=save)
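
A hypothetical call, where `KMeans` stands in for one of the clustering.py interface functions and `imgs` is a 4D Nifti image loaded elsewhere; both names and the parameter key are assumptions for illustration:

model = fit_Model(KMeans, imgs, params={'n_parcels': 100}, tag='demo')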
Code Example #5
File: controller.py Project: dj-boy/GOC-VRPTW
 def set_punish(self, punish):
     self.punish = punish
     for i, nature in enumerate(self.nature_list):
         nature.set_punish_para(punish=punish)
         pickle_dump(nature, self.save_dir + '/nature' + str(i) + '.pkl')
Code Example #6
 def __init__(self, type='FLIC'):
     if type == 'FLIC':
         ip_dir = 'cropped-images'
     elif type == 'SHOULDER':
         ip_dir = 'full-images'
     else:
         raise ValueError('unknown dataset type: ' + str(type))
     
     self.ptno_part = {0:'face', 1:'lsho', 2:'lelb', 3:'lwri', 4:'rsho', 5:'relb', 6:'rwri'}
     self.part_pos = dict()
     for pt_no, part in self.ptno_part.items():
         matname = part + '_pos.mat'
         matkey = part + 'Pos'
         self.part_pos[part] = io.loadmat('unprocessed_data/'+ip_dir+'/' + matname)[matkey]
     
     self.names = io.loadmat('unprocessed_data/'+ip_dir+'/names.mat')['nameList'][0]
     self.is_train = io.loadmat('unprocessed_data/'+ip_dir+'/istrain.mat')['train_set'][0]
     self.scale_and_crop_coords = io.loadmat('unprocessed_data/'+ip_dir+'/scale_and_crop_coords.mat')['scale_and_crop_coords'][0]
                     
     self.X = defaultdict(list)
     self.Y = defaultdict(list)
     self.index = defaultdict(list)
     
     
     # split files into train / test / valid
     # (valid only receives train images past the first train_valid_sep)
     train_valid_sep = 10000
     X_names = defaultdict(list)      
     for idx in range(0, len(self.names)):
         if self.is_train[idx] == 1 and len(X_names['train']) < train_valid_sep:
             X_names['train'].append(self.names[idx])
             self.index['train'].append(idx)
         elif self.is_train[idx] == 1 and len(X_names['train']) >= train_valid_sep:   
             X_names['valid'].append(self.names[idx])
             self.index['valid'].append(idx)
         else:
             self.index['test'].append(idx)
             X_names['test'].append(self.names[idx])
     
     test_indices_subset  = [170, 171, 172, 173, 174, 175, 176, 376, 377, 378, 379, 380, 381, 384, 386, 389, 390, 391, 392, 393, 394, 398, 400, 401, 402, 404, 405, 407, 408, 417, 699, 700, 701, 702, 703, 705, 706, 707, 708, 709, 710, 711, 712, 713, 714, 715, 733, 734, 735, 752, 754, 755, 756, 757, 896, 897, 898, 899, 900, 903, 904, 905, 906, 907, 918, 919, 920, 961, 963, 964, 965, 966, 967, 981, 982, 983, 1526, 1527, 1528, 1529, 1533, 1535, 1536, 1537, 1538, 1539, 1540, 1541, 1542, 1543, 1544, 1545, 1546, 1547, 1548, 1561, 1576, 1577, 1609, 1610, 1611, 1612, 1613, 1614, 1626, 1627, 1777, 1778, 1779, 1780, 1781, 1783, 1785, 1786, 1787, 1788, 1789, 1790, 1791, 1792, 1793, 1794, 1795, 1796, 1797, 1798, 1799, 1800, 1815, 1856, 1857, 1858, 1859, 1860, 1885, 2324, 2325, 2327, 2328, 2329, 2330, 2334, 2335, 2336, 2337, 2338, 2339, 2340, 2343, 2344, 2345, 2346, 2347, 2348, 2349, 2589, 2590, 2591, 2592, 2593, 2594, 2595, 2596, 2597, 2598, 2599, 2600, 2601, 2602, 2603, 2604, 2605, 2606, 2607, 2608, 2609, 2610, 2611, 2612, 2613, 2614, 2615, 2616, 2621, 2622, 2623, 2624, 2625, 2626, 2627, 2628, 2629, 2630, 2631, 2632, 2633, 2634, 2922, 2923, 2924, 2925, 2926, 2927, 2928, 2929, 2930, 2931, 2932, 2933, 2934, 2950, 2952, 2953, 2959, 2960, 2961, 2962, 2963, 2964, 2965, 2969, 2970, 2971, 2972, 2973, 3244, 3245, 3246, 3247, 3248, 3249, 3250, 3251, 3252, 3253, 3254, 3255, 3256, 3257, 3258, 3259, 3260, 3261, 3262, 3263, 3264, 3265, 3266, 3267, 3268, 3269, 3270, 3271, 3272, 3273, 3277, 3278, 3279, 3280, 3285, 3286, 3287, 3288, 3300, 3305, 3341, 3344, 3345, 3389, 3390, 3391, 3392, 3393, 3395, 3397, 3398, 3592, 3593, 3594, 3595, 3596, 3597, 3625, 3768, 3769, 3770, 3771, 3772, 3784, 3785, 3786, 3787, 3788, 3789, 3790, 3791, 3845, 3846, 3847, 3848, 3849, 3850, 3884, 3961, 3962, 4341, 4342, 4343, 4344, 4345, 4346, 4347, 4348, 4349, 4376, 4382, 4390, 4395, 4396, 4397, 4406, 4407, 4584, 4585, 4586, 4787, 4790, 4792, 4793, 4796, 4812, 4813, 4814, 4815, 4816, 4817, 4818, 4967, 4968, 4969, 4981, 4982, 4995, 4996, 4997, 4998, 4999, 5000, 5001, 5002, 5003]
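     # Shift the hand-picked MATLAB-style 1-based indices to 0-based.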
     test_indices_subset[:] = [x - 1 for x in test_indices_subset]
     X_names['test'] = [self.names[i] for i in test_indices_subset]
     self.index['test'] = test_indices_subset
     print(test_indices_subset)
     
     #load x and y in memory
     for kind in ['train', 'valid', 'test']:
         self.X[kind] = [None] * len(X_names[kind])
         self.Y[kind] = [None] * len(X_names[kind])
      
     for kind in ['train', 'valid', 'test']:
         for idx, name in enumerate(X_names[kind]): 
             im = plt.imread('unprocessed_data/'+str(name[0]))
             if socket.gethostname() != 'vajra' and sys.platform != 'darwin':
                 im = misc.imrotate(im, 180.0)
                 im = numpy.fliplr(im)               
             self.X[kind][idx] = im
             
             self.Y[kind][idx] = []
             for pt_no, part in self.ptno_part.items():
                 self.Y[kind][idx].append((self.part_pos[part][0][self.index[kind][idx]], self.part_pos[part][1][self.index[kind][idx]]))
             if idx % 100 == 0:
                 print('{0:d} / {1:d}'.format(idx, len(X_names[kind])))
     
     for kind in ['train', 'valid', 'test']:
         print('no of {0:s}: {1:d}'.format(kind, len(self.X[kind])))
     
     if type == 'SHOULDER':
         self.scale_and_crop_images()
     
     #flip train and valid
     for kind in ['train', 'valid']:
         for idx in range(0, len(self.X[kind])):
             flipped = numpy.fliplr(self.X[kind][idx])
             flip_name = '.'.join(X_names[kind][idx][0].split('.')[0:-1])+'-flipped.jpg'
             self.X[kind].append(flipped)
             X_names[kind].append([flip_name])
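             # A horizontal flip turns left joints into right joints and
             # vice versa, so swap their slots (face at index 0 stays put).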
             flip_y = [(flipped.shape[1] - self.Y[kind][idx][pt_no][0], self.Y[kind][idx][pt_no][1]) for pt_no in self.ptno_part.keys()]
             flip_y = [flip_y[j0] for j0 in [0, 4, 5, 6, 1, 2, 3]]
             self.Y[kind].append(flip_y)                
             
     for kind in ['train', 'valid', 'test']:
         print('no of {0:s}: {1:d}'.format(kind, len(self.X[kind])))
       
     #------- Write it all down to disk -------#
     target_imgshape = (240, 320, 3)
     scalefactor = float(target_imgshape[0])/self.X['train'][0].shape[0] 
     print('Image shape: ')
     print(self.X['train'][0].shape)
     print('Scalefactor is: ' + str(scalefactor))
     for kind in ['train', 'valid', 'test']:
         jnt_pos_2d = dict()
         print('writing images for ' + kind)
         if not os.path.exists('processed_data/'+ kind):
             os.makedirs('processed_data/' + kind)
         p_imname_tmpl = 'processed_data/' + kind + '/{0:s}.png'    
         for idx in range(0, len(self.X[kind])):  
             if idx % 100 == 0:
                 print('{0:d} / {1:d}'.format(idx, len(self.X[kind])))
             scaled_im = misc.imresize(self.X[kind][idx], target_imgshape)  
             imname = X_names[kind][idx]
             imname = imname[0].split('/')[-1].split('.')[0]
             imname = p_imname_tmpl.format(imname)
             misc.imsave(imname, scaled_im)
             
             for pt_no, part in self.ptno_part.items():
                 x = self.Y[kind][idx][pt_no][0] * scalefactor 
                 y = self.Y[kind][idx][pt_no][1] * scalefactor 
                 if imname not in jnt_pos_2d:
                     jnt_pos_2d[imname] = [(x, y)]
                 else:
                     jnt_pos_2d[imname].append((x, y)) 
             """
             plt.imshow(scaled_im)
             xs = [jnt[0] for jnt in jnt_pos_2d[imname]]
             ys = [jnt[1] for jnt in jnt_pos_2d[imname]]
             plt.scatter(xs, ys)
             plt.show()
             """
              
                 
         tools.pickle_dump(jnt_pos_2d, 'processed_data/' + kind + '/jnt_pos_2d.pkl')
         al.write(jnt_pos_2d, 'processed_data/' + kind + '/jnt_pos_2d.al')        
Code Example #7
File: extract.py Project: alexprz/meta_ALE
def extract_from_paths(path_dict, data=['coord', 'path'],
                       threshold=1.96, tag=None, load=True):
    """
    Extract data from given images.

    Extracts data (coordinates, paths...) from the given maps and puts it in
        a dictionary using the NiMARE structure.

    Args:
        path_dict (dict): Dict whose keys are study names and whose values
            are dicts mapping map names ('z', 'con', 'se', 't') to the
            absolute paths (string) of these maps.
        data (list): Data to extract. 'coord' and 'path' available.
        threshold (float): Values below this threshold are ignored. Used
            for peak detection.
        tag (str): Name of the file to load/dump.
        load (bool): If True, load a potential existing result.
            If False or not found, compute again.

    Returns:
        (dict): Dictionary storing the coordinates using the NiMARE
            structure.

    """
    if tag is not None:
        # Loading previously computed dict if any
        ds_dict = pickle_load(save_dir+tag, load=load)
        if ds_dict is not None:
            return ds_dict

    # Computing a new dataset dictionary
    def extract_pool(name, map_dict):
        """Extract activation for multiprocessing."""
        print(f'Extracting {name}...')

        XYZ = None
        if 'coord' in data:
            XYZ = get_activations(map_dict['z'], threshold)
            if XYZ is None:
                return

        if 'path' in data:
            # base, filename = ntpath.split(path)
            # file, ext = filename.split('.', 1)

            # path_dict = {'z': path}
            # for map_type in ['t', 'con', 'se']:
            #     file_path = f'{base}/{file}_{map_type}.{ext}'
            #     if os.path.isfile(file_path):
            #         path_dict[map_type] = file_path

            # return get_sub_dict(XYZ, path_dict)
            return get_sub_dict(XYZ, map_dict)

        if XYZ is not None:
            return get_sub_dict(XYZ, None)

        return

    n_jobs = multiprocessing.cpu_count()
    res = Parallel(n_jobs=n_jobs, backend='threading')(
        delayed(extract_pool)(name, maps) for name, maps in path_dict.items())

    # Removing potential None values
    res = list(filter(None, res))
    # Collecting all study dicts into one dict keyed by index
    ds_dict = dict(enumerate(res))

    if tag is not None:
        pickle_dump(ds_dict, save_dir+tag)  # Dumping
    return ds_dict
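
A hypothetical call with placeholder paths; the shape of `path_dict` follows the docstring above:

paths = {
    'study01': {'z': '/abs/path/study01_z.nii.gz'},
    'study02': {'z': '/abs/path/study02_z.nii.gz',
                't': '/abs/path/study02_t.nii.gz'},
}
ds_dict = extract_from_paths(paths, data=['coord'], threshold=1.96, tag='demo.pkl')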
Code Example #8
File: ratings.py Project: ellamguest/imdb_ratings
def dump_ratings_list():
    ratings_list = read_ratings(ratings_file)
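    # Note: unlike the other examples, this pickle_dump takes no path, so the
    # project presumably writes to a default destination.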
    pickle_dump(ratings_list)