Example 1
from collections import defaultdict

from dill import load as dill_load
from numpy import array as np_array, cumsum as np_cumsum

# MAX_TSTEPS and plot_experiments are assumed to be defined elsewhere in the module.

def main():
    with open("pysc2/data/results.pkl", 'rb') as file:
        results = dill_load(file)
    experiment_labels = {
        'starcraft-a': "Double Q Learning",
        'starcraft-duel-a': "Dueling Double Q Learning",
        'starcraft-prior-a': "Double Q Learning with Prioritized Replay",
        'starcraft-prior-duel-a': "Dueling Double Q Learning with Prioritized Replay",
    }
    experiments = experiment_labels.keys()
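    # agent_data maps agent id -> experiment name -> list of (times, rewards) runs.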
    agent_data = defaultdict(lambda: defaultdict(list))
    for experiment in experiments:
        for name, data in results.items():
            if name.startswith(experiment):
                agent = data['agent_id']
                times = np_cumsum(data['episode_data']['episode_lengths'])
                rewards = np_array(data['episode_data']['episode_rewards'])
                rewards = rewards[times < MAX_TSTEPS]
                times = times[times < MAX_TSTEPS]
                agent_data[agent][experiment].append((times, rewards))
    experiments_to_plot = ['starcraft-a']
    plot_experiments(experiment_labels, experiments_to_plot, agent_data)
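
A minimal sketch of how a results.pkl file with the structure this loader expects could be produced; the run name, agent id and episode numbers below are illustrative assumptions, not data from the original experiments:

import dill

# Keys are run names prefixed with an experiment id such as 'starcraft-a';
# each value carries the agent id and the per-episode statistics read by main().
results = {
    'starcraft-a-run-0': {
        'agent_id': 0,
        'episode_data': {
            'episode_lengths': [120, 95, 140],
            'episode_rewards': [1.0, -0.5, 2.0],
        },
    },
}

with open("pysc2/data/results.pkl", 'wb') as file:
    dill.dump(results, file)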
Example 2
    def import_internal_services(self, items, base_dir, sync_internal, is_first):
        """ Imports and optionally caches locally internal services.
        """
        cache_file_path = os.path.join(base_dir, 'config', 'repo', 'internal-cache.dat')

        # sync_internal may be False but if the cache does not exist (which is the case when a server starts up
        # for the first time), we need to create it anyway and sync_internal becomes True then. However, the cache
        # should be created only by the very first worker in a group of workers - the rest can simply assume
        # that the cache is ready to read.
        if is_first and not os.path.exists(cache_file_path):
            sync_internal = True

        if sync_internal:

            # Synchronizing internal modules means re-building the internal cache from scratch
            # and re-deploying everything.

            service_info = []
            internal_cache = {
                'service_info': service_info
            }

            deployed = self.import_services_from_anywhere(items, base_dir)

            for class_ in deployed:
                impl_name = class_.get_impl_name()
                service_info.append({
                    'class_': class_,
                    'mod': inspect.getmodule(class_),
                    'impl_name': impl_name,
                    'service_id': self.impl_name_to_id[impl_name],
                    'is_active': self.services[impl_name]['is_active'],
                    'slow_threshold': self.services[impl_name]['slow_threshold'],
                    'fs_location': inspect.getfile(class_),
                })


            # All set, write out the cache file
            with open(cache_file_path, 'wb') as f:
                f.write(dill_dumps(internal_cache))

            return deployed

        else:
            deployed = []

            with open(cache_file_path, 'rb') as f:
                items = bunchify(dill_load(f))

            for item in items.service_info:
                self._visit_class(item.mod, deployed, item.class_, item.fs_location, True,
                    item.service_id, item.is_active, item.slow_threshold)

            return deployed
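
The read branch relies on bunchify (presumably from the bunch library) to turn the deserialized dict into an object whose keys are attribute-accessible, which is why it can write items.service_info and item.class_ rather than subscripting. A standalone sketch of that round trip, with a deliberately simplified cache payload:

from bunch import bunchify
from dill import dumps as dill_dumps, loads as dill_loads

# A simplified stand-in for the internal cache written above.
internal_cache = {'service_info': [{'impl_name': 'demo.service', 'is_active': True}]}

items = bunchify(dill_loads(dill_dumps(internal_cache)))
for item in items.service_info:
    print(item.impl_name, item.is_active)  # demo.service True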
Example 3
    def import_internal_services(self, items, base_dir, sync_internal,
                                 is_first):
        """ Imports and optionally caches locally internal services.
        """
        cache_file_path = os.path.join(base_dir, 'config', 'repo',
                                       'internal-cache.dat')

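        # Map each internal service's impl_name to its metadata as stored in the ODB.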
        sql_services = {}
        for item in self.odb.get_sql_internal_service_list(
                self.server.cluster_id):
            sql_services[item.impl_name] = {
                'id': item.id,
                'impl_name': item.impl_name,
                'is_active': item.is_active,
                'slow_threshold': item.slow_threshold,
            }

        # sync_internal may be False but if the cache does not exist (which is the case when a server starts up
        # for the first time), we need to create it anyway and sync_internal becomes True then. However, the cache
        # should be created only by the very first worker in a group of workers - the rest can simply assume
        # that the cache is ready to read.
        if is_first and not os.path.exists(cache_file_path):
            sync_internal = True

        if sync_internal:

            # Synchronizing internal modules means re-building the internal cache from scratch
            # and re-deploying everything.

            service_info = []
            internal_cache = {'service_info': service_info}

            logger.info('Deploying and caching internal services (%s)',
                        self.server.name)
            info = self.import_services_from_anywhere(items, base_dir)

            for service in info.to_process:  # type: InRAMService

                class_ = service.service_class
                impl_name = service.impl_name

                service_info.append({
                    'service_class': class_,
                    'mod': inspect.getmodule(class_),
                    'impl_name': impl_name,
                    'service_id': self.impl_name_to_id[impl_name],
                    'is_active': self.services[impl_name]['is_active'],
                    'slow_threshold': self.services[impl_name]['slow_threshold'],
                    'fs_location': inspect.getfile(class_),
                    'deployment_info': '<todo>',
                })

            # All set, write out the cache file
            with open(cache_file_path, 'wb') as f:
                f.write(dill_dumps(internal_cache))

            logger.info('Deployed and cached %d internal services (%s) (%s)',
                        len(info.to_process), info.total_size_human,
                        self.server.name)

            return info.to_process

        else:
            logger.info('Deploying cached internal services (%s)',
                        self.server.name)
            to_process = []

            try:
                with open(cache_file_path, 'rb') as f:
                    dill_items = dill_load(f)
            except ValueError as e:
                msg = e.args[0]
                if _unsupported_pickle_protocol_msg in msg:
                    msg = msg.replace(_unsupported_pickle_protocol_msg,
                                      '').strip()
                    protocol_found = int(msg)

                    # If the protocol found is higher than our own, it means that the cache
                    # was built under a Python version higher than our own, e.g. we are on
                    # Python 2.7 and the cache was created under Python 3.4. In such a case,
                    # we need to recreate the cache anew.
                    if protocol_found > highest_pickle_protocol:
                        logger.info(
                            'Cache pickle protocol found `%d` > current highest `%d`, forcing sync_internal',
                            protocol_found, highest_pickle_protocol)
                        return self.import_internal_services(
                            items, base_dir, True, is_first)

                    # A different reason, re-raise the error then
                    else:
                        raise

                # Must be a different kind of a ValueError, propagate it then
                else:
                    raise

            len_si = len(dill_items['service_info'])

            for item in dill_items['service_info']:
                class_ = self._visit_class(item['mod'], item['service_class'],
                                           item['fs_location'], True)
                to_process.append(class_)

            self._store_in_ram(None, to_process)

            logger.info('Deployed %d cached internal services (%s)', len_si,
                        self.server.name)

            return to_process
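
The except branch keys on CPython's "unsupported pickle protocol: N" ValueError, raised when a pickle stream declares a newer protocol than the running interpreter supports. A self-contained sketch of that detection; the value of _unsupported_pickle_protocol_msg is an assumption here, matching the message CPython emits:

import pickle

_unsupported_pickle_protocol_msg = 'unsupported pickle protocol:'
highest_pickle_protocol = pickle.HIGHEST_PROTOCOL

try:
    # The PROTO opcode (\x80) followed by an impossibly high protocol number.
    pickle.loads(b'\x80\xff')
except ValueError as e:
    msg = e.args[0]
    if _unsupported_pickle_protocol_msg in msg:
        protocol_found = int(msg.replace(_unsupported_pickle_protocol_msg, '').strip())
        print('Cache protocol %d > ours %d' % (protocol_found, highest_pickle_protocol))
    else:
        raise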
Example 4
import dill
from dill import load as dill_load
from pickle import UnpicklingError

# LoadError is assumed to be defined elsewhere in the module.

def loadFile(path):
    try:
        with path.open('rb') as lfile:
            return dill_load(lfile)
    # Loading raises UnpicklingError; PicklingError only occurs when dumping.
    except UnpicklingError:
        raise LoadError(dill.__name__)

def load_dill_from_file_on_disk(path, dill_load=dill_load):
    # Return the deserialized object.
    with open(path, 'rb') as file:
        return dill_load(file)
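
Binding dill_load as a default argument makes the loader injectable: tests can pass a stub without patching the dill module. A brief usage sketch with a hypothetical fake_load stub and a throwaway temp file:

import tempfile

def fake_load(file):
    # Hypothetical stub standing in for dill.load in a test.
    return {'loaded_from': file.name}

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b'irrelevant bytes')

obj = load_dill_from_file_on_disk(tmp.name, dill_load=fake_load)
assert obj == {'loaded_from': tmp.name}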