Example #1
0
    def __really_writeIndex(self):
        """Write the index cache for all subjobs to the master index file.

        For each subjob id the cache entry is taken from the live job object
        (if it is loaded), from the already-held index data, or rebuilt by
        loading the subjob as a last resort. The combined dict is then
        serialised to the master index file; any failure is logged at debug
        level only, since an index write is best-effort.
        """
        import os

        all_caches = {}
        for sj_id in range(len(self)):
            if sj_id in self._cachedJobs:
                # Subjob is loaded in memory: regenerate its cache entry fresh
                this_cache = self._registry.getIndexCache(self.__getitem__(sj_id))
                all_caches[sj_id] = this_cache
                disk_location = self.__get_dataFile(sj_id)
                all_caches[sj_id]['modified'] = os.stat(disk_location).st_ctime
            elif sj_id in self._subjobIndexData:
                # Reuse the index data we already hold for this subjob
                all_caches[sj_id] = self._subjobIndexData[sj_id]
            else:
                # Fall back to loading the subjob and building its entry
                this_cache = self._registry.getIndexCache(self.__getitem__(sj_id))
                all_caches[sj_id] = this_cache
                disk_location = self.__get_dataFile(sj_id)
                all_caches[sj_id]['modified'] = os.stat(disk_location).st_ctime

        try:
            from Ganga.Core.GangaRepository.PickleStreamer import to_file
            index_file = os.path.join(self._jobDirectory, self._subjob_master_index_name)
            # 'with' guarantees the handle is closed even if to_file raises
            with open(index_file, "w") as index_file_obj:
                to_file(all_caches, index_file_obj)
        except Exception as err:
            # Best-effort write: never let an index failure break the caller
            logger.debug("cache write error: %s" % str(err))
Example #2
0
    def __really_writeIndex(self):
        """Write the index cache for all subjobs to the master index file.

        Cache entries come from the in-memory job (if loaded), from the
        already-known index data, or are regenerated from the subjob on
        disk. The combined dict is then serialised to the master index
        file; failures are only logged at debug level (best-effort write).
        """
        import os

        all_caches = {}
        for sj_id in range(len(self)):
            if sj_id in self._cachedJobs:
                # Loaded subjob: regenerate its cache entry from the object
                this_cache = self._registry.getIndexCache(
                    self.__getitem__(sj_id))
                all_caches[sj_id] = this_cache
                disk_location = self.__get_dataFile(sj_id)
                all_caches[sj_id]['modified'] = os.stat(disk_location).st_ctime
            elif sj_id in self._subjobIndexData:
                # Reuse the index data already held for this subjob
                all_caches[sj_id] = self._subjobIndexData[sj_id]
            else:
                # Last resort: load the subjob and build its entry
                this_cache = self._registry.getIndexCache(
                    self.__getitem__(sj_id))
                all_caches[sj_id] = this_cache
                disk_location = self.__get_dataFile(sj_id)
                all_caches[sj_id]['modified'] = os.stat(
                    disk_location).st_ctime

        try:
            from Ganga.Core.GangaRepository.PickleStreamer import to_file
            index_file = os.path.join(self._jobDirectory,
                                      self._subjob_master_index_name)
            # Context manager closes the file even if to_file raises
            with open(index_file, "w") as index_file_obj:
                to_file(all_caches, index_file_obj)
        except Exception as err:
            # Best-effort write: never let an index failure break the caller
            logger.debug("cache write error: %s" % str(err))
Example #3
0
    def __really_writeIndex(self, ignore_disk=False):
        """Do the actual work of writing the index for all subjobs.

        Cache entries come from the in-memory job (if loaded), from the
        already-known index data, or are regenerated from disk as a last
        resort. The combined dict is serialised to the master index file;
        IOError is only logged at debug level (best-effort write).

        Args:
            ignore_disk (bool): Optional flag to force the class to ignore all on-disk data when flushing
        """

        all_caches = {}
        # When ignoring on-disk state, only flush subjobs loaded in memory
        if ignore_disk:
            range_limit = self._cachedJobs.keys()
        else:
            range_limit = range(len(self))

        for sj_id in range_limit:
            if sj_id in self._cachedJobs:
                # Loaded subjob: regenerate its cache entry from the object
                this_cache = self._registry.getIndexCache(
                    self.__getitem__(sj_id))
                all_caches[sj_id] = this_cache
                disk_location = self.__get_dataFile(sj_id)
                all_caches[sj_id]['modified'] = stat(disk_location).st_ctime
            elif sj_id in self._subjobIndexData:
                # Reuse the index data already held for this subjob
                all_caches[sj_id] = self._subjobIndexData[sj_id]
            else:
                # Last resort: load the subjob and build its entry
                this_cache = self._registry.getIndexCache(
                    self.__getitem__(sj_id))
                all_caches[sj_id] = this_cache
                disk_location = self.__get_dataFile(sj_id)
                all_caches[sj_id]['modified'] = stat(
                    disk_location).st_ctime

        try:
            from Ganga.Core.GangaRepository.PickleStreamer import to_file
            index_file = path.join(self._jobDirectory,
                                   self._subjob_master_index_name)
            # Context manager closes the file even if to_file raises
            with open(index_file, "w") as index_file_obj:
                to_file(all_caches, index_file_obj)
        ## Once I work out what the other exceptions here are I'll add them
        except (IOError, ) as err:
            logger.debug("cache write error: %s" % err)
Example #4
0
    def __really_writeIndex(self, ignore_disk=False):
        """Do the actual work of writing the index for all subjobs.

        Each subjob's cache entry is taken from the live object (if loaded),
        from the index data already held, or rebuilt by loading the subjob.
        The combined dict is serialised to the master index file; IOError is
        only logged at debug level (best-effort write).

        Args:
            ignore_disk (bool): Optional flag to force the class to ignore all on-disk data when flushing
        """

        all_caches = {}
        # When ignoring on-disk state, only flush subjobs loaded in memory
        if ignore_disk:
            range_limit = self._cachedJobs.keys()
        else:
            range_limit = range(len(self))

        for sj_id in range_limit:
            if sj_id in self._cachedJobs:
                # Loaded subjob: regenerate its cache entry from the object
                this_cache = self._registry.getIndexCache(self.__getitem__(sj_id))
                all_caches[sj_id] = this_cache
                disk_location = self.__get_dataFile(sj_id)
                all_caches[sj_id]['modified'] = stat(disk_location).st_ctime
            elif sj_id in self._subjobIndexData:
                # Reuse the index data already held for this subjob
                all_caches[sj_id] = self._subjobIndexData[sj_id]
            else:
                # Last resort: load the subjob and build its entry
                this_cache = self._registry.getIndexCache(self.__getitem__(sj_id))
                all_caches[sj_id] = this_cache
                disk_location = self.__get_dataFile(sj_id)
                all_caches[sj_id]['modified'] = stat(disk_location).st_ctime

        try:
            from Ganga.Core.GangaRepository.PickleStreamer import to_file
            index_file = path.join(self._jobDirectory, self._subjob_master_index_name)
            # Context manager closes the file even if to_file raises
            with open(index_file, "w") as index_file_obj:
                to_file(all_caches, index_file_obj)
        ## Once I work out what the other exceptions here are I'll add them
        except (IOError,) as err:
            logger.debug("cache write error: %s" % err)