Example #1
 def last_modified(self, updated):
     '''
     Generates a locale independent, english timestamp from a datetime
     object
     '''
     updated = as_utc(updated)
     lm = updated.strftime('day, %d month %Y %H:%M:%S GMT')
     day = {
         0: 'Sun',
         1: 'Mon',
         2: 'Tue',
         3: 'Wed',
         4: 'Thu',
         5: 'Fri',
         6: 'Sat'
     }
     lm = lm.replace('day', day[int(updated.strftime('%w'))])
     month = {
         1: 'Jan',
         2: 'Feb',
         3: 'Mar',
         4: 'Apr',
         5: 'May',
         6: 'Jun',
         7: 'Jul',
         8: 'Aug',
         9: 'Sep',
         10: 'Oct',
         11: 'Nov',
         12: 'Dec'
     }
     return lm.replace('month', month[updated.month])
Example #2
 def last_modified(self, updated):
     '''
     Generates a locale independent, english timestamp from a datetime
     object
     '''
     updated = as_utc(updated)
     lm = updated.strftime('day, %d month %Y %H:%M:%S GMT')
     day ={0:'Sun', 1:'Mon', 2:'Tue', 3:'Wed', 4:'Thu', 5:'Fri', 6:'Sat'}
     lm = lm.replace('day', day[int(updated.strftime('%w'))])
     month = {1:'Jan', 2:'Feb', 3:'Mar', 4:'Apr', 5:'May', 6:'Jun', 7:'Jul',
              8:'Aug', 9:'Sep', 10:'Oct', 11:'Nov', 12:'Dec'}
     return lm.replace('month', month[updated.month])
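Examples #1 and #2 are the same calibre helper: strftime() fills in the numeric fields, and the English day and month abbreviations are substituted by hand so the result never depends on the current locale (as it would with %a/%b). A minimal standalone sketch of the same technique using only the standard library, assuming the input datetime is already timezone-aware:

from datetime import datetime, timezone

def http_last_modified(updated):
    # Convert to UTC first; assumes `updated` is timezone-aware.
    updated = updated.astimezone(timezone.utc)
    days = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
    months = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
              'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')
    # Fill in the English names explicitly so the result does not
    # depend on the current locale, exactly like the examples above.
    return '%s, %02d %s %04d %02d:%02d:%02d GMT' % (
        days[updated.weekday()], updated.day, months[updated.month - 1],
        updated.year, updated.hour, updated.minute, updated.second)

print(http_last_modified(datetime(2024, 3, 5, 13, 0, 0, tzinfo=timezone.utc)))
# Tue, 05 Mar 2024 13:00:00 GMT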
Example #3
    def __init__(self, entry, fs_cache):
        self.all_storage_ids = fs_cache.all_storage_ids

        self.object_id = entry['id']
        self.is_folder = entry['is_folder']
        self.storage_id = entry['storage_id']
        # self.parent_id is None for storage objects
        self.parent_id = entry.get('parent_id', None)
        n = entry.get('name', None)
        if not n:
            n = '___'
        self.name = force_unicode(n, 'utf-8')
        self.persistent_id = entry.get('persistent_id', self.object_id)
        self.size = entry.get('size', 0)
        md = entry.get('modified', 0)
        try:
            if isinstance(md, tuple):
                self.last_modified = datetime(*(list(md) + [local_tz]))
            else:
                self.last_modified = datetime.fromtimestamp(md, local_tz)
        except:
            self.last_modified = datetime.fromtimestamp(0, local_tz)
        self.last_mod_string = self.last_modified.strftime('%Y/%m/%d %H:%M')
        self.last_modified = as_utc(self.last_modified)

        if self.storage_id not in self.all_storage_ids:
            raise ValueError(
                'Storage id %s not valid for %s, valid values: %s' %
                (self.storage_id, entry, self.all_storage_ids))

        if self.parent_id == 0:
            self.parent_id = self.storage_id

        self.is_hidden = entry.get('is_hidden', False)
        self.is_system = entry.get('is_system', False)
        self.can_delete = entry.get('can_delete', True)

        self.files = []
        self.folders = []
        fs_cache.id_map[self.object_id] = self
        self.fs_cache = weakref.ref(fs_cache)
        self.deleted = False

        if self.storage_id == self.object_id:
            self.storage_prefix = 'mtp:::%s:::' % self.persistent_id

        self.is_ebook = (not self.is_folder
                         and self.name.rpartition('.')[-1].lower() in bexts)
Example #4
    def __init__(self, entry, fs_cache):
        self.all_storage_ids = fs_cache.all_storage_ids

        self.object_id = entry['id']
        self.is_folder = entry['is_folder']
        self.storage_id = entry['storage_id']
        # self.parent_id is None for storage objects
        self.parent_id = entry.get('parent_id', None)
        n = entry.get('name', None)
        if not n:
            n = '___'
        self.name = force_unicode(n, 'utf-8')
        self.persistent_id = entry.get('persistent_id', self.object_id)
        self.size = entry.get('size', 0)
        md = entry.get('modified', 0)
        try:
            if isinstance(md, tuple):
                self.last_modified = datetime(*(list(md)+[local_tz]))
            else:
                self.last_modified = datetime.fromtimestamp(md, local_tz)
        except:
            self.last_modified = datetime.fromtimestamp(0, local_tz)
        self.last_mod_string = self.last_modified.strftime('%Y/%m/%d %H:%M')
        self.last_modified = as_utc(self.last_modified)

        if self.storage_id not in self.all_storage_ids:
            raise ValueError('Storage id %s not valid for %s, valid values: %s'%(self.storage_id,
                entry, self.all_storage_ids))

        if self.parent_id == 0:
            self.parent_id = self.storage_id

        self.is_hidden = entry.get('is_hidden', False)
        self.is_system = entry.get('is_system', False)
        self.can_delete = entry.get('can_delete', True)

        self.files = []
        self.folders = []
        fs_cache.id_map[self.object_id] = self
        self.fs_cache = weakref.ref(fs_cache)
        self.deleted = False

        if self.storage_id == self.object_id:
            self.storage_prefix = 'mtp:::%s:::'%self.persistent_id

        self.is_ebook = (not self.is_folder and
                self.name.rpartition('.')[-1].lower() in bexts)
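The try/except in Examples #3 and #4 accepts the 'modified' field either as a broken-down date tuple or as a POSIX timestamp, and degrades to the epoch when the value is unusable. A standalone sketch of just that parsing step, with the standard library's inferred local timezone standing in for calibre's local_tz:

from datetime import datetime

def parse_modified(md):
    # Assumption: local_tz in the examples is the local timezone; here we
    # let the standard library infer it instead.
    local_tz = datetime.now().astimezone().tzinfo
    try:
        if isinstance(md, tuple):
            # (year, month, day, hour, minute, second) style tuple
            return datetime(*md, tzinfo=local_tz)
        return datetime.fromtimestamp(md, local_tz)
    except Exception:
        # Anything malformed degrades gracefully to the epoch.
        return datetime.fromtimestamp(0, local_tz)

print(parse_modified((2023, 7, 1, 12, 30, 0)))
print(parse_modified('not-a-timestamp'))  # falls back to 1970-01-01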
Example #5
 def dategetter(x):
     x = getattr(x, 'pubdate', None)
     if x is None:
         x = UNDEFINED_DATE
     return as_utc(x)
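Here as_utc() and UNDEFINED_DATE come from calibre.utils.date; the getter maps a missing pubdate to a well-defined sentinel so the value is always a comparable, timezone-aware datetime. A hypothetical sort-key sketch in the same spirit, with stand-ins for the calibre helpers:

from datetime import datetime, timezone

UNDEFINED_DATE = datetime(101, 1, 1, tzinfo=timezone.utc)  # stand-in sentinel

def as_utc(dt):
    # Stand-in for calibre.utils.date.as_utc; assumes aware datetimes.
    return dt.astimezone(timezone.utc)

def dategetter(x):
    pubdate = getattr(x, 'pubdate', None)
    if pubdate is None:
        pubdate = UNDEFINED_DATE
    return as_utc(pubdate)

class Book:
    def __init__(self, pubdate=None):
        if pubdate is not None:
            self.pubdate = pubdate

books = [Book(datetime(2020, 5, 1, tzinfo=timezone.utc)),
         Book(),  # no pubdate: sorts first via the far-past sentinel
         Book(datetime(2018, 1, 1, tzinfo=timezone.utc))]
books.sort(key=dategetter)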
Example #6
 def normalize_ui_val(self, val):
     return as_utc(val) if val is not None else None
Example #7
def UPDATED(dt, *args, **kwargs):
    return E.updated(
        as_utc(dt).strftime('%Y-%m-%dT%H:%M:%S+00:00'), *args, **kwargs)
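UPDATED renders a datetime as an Atom <updated> element; hard-coding the '+00:00' offset is safe only because as_utc() has already converted the value to UTC. A sketch of what that produces, assuming E is an lxml ElementMaker bound to the Atom namespace (which is how it appears to be used here):

from datetime import datetime, timezone
from lxml.builder import ElementMaker
from lxml.etree import tostring

# Assumption: E is namespaced to Atom, as in an OPDS/Atom feed builder.
E = ElementMaker(namespace='http://www.w3.org/2005/Atom',
                 nsmap={None: 'http://www.w3.org/2005/Atom'})

dt = datetime(2024, 3, 5, 13, 0, 0, tzinfo=timezone.utc)
print(tostring(E.updated(dt.strftime('%Y-%m-%dT%H:%M:%S+00:00'))))
# b'<updated xmlns="http://www.w3.org/2005/Atom">2024-03-05T13:00:00+00:00</updated>'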
Example #8
    def merge(self, results, min_year, do_asr=True):
        ans = Metadata(_('Unknown'))

        # We assume the shortest title has the least cruft in it
        ans.title = self.length_merge('title', results, null_value=ans.title)

        # No harm in having extra authors, maybe something useful like an
        # editor or translator
        ans.authors = self.length_merge('authors', results,
                null_value=ans.authors, shortest=False)

        # We assume the shortest publisher has the least cruft in it
        ans.publisher = self.length_merge('publisher', results,
                null_value=ans.publisher)

        # We assume the smallest set of tags has the least cruft in it
        ans.tags = self.length_merge('tags', results,
                null_value=ans.tags, shortest=msprefs['fewer_tags'])

        # We assume the longest series has the most info in it
        ans.series = self.length_merge('series', results,
                null_value=ans.series, shortest=False)
        for r in results:
            if r.series and r.series == ans.series:
                ans.series_index = r.series_index
                break

        # Average the rating over all sources
        ratings = []
        for r in results:
            rating = r.rating
            if rating and rating > 0 and rating <= 5:
                ratings.append(rating)
        if ratings:
            ans.rating = int(round(sum(ratings)/len(ratings)))

        # Smallest language is likely to be valid
        ans.language = self.length_merge('language', results,
                null_value=ans.language)

        # Choose longest comments
        ans.comments = self.length_merge('comments', results,
                null_value=ans.comments, shortest=False)

        # Published date
        if min_year:
            for r in results:
                year = getattr(r.pubdate, 'year', None)
                if year == min_year:
                    ans.pubdate = r.pubdate
                    break
            if getattr(ans.pubdate, 'year', None) == min_year:
                min_date = datetime(min_year, ans.pubdate.month, ans.pubdate.day,
                                    tzinfo=utc_tz)
            else:
                min_date = datetime(min_year, 1, 2, tzinfo=utc_tz)
            ans.pubdate = min_date
        else:
            min_date = datetime(3001, 1, 1, tzinfo=utc_tz)
            for r in results:
                if r.pubdate is not None:
                    candidate = as_utc(r.pubdate)
                    if candidate < min_date:
                        min_date = candidate
            if min_date.year < 3000:
                ans.pubdate = min_date

        # Identifiers
        for r in results:
            ans.identifiers.update(r.identifiers)

        # Cover URL
        ans.has_cached_cover_url = bool([r for r in results if
            getattr(r, 'has_cached_cover_url', False)])

        # Merge any other fields with no special handling (random merge)
        touched_fields = set()
        for r in results:
            if hasattr(r, 'identify_plugin'):
                touched_fields |= r.identify_plugin.touched_fields

        for f in touched_fields:
            if f.startswith('identifier:') or not ans.is_null(f):
                continue
            setattr(ans, f, self.random_merge(f, results,
                null_value=getattr(ans, f)))

        if do_asr:
            avg = [x.relevance_in_source for x in results]
            avg = sum(avg)/len(avg)
            ans.average_source_relevance = avg

        return ans
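In the no-min_year branch above, the merge selects the earliest pubdate by converting every candidate to UTC before comparing, with year 3001 acting as a 'nothing seen yet' sentinel. A minimal standalone sketch of that selection step, assuming timezone-aware pubdates and using astimezone() as a stand-in for as_utc():

from datetime import datetime, timezone

def earliest_pubdate(results):
    # Sentinel far in the future, mirroring the year-3001 marker above.
    min_date = datetime(3001, 1, 1, tzinfo=timezone.utc)
    for r in results:
        pubdate = getattr(r, 'pubdate', None)
        if pubdate is not None:
            candidate = pubdate.astimezone(timezone.utc)
            if candidate < min_date:
                min_date = candidate
    # Only report a date if at least one real pubdate was seen.
    return min_date if min_date.year < 3000 else None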
Example #9
def UPDATED(dt, *args, **kwargs):
    return E.updated(as_utc(dt).strftime('%Y-%m-%dT%H:%M:%S+00:00'), *args, **kwargs)
Example #10
 def is_equal(x, y):
     if hasattr(x, 'tzinfo'):
         x = as_utc(x)
     if hasattr(y, 'tzinfo'):
         y = as_utc(y)
     return x == y
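is_equal only normalises arguments that look like datetimes (anything carrying a tzinfo attribute) to UTC; everything else falls through to a plain == comparison. A quick illustration with standard-library timezones, using astimezone(timezone.utc) as a stand-in for as_utc():

from datetime import datetime, timedelta, timezone

def as_utc(dt):
    # Stand-in for calibre.utils.date.as_utc; assumes aware datetimes.
    return dt.astimezone(timezone.utc)

def is_equal(x, y):
    if hasattr(x, 'tzinfo'):
        x = as_utc(x)
    if hasattr(y, 'tzinfo'):
        y = as_utc(y)
    return x == y

plus_two = timezone(timedelta(hours=2))
a = datetime(2024, 6, 1, 14, 0, tzinfo=plus_two)      # 12:00 UTC
b = datetime(2024, 6, 1, 12, 0, tzinfo=timezone.utc)  # 12:00 UTC
print(is_equal(a, b))      # True: same instant once both are in UTC
print(is_equal('a', 'b'))  # False: non-datetimes use plain equality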