Code example #1
File: video.py Project: vishwatejharer/funnel
    def video(self):
        data = None
        if self.video_source and self.video_id:
            # Check for cached data
            data = self._video_cache

            if not data:
                data = {
                    'source': self.video_source,
                    'id': self.video_id,
                    'url': self.video_url,
                    'embeddable_url': self.embeddable_video_url,
                    'duration': 0,
                    'uploaded_at': '',
                    'thumbnail': '',
                }
                if self.video_source == 'youtube':
                    youtube_video = requests.get(
                        f'https://www.googleapis.com/youtube/v3/videos?part=snippet,contentDetails&id={self.video_id}&key={app.config["YOUTUBE_API_KEY"]}'
                    ).json()
                    if not youtube_video or 'items' not in youtube_video:
                        raise VideoException(
                            "Unable to fetch data, please check the youtube url or API key"
                        )
                    elif not youtube_video['items']:
                        # Response has zero items for the given video ID.
                        # This will happen if the video has been removed from YouTube.
                        self._source_video_exists = False
                    else:
                        youtube_video = youtube_video['items'][0]

                        data['duration'] = parse_duration(
                            youtube_video['contentDetails']
                            ['duration']).total_seconds()
                        data['uploaded_at'] = parse_isoformat(
                            youtube_video['snippet']['publishedAt'],
                            naive=False)
                        data['thumbnail'] = youtube_video['snippet'][
                            'thumbnails']['medium']['url']
                elif self.video_source == 'vimeo':
                    vimeo_video = requests.get(
                        f'https://vimeo.com/api/v2/video/{self.video_id}.json')
                    if vimeo_video.status_code == 200:
                        vimeo_video = vimeo_video.json()[0]

                        data['duration'] = vimeo_video['duration']
                        # Vimeo returns a naive datetime; add the UTC timezone to it
                        data['uploaded_at'] = parse_isoformat(
                            vimeo_video['upload_date'],
                            delimiter=' ').replace(tzinfo=utc)
                        data['thumbnail'] = vimeo_video['thumbnail_medium']
                    else:
                        # Video doesn't exist on Vimeo anymore
                        self._source_video_exists = False
                self._video_cache = data  # using _video_cache setter to set cache
        return data
Code example #2
File: models.py Project: hasgeek/eventframe
 def import_from(self, data):
     super(ContentMixin, self).import_from(data)
     self.start_datetime = parse_isoformat(data['start_datetime'])
     self.end_datetime = parse_isoformat(data['end_datetime'])
     self.timezone = data['timezone']
     self.location_name = data['location_name']
     self.location_address = data['location_address']
     self.mapmarker = data['mapmarker']
     self.capacity = data['capacity']
     self.allow_waitlisting = data['allow_waitlisting']
     self.allow_maybe = data['allow_maybe']
Code example #3
File: test_utils.py Project: mohi7solanki/coaster
    def test_parse_isoformat(self):
        assert parse_isoformat(
            '1882-12-11T00:00:00.1234Z'
        ) == datetime.datetime(1882, 12, 11, 0, 0, 0, 123400)
        assert parse_isoformat('1882-12-11T00:00:00Z') == datetime.datetime(
            1882, 12, 11, 0, 0)
        assert parse_isoformat(
            '1882-12-11T00:00:00.1234Z', naive=False
        ) == datetime.datetime(1882, 12, 11, 0, 0, 0, 123400, tzinfo=UTC)
        assert parse_isoformat(
            '1882-12-11T00:00:00Z', naive=False
        ) == datetime.datetime(1882, 12, 11, 0, 0, tzinfo=UTC)

        with self.assertRaises(ParseError):
            parse_isoformat('2019-05-03T05:02:26.340937Z\'')

        with self.assertRaises(ParseError):
            parse_isoformat('2019-05-03T05:02:26.340937Z\'', naive=False)
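The tests above are the most compact statement of how parse_isoformat behaves. Below is a minimal usage sketch, assuming the coaster.utils import path used by the coaster test suites, restating only the call patterns those tests exercise:

from coaster.utils import parse_isoformat

# Default: result is a naive datetime, normalised to UTC
parse_isoformat('1882-12-11T00:00:00Z')  # -> datetime.datetime(1882, 12, 11, 0, 0)

# naive=False keeps the timezone on the result
parse_isoformat('1882-12-11T00:00:00Z', naive=False)  # -> datetime.datetime(1882, 12, 11, 0, 0, tzinfo=UTC)

# Space-delimited timestamps (such as Vimeo's upload_date in example #1) need delimiter=' '
parse_isoformat('2012-05-21 23:06:08', delimiter=' ')  # -> datetime.datetime(2012, 5, 21, 23, 6, 8)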
Code example #4
File: website.py Project: hasgeek/eventframe
 def import_from(self, data):
     self.uuid = data['uuid']
     self.name = data['name']
     self.title = data['title']
     self.author = data.get('author')
     self.published_at = parse_isoformat(data['published_at'])
     self.properties = data['properties']
Code example #5
File: video.py Project: vishwatejharer/funnel
 def _video_cache(self):
     data = redis_store.hgetall(self.video_cache_key)
     if data:
         if 'uploaded_at' in data and data['uploaded_at']:
             data['uploaded_at'] = parse_isoformat(data['uploaded_at'],
                                                   naive=False)
         if 'duration' in data and data['duration']:
             data['duration'] = int(data['duration'])
     return data
Code example #6
File: models.py Project: hasgeek/eventframe
 def import_from(self, data):
     super(ContentMixin, self).import_from(data)
     last = self.last_revision()
     if last and last.updated_at >= parse_isoformat(data['revision_updated_at']):
         # Don't import if data is older than or the same as the last revision
         return
     revision = self.revise()
     revision.title = data['title']
     revision.description = data['description']
     revision.content = data['content']
     revision.template = data['template']
Code example #7
File: models.py Project: hasgeek/eventframe
 def _data(self):
     if not hasattr(self, '_data_cached'):
         # Get JSON and cache locally
         try:
             r = requests.get('http://funnel.hasgeek.com/%s/json' % self.funnel_name)
             data = r.json() if callable(r.json) else r.json
             sectionmap = dict([(s['title'], s['name']) for s in data['sections']])
             for proposal in data['proposals']:
                 proposal['submitted'] = parse_isoformat(proposal['submitted'])
                 proposal['section_name'] = sectionmap.get(proposal['section'])
                 v = proposal['votes']
                 proposal['votes'] = '+%d' % v if v > 0 else '%d' % v
             self._data_cached = data
         except ConnectionError:
             self._data_cached = {
                 'proposals': [],
                 'sections': [],
                 'space': {},
             }
     return self._data_cached
Code example #8
File: index.py Project: shezadaibara/hasjob
def index(basequery=None,
          md5sum=None,
          tag=None,
          domain=None,
          location=None,
          title=None,
          showall=True,
          statuses=None,
          batched=True,
          ageless=False,
          template_vars={}):

    is_siteadmin = lastuser.has_permission('siteadmin')
    if basequery is None:
        is_index = True
    else:
        is_index = False
    now = datetime.utcnow()
    if basequery is None and not (g.user or g.kiosk or
                                  (g.board and not g.board.require_login)):
        showall = False
        batched = False

    if basequery is None:
        basequery = JobPost.query

    # Apply request.args filters
    data_filters = {}
    f_types = request.args.getlist('t')
    while '' in f_types:
        f_types.remove('')
    if f_types:
        data_filters['types'] = f_types
        basequery = basequery.join(JobType).filter(JobType.name.in_(f_types))
    f_categories = request.args.getlist('c')
    while '' in f_categories:
        f_categories.remove('')
    if f_categories:
        data_filters['categories'] = f_categories
        basequery = basequery.join(JobCategory).filter(
            JobCategory.name.in_(f_categories))
    r_locations = request.args.getlist('l')
    if location:
        r_locations.append(location['geonameid'])
    f_locations = []
    remote_location = getbool(request.args.get('anywhere')) or False
    for rl in r_locations:
        if isinstance(rl, int) and rl > 0:
            f_locations.append(rl)
        elif rl == 'anywhere':
            remote_location = True
        elif rl.isdigit():
            f_locations.append(int(rl))
        elif rl:
            ld = location_geodata(rl)
            if ld:
                f_locations.append(ld['geonameid'])
    remote_location_query = basequery.filter(
        JobPost.remote_location == True)  # NOQA
    if f_locations:
        locations_query = basequery.join(JobLocation).filter(
            JobLocation.geonameid.in_(f_locations))
    else:
        locations_query = basequery.join(JobLocation)
    if f_locations and remote_location:
        data_filters['locations'] = f_locations
        data_filters['anywhere'] = True
        recency = JobPost.datetime > datetime.utcnow() - agelimit
        basequery = locations_query.filter(recency).union(
            remote_location_query.filter(recency))
    elif f_locations:
        data_filters['locations'] = f_locations
        basequery = locations_query
    elif remote_location:
        data_filters['anywhere'] = True
        # Only works as a positive filter: you can't search for jobs that are NOT anywhere
        basequery = remote_location_query
    if 'currency' in request.args and request.args[
            'currency'] in CURRENCY.keys():
        currency = request.args['currency']
        data_filters['currency'] = currency
        basequery = basequery.filter(JobPost.pay_currency == currency)
        pay_graph = currency
    else:
        pay_graph = False
    if getbool(request.args.get('equity')):
        # Only works as a positive filter: you can't search for jobs that DON'T pay in equity
        data_filters['equity'] = True
        basequery = basequery.filter(JobPost.pay_equity_min != None)  # NOQA
    if 'pay' in request.args or ('pmin' in request.args
                                 and 'pmax' in request.args):
        if 'pay' in request.args:
            f_pay = string_to_number(request.args['pay'])
            f_min = int(f_pay * 0.90)
            f_max = int(f_pay * 1.30)
        else:
            # Legacy URL with min/max values
            f_min = string_to_number(request.args['pmin'])
            f_max = string_to_number(request.args['pmax'])
            f_pay = f_min  # Use min for pay now
        if f_pay is not None and f_min is not None and f_max is not None:
            data_filters['pay'] = f_pay
            basequery = basequery.filter(JobPost.pay_cash_min < f_max,
                                         JobPost.pay_cash_max >= f_min)
    else:
        f_pay = None
        f_min = None
        f_max = None

    if getbool(request.args.get('archive')):
        ageless = True
        data_filters['archive'] = True
        statuses = POSTSTATUS.ARCHIVED

    search_domains = None
    if request.args.get('q'):
        q = for_tsquery(request.args['q'])
        try:
            # TODO: Can we do syntax validation without a database roundtrip?
            db.session.query(db.func.to_tsquery(q)).all()
        except ProgrammingError:
            db.session.rollback()
            g.event_data['search_syntax_error'] = (request.args['q'], q)
            if not request.is_xhr:
                flash(
                    _(u"Search terms ignored because this didn’t parse: {query}"
                      ).format(query=q), 'danger')
        else:
            # Query's good? Use it.
            data_filters['query'] = q
            search_domains = Domain.query.filter(
                Domain.search_vector.match(q, postgresql_regconfig='english'),
                Domain.is_banned == False).options(
                    db.load_only('name', 'title', 'logo_url')).all()  # NOQA
            basequery = basequery.filter(
                JobPost.search_vector.match(q, postgresql_regconfig='english'))

    if data_filters:
        g.event_data['filters'] = data_filters
        showall = True
        batched = True

    # getposts sets g.board_jobs, used below
    posts = getposts(basequery,
                     pinned=True,
                     showall=showall,
                     statuses=statuses,
                     ageless=ageless).all()

    if is_siteadmin or (g.user and g.user.flags.get('is_employer_month')):
        cache_viewcounts(posts)

    if posts:
        employer_name = posts[0].company_name
    else:
        employer_name = u'a single employer'

    if g.user:
        g.starred_ids = set(
            g.user.starred_job_ids(agelimit if not ageless else None))
    else:
        g.starred_ids = set()

    jobpost_ab = session_jobpost_ab()

    # Make lookup slightly faster in the loop below since 'g' is a proxy
    board = g.board
    if board:
        board_jobs = g.board_jobs
    else:
        board_jobs = {}

    if is_index and posts and not g.kiosk:
        # Group posts by email_domain on index page only, when not in kiosk mode
        grouped = OrderedDict()
        for post in posts:
            pinned = post.pinned
            if board is not None:
                blink = board_jobs.get(
                    post.id
                )  # board_jobs only contains the last 30 days, no archive
                if blink is not None:
                    pinned = blink.pinned
            if pinned:
                # Make pinned posts appear in a group of one
                grouped.setdefault(('s', post.hashid), []).append(
                    (pinned, post, bgroup(jobpost_ab, post)))
            elif post.status == POSTSTATUS.ANNOUNCEMENT:
                # Make announcements also appear in a group of one
                grouped.setdefault(('a', post.hashid), []).append(
                    (pinned, post, bgroup(jobpost_ab, post)))
            elif post.domain.is_webmail:
                grouped.setdefault(('ne', post.md5sum), []).append(
                    (pinned, post, bgroup(jobpost_ab, post)))
            else:
                grouped.setdefault(('nd', post.email_domain), []).append(
                    (pinned, post, bgroup(jobpost_ab, post)))
        pinsandposts = None
    else:
        grouped = None
        if g.board:
            pinsandposts = []
            for post in posts:
                pinned = post.pinned
                if board is not None:
                    blink = board_jobs.get(
                        post.id
                    )  # board_jobs only contains the last 30 days, no archive
                    if blink is not None:
                        pinned = blink.pinned
                pinsandposts.append((pinned, post, bgroup(jobpost_ab, post)))
        else:
            pinsandposts = [(post.pinned, post, bgroup(jobpost_ab, post))
                            for post in posts]

    # Pick a header campaign (only if not kiosk or an XHR reload)
    pay_graph_data = None
    if not g.kiosk:
        if g.preview_campaign:
            header_campaign = g.preview_campaign
        else:
            geonameids = g.user_geonameids + f_locations
            header_campaign = Campaign.for_context(CAMPAIGN_POSITION.HEADER,
                                                   board=g.board,
                                                   user=g.user,
                                                   anon_user=g.anon_user,
                                                   geonameids=geonameids)
        if pay_graph:
            pay_graph_data = make_pay_graph(pay_graph,
                                            posts,
                                            rmin=f_min,
                                            rmax=f_max)
    else:
        header_campaign = None

    loadmore = False
    if batched:
        # Figure out where the batch should start from
        startdate = None
        if 'startdate' in request.values:
            try:
                startdate = parse_isoformat(request.values['startdate'])
            except ValueError:
                pass

        batchsize = 32

        # list of posts that were pinned at the time of first load
        pinned_hashids = request.args.getlist('ph')
        # Depending on the display mechanism (grouped or ungrouped), extract the batch
        if grouped:
            if not startdate:
                startindex = 0
                for row in grouped.values():
                    # break when a non-pinned post is encountered
                    if (not row[0][0]):
                        break
                    else:
                        pinned_hashids.append(row[0][1].hashid)
            else:
                # Loop through group looking for start of next batch. See below to understand the
                # nesting structure of 'grouped'
                for startindex, row in enumerate(grouped.values()):
                    # Skip pinned posts when looking for starting index
                    if (row[0][1].hashid not in pinned_hashids
                            and row[0][1].datetime < startdate):
                        break

            batch = grouped.items()[startindex:startindex + batchsize]
            if startindex + batchsize < len(grouped):
                # Get the datetime of the last group's first item
                # batch = [((type, domain), [(pinned, post, bgroup), ...])]
                # batch[-1] = ((type, domain), [(pinned, post, bgroup), ...])
                # batch[-1][1] = [(pinned, post, bgroup), ...]
                # batch[-1][1][0] = (pinned, post, bgroup)
                # batch[-1][1][0][1] = post
                loadmore = batch[-1][1][0][1].datetime
            grouped = OrderedDict(batch)
        elif pinsandposts:
            if not startdate:
                startindex = 0
                for row in pinsandposts:
                    # break when a non-pinned post is encountered
                    if not row[0]:
                        break
                    else:
                        pinned_hashids.append(row[1].hashid)
            else:
                for startindex, row in enumerate(pinsandposts):
                    # Skip pinned posts when looking for starting index
                    if (row[1].hashid not in pinned_hashids
                            and row[1].datetime < startdate):
                        break

            batch = pinsandposts[startindex:startindex + batchsize]
            if startindex + batchsize < len(pinsandposts):
                # batch = [(pinned, post), ...]
                loadmore = batch[-1][1].datetime
            pinsandposts = batch
    if grouped:
        g.impressions = {
            post.id: (pinflag, post.id, is_bgroup)
            for group in grouped.itervalues()
            for pinflag, post, is_bgroup in group
        }
    elif pinsandposts:
        g.impressions = {
            post.id: (pinflag, post.id, is_bgroup)
            for pinflag, post, is_bgroup in pinsandposts
        }

    # Test values for development:
    # if not g.user_geonameids:
    #     g.user_geonameids = [1277333, 1277331, 1269750]
    if not location and 'l' not in request.args and g.user_geonameids and (
            g.user or g.anon_user) and (
                (not g.board.auto_locations) if g.board else True):
        # No location filters? Prompt the user
        ldata = location_geodata(g.user_geonameids)
        location_prompts = [
            ldata[geonameid] for geonameid in g.user_geonameids
            if geonameid in ldata
        ]
    else:
        location_prompts = []

    query_params = request.args.to_dict(flat=False)
    if loadmore:
        query_params.update({
            'startdate': loadmore.isoformat() + 'Z',
            'ph': pinned_hashids
        })
    return dict(pinsandposts=pinsandposts,
                grouped=grouped,
                now=now,
                newlimit=newlimit,
                title=title,
                md5sum=md5sum,
                domain=domain,
                location=location,
                employer_name=employer_name,
                showall=showall,
                is_index=is_index,
                header_campaign=header_campaign,
                loadmore=loadmore,
                location_prompts=location_prompts,
                search_domains=search_domains,
                query_params=query_params,
                is_siteadmin=is_siteadmin,
                pay_graph_data=pay_graph_data,
                paginated=index_is_paginated(),
                template_vars=template_vars)
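The index view above parses the client-supplied startdate inside a try/except so that bad input is simply ignored. A small standalone sketch of that defensive pattern (the helper name is hypothetical; only ValueError is caught, matching the view above):

from coaster.utils import parse_isoformat

def parse_startdate(value):
    # Hypothetical helper mirroring the try/except in the index view:
    # an unparseable client-supplied timestamp means "no start date".
    try:
        return parse_isoformat(value)
    except ValueError:
        return None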
Code example #9
File: index.py Project: hasgeek/hasjob
def fetch_jobposts(request_args, request_values, filters, is_index, board, board_jobs, gkiosk, basequery, md5sum, domain, location, title, showall, statusfilter, batched, ageless, template_vars, search_query=None, query_string=None):
    if basequery is None:
        basequery = JobPost.query

    # Apply request.args filters
    data_filters = {}
    f_types = filters.get('t') or request_args.getlist('t')
    while '' in f_types:
        f_types.remove('')
    if f_types:
        data_filters['types'] = f_types
        basequery = basequery.join(JobType).filter(JobType.name.in_(f_types))
    f_categories = filters.get('c') or request_args.getlist('c')
    while '' in f_categories:
        f_categories.remove('')
    if f_categories:
        data_filters['categories'] = f_categories
        basequery = basequery.join(JobCategory).filter(JobCategory.name.in_(f_categories))

    f_domains = filters.get('d') or request_args.getlist('d')
    while '' in f_domains:
        f_domains.remove('')
    if f_domains:
        basequery = basequery.join(Domain).filter(Domain.name.in_(f_domains))

    f_tags = filters.get('k') or request_args.getlist('k')
    while '' in f_tags:
        f_tags.remove('')
    if f_tags:
        basequery = basequery.join(JobPostTag).join(Tag).filter(Tag.name.in_(f_tags))

    data_filters['location_names'] = r_locations = filters.get('l') or request_args.getlist('l')
    if location:
        r_locations.append(location['geonameid'])
    f_locations = []
    remote_location = getbool(filters.get('anywhere') or request_args.get('anywhere')) or False
    if remote_location:
        data_filters['location_names'].append('anywhere')
    for rl in r_locations:
        if isinstance(rl, int) and rl > 0:
            f_locations.append(rl)
        elif rl == 'anywhere':
            remote_location = True
        elif rl.isdigit():
            f_locations.append(int(rl))
        elif rl:
            ld = location_geodata(rl)
            if ld:
                f_locations.append(ld['geonameid'])
    remote_location_query = basequery.filter(JobPost.remote_location == True)  # NOQA
    if f_locations:
        locations_query = basequery.join(JobLocation).filter(JobLocation.geonameid.in_(f_locations))
    else:
        locations_query = basequery.join(JobLocation)
    if f_locations and remote_location:
        data_filters['locations'] = f_locations
        data_filters['anywhere'] = True
        recency = JobPost.state.LISTED
        basequery = locations_query.filter(recency).union(remote_location_query.filter(recency))
    elif f_locations:
        data_filters['locations'] = f_locations
        basequery = locations_query
    elif remote_location:
        data_filters['anywhere'] = True
        # Only works as a positive filter: you can't search for jobs that are NOT anywhere
        basequery = remote_location_query

    currency = filters.get('currency') or request_args.get('currency')
    if currency in CURRENCY.keys():
        data_filters['currency'] = currency
        basequery = basequery.filter(JobPost.pay_currency == currency)
        pay_graph = currency
    else:
        pay_graph = False
    if getbool(filters.get('equity') or request_args.get('equity')):
        # Only works as a positive filter: you can't search for jobs that DON'T pay in equity
        data_filters['equity'] = True
        basequery = basequery.filter(JobPost.pay_equity_min != None)  # NOQA

    if filters.get('pay') or 'pay' in request_args or ('pmin' in request_args and 'pmax' in request_args):
        if 'pay' in request_args or filters.get('pay'):
            f_pay = filters['pay'] if filters.get('pay') else string_to_number(request_args['pay'])
            if f_pay is not None:
                f_min = int(f_pay * 0.90)
                f_max = int(f_pay * 1.30)
            else:
                f_min = None
                f_max = None
        else:
            # Legacy URL with min/max values
            f_min = string_to_number(request_args['pmin'])
            f_max = string_to_number(request_args['pmax'])
            f_pay = f_min  # Use min for pay now
        if f_pay is not None and f_min is not None and f_max is not None:
            data_filters['pay'] = f_pay
            basequery = basequery.filter(JobPost.pay_cash_min < f_max, JobPost.pay_cash_max >= f_min)
    else:
        f_pay = None
        f_min = None
        f_max = None

    if getbool(request_args.get('archive')):
        ageless = True
        data_filters['archive'] = True
        statusfilter = JobPost.state.ARCHIVED

    if query_string:
        data_filters['query'] = search_query
        data_filters['query_string'] = query_string
        basequery = basequery.filter(JobPost.search_vector.match(search_query, postgresql_regconfig='english'))

    if data_filters:
        showall = True
        batched = True

    posts = getposts(basequery, pinned=True, showall=showall, statusfilter=statusfilter, ageless=ageless).all()

    if getbool(request_args.get('embed')):
        embed = True
        if posts:
            limit = string_to_number(request_args.get('limit'))
            if limit is not None:
                posts = posts[:limit]
            else:
                posts = posts[:8]
    else:
        embed = False

    if posts:
        employer_name = posts[0].company_name
    else:
        employer_name = u'a single employer'

    jobpost_ab = session_jobpost_ab()
    if is_index and posts and not gkiosk and not embed:
        # Group posts by email_domain on index page only, when not in kiosk mode
        grouped = OrderedDict()
        for post in posts:
            pinned = post.pinned
            if board is not None:
                blink = board_jobs.get(post.id)  # board_jobs only contains the last 30 days, no archive
                if blink is not None:
                    pinned = blink.pinned
            if pinned:
                # Make pinned posts appear in a group of one
                grouped.setdefault(('s', post.hashid), []).append(
                    (pinned, post, bgroup(jobpost_ab, post)))
            elif post.state.ANNOUNCEMENT:
                # Make announcements also appear in a group of one
                grouped.setdefault(('a', post.hashid), []).append(
                    (pinned, post, bgroup(jobpost_ab, post)))
            elif post.domain.is_webmail:
                grouped.setdefault(('ne', post.md5sum), []).append(
                    (pinned, post, bgroup(jobpost_ab, post)))
            else:
                grouped.setdefault(('nd', post.email_domain), []).append(
                    (pinned, post, bgroup(jobpost_ab, post)))
        pinsandposts = None
    else:
        grouped = None
        if board:
            pinsandposts = []
            for post in posts:
                pinned = post.pinned
                if board is not None:
                    blink = board_jobs.get(post.id)  # board_jobs only contains the last 30 days, no archive
                    if blink is not None:
                        pinned = blink.pinned
                pinsandposts.append((pinned, post, bgroup(jobpost_ab, post)))
        else:
            pinsandposts = [(post.pinned, post, bgroup(jobpost_ab, post)) for post in posts]

    # Pick a header campaign (only if not kiosk or an XHR reload)
    pay_graph_data = None

    loadmore = False
    if batched:
        # Figure out where the batch should start from
        startdate = None
        if 'startdate' in request_values:
            try:
                startdate = parse_isoformat(request_values['startdate'])
            except TypeError:
                abort(400)
            except ValueError:
                pass

        batchsize = 32

        # list of posts that were pinned at the time of first load
        pinned_hashids = request_args.getlist('ph')
        # Depending on the display mechanism (grouped or ungrouped), extract the batch
        if grouped:
            if not startdate:
                startindex = 0
                for row in grouped.values():
                    # break when a non-pinned post is encountered
                    if (not row[0][0]):
                        break
                    else:
                        pinned_hashids.append(row[0][1].hashid)
            else:
                # Loop through group looking for start of next batch. See below to understand the
                # nesting structure of 'grouped'
                for startindex, row in enumerate(grouped.values()):
                    # Skip pinned posts when looking for starting index
                    if (row[0][1].hashid not in pinned_hashids and row[0][1].datetime < startdate):
                        break

            batch = grouped.items()[startindex:startindex + batchsize]
            if startindex + batchsize < len(grouped):
                # Get the datetime of the last group's first item
                # batch = [((type, domain), [(pinned, post, bgroup), ...])]
                # batch[-1] = ((type, domain), [(pinned, post, bgroup), ...])
                # batch[-1][1] = [(pinned, post, bgroup), ...]
                # batch[-1][1][0] = (pinned, post, bgroup)
                # batch[-1][1][0][1] = post
                loadmore = batch[-1][1][0][1].datetime
            grouped = OrderedDict(batch)
        elif pinsandposts:
            if not startdate:
                startindex = 0
                for row in pinsandposts:
                    # break when a non-pinned post is encountered
                    if not row[0]:
                        break
                    else:
                        pinned_hashids.append(row[1].hashid)
            else:
                for startindex, row in enumerate(pinsandposts):
                    # Skip pinned posts when looking for starting index
                    if (row[1].hashid not in pinned_hashids and row[1].datetime < startdate):
                        break

            batch = pinsandposts[startindex:startindex + batchsize]
            if startindex + batchsize < len(pinsandposts):
                # batch = [(pinned, post), ...]
                loadmore = batch[-1][1].datetime
            pinsandposts = batch

    query_params = request_args.to_dict(flat=False)
    if loadmore:
        query_params.update({'startdate': loadmore.isoformat() + 'Z', 'ph': pinned_hashids})
    if location:
        data_filters['location_names'].append(location['name'])
        query_params.update({'l': location['name']})

    if pay_graph:
        pay_graph_data = make_pay_graph(pay_graph, posts, rmin=f_min, rmax=f_max)

    return dict(posts=posts, pinsandposts=pinsandposts, grouped=grouped, newlimit=newlimit, title=title,
        md5sum=md5sum, domain=domain, location=location, employer_name=employer_name,
        showall=showall, f_locations=f_locations, loadmore=loadmore,
        query_params=query_params, data_filters=data_filters,
        pay_graph_data=pay_graph_data, paginated=index_is_paginated(),
        template_vars=template_vars, embed=embed)
Code example #10
File: index.py Project: dhananjayk/hasjob
def fetch_jobposts(request_args, request_values, filters, is_index, board, board_jobs, gkiosk, basequery, md5sum, domain, location, title, showall, statusfilter, batched, ageless, template_vars, search_query=None, query_string=None):
    if basequery is None:
        basequery = JobPost.query

    # Apply request.args filters
    data_filters = {}
    f_types = filters.get('t') or request_args.getlist('t')
    while '' in f_types:
        f_types.remove('')
    if f_types:
        data_filters['types'] = f_types
        basequery = basequery.join(JobType).filter(JobType.name.in_(f_types))
    f_categories = filters.get('c') or request_args.getlist('c')
    while '' in f_categories:
        f_categories.remove('')
    if f_categories:
        data_filters['categories'] = f_categories
        basequery = basequery.join(JobCategory).filter(JobCategory.name.in_(f_categories))

    f_domains = filters.get('d') or request_args.getlist('d')
    while '' in f_domains:
        f_domains.remove('')
    if f_domains:
        basequery = basequery.join(Domain).filter(Domain.name.in_(f_domains))

    f_tags = filters.get('k') or request_args.getlist('k')
    while '' in f_tags:
        f_tags.remove('')
    if f_tags:
        basequery = basequery.join(JobPostTag).join(Tag).filter(Tag.name.in_(f_tags))

    data_filters['location_names'] = r_locations = filters.get('l') or request_args.getlist('l')
    if location:
        r_locations.append(location['geonameid'])
    f_locations = []
    remote_location = getbool(filters.get('anywhere') or request_args.get('anywhere')) or False
    if remote_location:
        data_filters['location_names'].append('anywhere')
    for rl in r_locations:
        if isinstance(rl, int) and rl > 0:
            f_locations.append(rl)
        elif rl == 'anywhere':
            remote_location = True
        elif rl.isdigit():
            f_locations.append(int(rl))
        elif rl:
            ld = location_geodata(rl)
            if ld:
                f_locations.append(ld['geonameid'])
    remote_location_query = basequery.filter(JobPost.remote_location == True)  # NOQA
    if f_locations:
        locations_query = basequery.join(JobLocation).filter(JobLocation.geonameid.in_(f_locations))
    else:
        locations_query = basequery.join(JobLocation)
    if f_locations and remote_location:
        data_filters['locations'] = f_locations
        data_filters['anywhere'] = True
        recency = JobPost.state.LISTED
        basequery = locations_query.filter(recency).union(remote_location_query.filter(recency))
    elif f_locations:
        data_filters['locations'] = f_locations
        basequery = locations_query
    elif remote_location:
        data_filters['anywhere'] = True
        # Only works as a positive filter: you can't search for jobs that are NOT anywhere
        basequery = remote_location_query

    currency = filters.get('currency') or request_args.get('currency')
    if currency in CURRENCY.keys():
        data_filters['currency'] = currency
        basequery = basequery.filter(JobPost.pay_currency == currency)
        pay_graph = currency
    else:
        pay_graph = False
    if getbool(filters.get('equity') or request_args.get('equity')):
        # Only works as a positive filter: you can't search for jobs that DON'T pay in equity
        data_filters['equity'] = True
        basequery = basequery.filter(JobPost.pay_equity_min != None)  # NOQA

    if filters.get('pay') or 'pay' in request_args or ('pmin' in request_args and 'pmax' in request_args):
        if 'pay' in request_args or filters.get('pay'):
            f_pay = filters['pay'] if filters.get('pay') else string_to_number(request_args['pay'])
            if f_pay is not None:
                f_min = int(f_pay * 0.90)
                f_max = int(f_pay * 1.30)
            else:
                f_min = None
                f_max = None
        else:
            # Legacy URL with min/max values
            f_min = string_to_number(request_args['pmin'])
            f_max = string_to_number(request_args['pmax'])
            f_pay = f_min  # Use min for pay now
        if f_pay is not None and f_min is not None and f_max is not None:
            data_filters['pay'] = f_pay
            basequery = basequery.filter(JobPost.pay_cash_min < f_max, JobPost.pay_cash_max >= f_min)
    else:
        f_pay = None
        f_min = None
        f_max = None

    if getbool(request_args.get('archive')):
        ageless = True
        data_filters['archive'] = True
        statusfilter = JobPost.state.ARCHIVED

    if query_string:
        data_filters['query'] = search_query
        data_filters['query_string'] = query_string
        basequery = basequery.filter(JobPost.search_vector.match(search_query, postgresql_regconfig='english'))

    if data_filters:
        showall = True
        batched = True

    posts = getposts(basequery, pinned=True, showall=showall, statusfilter=statusfilter, ageless=ageless).all()

    if posts:
        employer_name = posts[0].company_name
    else:
        employer_name = u'a single employer'

    jobpost_ab = session_jobpost_ab()
    if is_index and posts and not gkiosk:
        # Group posts by email_domain on index page only, when not in kiosk mode
        grouped = OrderedDict()
        for post in posts:
            pinned = post.pinned
            if board is not None:
                blink = board_jobs.get(post.id)  # board_jobs only contains the last 30 days, no archive
                if blink is not None:
                    pinned = blink.pinned
            if pinned:
                # Make pinned posts appear in a group of one
                grouped.setdefault(('s', post.hashid), []).append(
                    (pinned, post, bgroup(jobpost_ab, post)))
            elif post.state.ANNOUNCEMENT:
                # Make announcements also appear in a group of one
                grouped.setdefault(('a', post.hashid), []).append(
                    (pinned, post, bgroup(jobpost_ab, post)))
            elif post.domain.is_webmail:
                grouped.setdefault(('ne', post.md5sum), []).append(
                    (pinned, post, bgroup(jobpost_ab, post)))
            else:
                grouped.setdefault(('nd', post.email_domain), []).append(
                    (pinned, post, bgroup(jobpost_ab, post)))
        pinsandposts = None
    else:
        grouped = None
        if board:
            pinsandposts = []
            for post in posts:
                pinned = post.pinned
                if board is not None:
                    blink = board_jobs.get(post.id)  # board_jobs only contains the last 30 days, no archive
                    if blink is not None:
                        pinned = blink.pinned
                pinsandposts.append((pinned, post, bgroup(jobpost_ab, post)))
        else:
            pinsandposts = [(post.pinned, post, bgroup(jobpost_ab, post)) for post in posts]

    # Pick a header campaign (only if not kiosk or an XHR reload)
    pay_graph_data = None

    loadmore = False
    if batched:
        # Figure out where the batch should start from
        startdate = None
        if 'startdate' in request_values:
            try:
                startdate = parse_isoformat(request_values['startdate'])
            except ValueError:
                pass

        batchsize = 32

        # list of posts that were pinned at the time of first load
        pinned_hashids = request_args.getlist('ph')
        # Depending on the display mechanism (grouped or ungrouped), extract the batch
        if grouped:
            if not startdate:
                startindex = 0
                for row in grouped.values():
                    # break when a non-pinned post is encountered
                    if (not row[0][0]):
                        break
                    else:
                        pinned_hashids.append(row[0][1].hashid)
            else:
                # Loop through group looking for start of next batch. See below to understand the
                # nesting structure of 'grouped'
                for startindex, row in enumerate(grouped.values()):
                    # Skip pinned posts when looking for starting index
                    if (row[0][1].hashid not in pinned_hashids and row[0][1].datetime < startdate):
                        break

            batch = grouped.items()[startindex:startindex + batchsize]
            if startindex + batchsize < len(grouped):
                # Get the datetime of the last group's first item
                # batch = [((type, domain), [(pinned, post, bgroup), ...])]
                # batch[-1] = ((type, domain), [(pinned, post, bgroup), ...])
                # batch[-1][1] = [(pinned, post, bgroup), ...]
                # batch[-1][1][0] = (pinned, post, bgroup)
                # batch[-1][1][0][1] = post
                loadmore = batch[-1][1][0][1].datetime
            grouped = OrderedDict(batch)
        elif pinsandposts:
            if not startdate:
                startindex = 0
                for row in pinsandposts:
                    # break when a non-pinned post is encountered
                    if not row[0]:
                        break
                    else:
                        pinned_hashids.append(row[1].hashid)
            else:
                for startindex, row in enumerate(pinsandposts):
                    # Skip pinned posts when looking for starting index
                    if (row[1].hashid not in pinned_hashids and row[1].datetime < startdate):
                        break

            batch = pinsandposts[startindex:startindex + batchsize]
            if startindex + batchsize < len(pinsandposts):
                # batch = [(pinned, post), ...]
                loadmore = batch[-1][1].datetime
            pinsandposts = batch

    query_params = request_args.to_dict(flat=False)
    if loadmore:
        query_params.update({'startdate': loadmore.isoformat() + 'Z', 'ph': pinned_hashids})
    if location:
        data_filters['location_names'].append(location['name'])
        query_params.update({'l': location['name']})

    if pay_graph:
        pay_graph_data = make_pay_graph(pay_graph, posts, rmin=f_min, rmax=f_max)

    return dict(posts=posts, pinsandposts=pinsandposts, grouped=grouped, newlimit=newlimit, title=title,
        md5sum=md5sum, domain=domain, location=location, employer_name=employer_name,
        showall=showall, f_locations=f_locations, loadmore=loadmore,
        query_params=query_params, data_filters=data_filters,
        pay_graph_data=pay_graph_data, paginated=index_is_paginated(), template_vars=template_vars)
Code example #11
File: index.py Project: nhannv/hasjob
def index(basequery=None, type=None, category=None, md5sum=None, domain=None,
        location=None, title=None, showall=True, statuses=None, tag=None, batched=True, ageless=False):

    if basequery is None:
        is_index = True
    else:
        is_index = False

    now = datetime.utcnow()
    if basequery is None and not (g.user or g.kiosk or (g.board and not g.board.require_login)):
        showall = False
        batched = False

    if basequery is None:
        basequery = JobPost.query

    # Apply request.args filters
    data_filters = {}
    f_types = request.args.getlist('t')
    while '' in f_types:
        f_types.remove('')
    if f_types:
        data_filters['types'] = f_types
        basequery = basequery.join(JobType).filter(JobType.name.in_(f_types))
    f_categories = request.args.getlist('c')
    while '' in f_categories:
        f_categories.remove('')
    if f_categories:
        data_filters['categories'] = f_categories
        basequery = basequery.join(JobCategory).filter(JobCategory.name.in_(f_categories))
    r_locations = request.args.getlist('l')
    f_locations = []
    remote_location = getbool(request.args.get('anywhere')) or False
    for rl in r_locations:
        if rl == 'anywhere':
            remote_location = True
        elif rl.isdigit():
            f_locations.append(int(rl))
        elif rl:
            ld = location_geodata(rl)
            if ld:
                f_locations.append(ld['geonameid'])
    remote_location_query = basequery.filter(JobPost.remote_location == True)  # NOQA
    locations_query = basequery.join(JobLocation).filter(JobLocation.geonameid.in_(f_locations))
    if f_locations and remote_location:
        data_filters['locations'] = f_locations
        data_filters['anywhere'] = True
        recency = JobPost.datetime > datetime.utcnow() - agelimit
        basequery = locations_query.filter(recency).union(remote_location_query.filter(recency))
    elif f_locations:
        data_filters['locations'] = f_locations
        basequery = locations_query
    elif remote_location:
        data_filters['anywhere'] = True
        # Only works as a positive filter: you can't search for jobs that are NOT anywhere
        basequery = remote_location_query
    if 'currency' in request.args and request.args['currency'] in CURRENCY.keys():
        currency = request.args['currency']
        data_filters['currency'] = currency
        basequery = basequery.filter(JobPost.pay_currency == currency)
        pay_graph = currency
    else:
        pay_graph = False
    if getbool(request.args.get('equity')):
        # Only works as a positive filter: you can't search for jobs that DON'T pay in equity
        data_filters['equity'] = True
        basequery = basequery.filter(JobPost.pay_equity_min != None)  # NOQA
    if 'pmin' in request.args and 'pmax' in request.args:
        f_min = string_to_number(request.args['pmin'])
        f_max = string_to_number(request.args['pmax'])
        if f_min is not None and f_max is not None:
            data_filters['pay_min'] = f_min
            data_filters['pay_max'] = f_max
            basequery = basequery.filter(JobPost.pay_cash_min < f_max, JobPost.pay_cash_max >= f_min)
    else:
        f_min = None
        f_max = None

    if getbool(request.args.get('archive')):
        ageless = True
        data_filters['archive'] = True
        statuses = POSTSTATUS.ARCHIVED

    search_domains = None
    if request.args.get('q'):
        q = for_tsquery(request.args['q'])
        try:
            # TODO: Can we do syntax validation without a database roundtrip?
            db.session.query(db.func.to_tsquery(q)).all()
        except ProgrammingError:
            db.session.rollback()
            g.event_data['search_syntax_error'] = (request.args['q'], q)
            if not request.is_xhr:
                flash(_(u"Search terms ignored because this didn’t parse: {query}").format(query=q), 'danger')
        else:
            # Query's good? Use it.
            data_filters['query'] = q
            search_domains = Domain.query.filter(
                Domain.search_vector.match(q, postgresql_regconfig='english'), Domain.is_banned == False).options(
                db.load_only('name', 'title', 'logo_url')).all()  # NOQA
            basequery = basequery.filter(JobPost.search_vector.match(q, postgresql_regconfig='english'))

    if data_filters:
        g.event_data['filters'] = data_filters
        showall = True
        batched = True

    # getposts sets g.board_jobs, used below
    posts = getposts(basequery, pinned=True, showall=showall, statuses=statuses, ageless=ageless).all()

    # Cache viewcounts (admin view or not)
    cache_viewcounts(posts)

    if posts:
        employer_name = posts[0].company_name
    else:
        employer_name = u'a single employer'

    if g.user:
        g.starred_ids = set(g.user.starred_job_ids(agelimit))
    else:
        g.starred_ids = set()

    jobpost_ab = session_jobpost_ab()

    # Make lookup slightly faster in the loop below since 'g' is a proxy
    board = g.board
    if board:
        board_jobs = g.board_jobs
    else:
        board_jobs = {}

    if is_index and posts and not g.kiosk:
        # Group posts by email_domain on index page only, when not in kiosk mode
        grouped = OrderedDict()
        for post in posts:
            pinned = post.pinned
            if board is not None:
                blink = board_jobs.get(post.id)  # board_jobs only contains the last 30 days, no archive
                if blink is not None:
                    pinned = blink.pinned
            if pinned:
                # Make pinned posts appear in a group of one
                grouped.setdefault(('s', post.hashid), []).append(
                    (pinned, post, bgroup(jobpost_ab, post)))
            elif post.status == POSTSTATUS.ANNOUNCEMENT:
                # Make announcements also appear in a group of one
                grouped.setdefault(('a', post.hashid), []).append(
                    (pinned, post, bgroup(jobpost_ab, post)))
            elif post.domain.is_webmail:
                grouped.setdefault(('ne', post.md5sum), []).append(
                    (pinned, post, bgroup(jobpost_ab, post)))
            else:
                grouped.setdefault(('nd', post.email_domain), []).append(
                    (pinned, post, bgroup(jobpost_ab, post)))
        pinsandposts = None
    else:
        grouped = None
        if g.board:
            pinsandposts = []
            for post in posts:
                pinned = post.pinned
                if board is not None:
                    blink = board_jobs.get(post.id)  # board_jobs only contains the last 30 days, no archive
                    if blink is not None:
                        pinned = blink.pinned
                pinsandposts.append((pinned, post, bgroup(jobpost_ab, post)))
        else:
            pinsandposts = [(post.pinned, post, bgroup(jobpost_ab, post)) for post in posts]

    # Pick a header campaign (only if not kiosk or an XHR reload)
    pay_graph_data = None
    if not g.kiosk and not request.is_xhr:
        if g.preview_campaign:
            header_campaign = g.preview_campaign
        else:
            if location:
                geonameids = g.user_geonameids + [location['geonameid']]
            else:
                geonameids = g.user_geonameids
            header_campaign = Campaign.for_context(CAMPAIGN_POSITION.HEADER, board=g.board, user=g.user,
                anon_user=g.anon_user, geonameids=geonameids)
        if pay_graph:
            pay_graph_data = make_pay_graph(pay_graph, posts, rmin=f_min, rmax=f_max)
    else:
        header_campaign = None

    loadmore = False
    if batched:
        # Figure out where the batch should start from
        startdate = None
        if 'startdate' in request.values:
            try:
                startdate = parse_isoformat(request.values['startdate'])
            except ValueError:
                pass

        if request.method == 'GET':
            batchsize = 31  # Skipping one for the special stickie that's on all pages
        else:
            batchsize = 32

        # Depending on the display mechanism (grouped or ungrouped), extract the batch
        if grouped:
            if not startdate:
                startindex = 0
            else:
                # Loop through group looking for start of next batch. See below to understand the
                # nesting structure of 'grouped'
                for startindex, row in enumerate(grouped.values()):
                    # Skip examination of pinned listings (having row[0][0] = True)
                    if (not row[0][0]) and row[0][1].datetime < startdate:
                        break

            batch = grouped.items()[startindex:startindex + batchsize]
            if startindex + batchsize < len(grouped):
                # Get the datetime of the last group's first item
                # batch = [((type, domain), [(pinned, post, bgroup), ...])]
                # batch[-1] = ((type, domain), [(pinned, post, bgroup), ...])
                # batch[-1][1] = [(pinned, post, bgroup), ...]
                # batch[-1][1][0] = (pinned, post, bgroup)
                # batch[-1][1][0][1] = post
                loadmore = batch[-1][1][0][1].datetime
            grouped = OrderedDict(batch)
        elif pinsandposts:
            if not startdate:
                startindex = 0
            else:
                for startindex, row in enumerate(pinsandposts):
                    # Skip pinned posts when looking for starting index
                    if (not row[0]) and row[1].datetime < startdate:
                        break

            batch = pinsandposts[startindex:startindex + batchsize]
            if startindex + batchsize < len(pinsandposts):
                # batch = [(pinned, post), ...]
                loadmore = batch[-1][1].datetime
            pinsandposts = batch

    if grouped:
        g.impressions = {post.id: (pinflag, post.id, is_bgroup)
            for group in grouped.itervalues()
            for pinflag, post, is_bgroup in group}
    elif pinsandposts:
        g.impressions = {post.id: (pinflag, post.id, is_bgroup) for pinflag, post, is_bgroup in pinsandposts}

    return render_template('index.html', pinsandposts=pinsandposts, grouped=grouped, now=now,
                           newlimit=newlimit, jobtype=type, jobcategory=category, title=title,
                           md5sum=md5sum, domain=domain, employer_name=employer_name,
                           location=location, showall=showall, tag=tag, is_index=is_index,
                           header_campaign=header_campaign, loadmore=loadmore,
                           search_domains=search_domains,
                           is_siteadmin=lastuser.has_permission('siteadmin'),
                           pay_graph_data=pay_graph_data)
Code example #12
File: test_utils.py Project: hasgeek/coaster
 def test_parse_isoformat(self):
     assert parse_isoformat("1882-12-11T00:00:00.1234Z") == datetime.datetime(1882, 12, 11, 0, 0, 0, 123400)
     assert parse_isoformat("1882-12-11T00:00:00Z") == datetime.datetime(1882, 12, 11, 0, 0)
     assert parse_isoformat("1882-12-11T00:00:00.1234Z", naive=False) == datetime.datetime(1882, 12, 11, 0, 0, 0, 123400, tzinfo=UTC)
     assert parse_isoformat("1882-12-11T00:00:00Z", naive=False) == datetime.datetime(1882, 12, 11, 0, 0, tzinfo=UTC)
Code example #13
File: test_utils.py Project: barnull/coaster
 def test_parse_isoformat(self):
     self.assertEqual(parse_isoformat("1882-12-11T00:00:00.1234Z"),
                      datetime.datetime(1882, 12, 11, 0, 0, 0, 123400))
     self.assertEqual(parse_isoformat("1882-12-11T00:00:00Z"),
                      datetime.datetime(1882, 12, 11, 0, 0))
Code example #14
File: test_utils.py Project: jasimmk/coaster
 def test_parse_isoformat(self):
     self.assertEqual(parse_isoformat("1882-12-11T00:00:00.1234Z"), datetime.datetime(1882, 12, 11, 0, 0, 0, 123400))
     self.assertEqual(parse_isoformat("1882-12-11T00:00:00Z"), datetime.datetime(1882, 12, 11, 0, 0))
Code example #15
File: views.py Project: hasgeek/eventframe
    def _sync(self):
        if self.node.source != 'doattend':
            yield "Unsupported data source, aborting.\n"
            return
        if not self.node.sourceid or not self.node.api_key:
            yield "Source event id and API key are required.\n"
            return
        # All good, start pulling data...
        data_url = 'http://doattend.com/api/events/%s/participants_list.json?api_key=%s' % (
            self.node.sourceid, self.node.api_key)
        yield "Receiving data from DoAttend..."
        r = requests.get(data_url)
        data = r.json() if callable(r.json) else r.json
        yield " OK\n"
        yield "Participant count: %d\n" % len(data['participants'])
        yield "Previously synced count: %d\n\n" % len(self.node.participants)

        by_ticket = {}
        local_tickets = set()
        upstream_tickets = set()
        unindexed = []
        for participant in self.node.participants:
            if participant.ticket is not None:
                by_ticket[participant.ticket] = participant
                local_tickets.add(participant.ticket)
            else:
                unindexed.append(participant)
        plist = data['participants']
        plist.reverse()  # DoAttend list is sorted by most-recent first
        for p in plist:
            upstream_tickets.add(p['Ticket_Number'])
            participant = by_ticket.get(p['Ticket_Number'])
            if participant is None:
                participant = Participant(participant_list=self.node)
                db.session.add(participant)
                participant.ticket = p['Ticket_Number'].strip()
                by_ticket[participant.ticket] = participant
                local_tickets.add(participant.ticket)
            syncinfo = {
                'datetime': parse_isoformat(p['Date']),
                'fullname': p['Name'].strip() if isinstance(p['Name'], basestring) else p['Name'],
                'email': p['Email'].strip() if isinstance(p['Email'], basestring) else p['Email'],
                'ticket_type': p['Ticket_Name'].strip() if isinstance(p['Ticket_Name'], basestring) else p['Ticket_Name'],
            }
            pinfo = p.get('participant_information', [])
            if isinstance(pinfo, dict):
                pinfo = [pinfo]
            for keyval in pinfo:
                key = keyval['desc']
                value = keyval.get('info')
                if key == 'Job Title':
                    syncinfo['jobtitle'] = value.strip() if isinstance(value, basestring) else value
                elif key == 'Company':
                    syncinfo['company'] = value.strip() if isinstance(value, basestring) else value
                elif key == 'Twitter Handle':
                    syncinfo['twitter'] = value.strip() if isinstance(value, basestring) else value
                elif key == 'City':
                    syncinfo['city'] = value.strip() if isinstance(value, basestring) else value
                elif key == 'T-shirt size':
                    syncinfo['tshirt_size'] = value.split('-', 1)[0].strip() if isinstance(value, basestring) else value
            edited = False
            for key, value in syncinfo.items():
                if getattr(participant, key) != value:
                    setattr(participant, key, value)
                    if key == 'email':
                        participant.user = None
                    edited = True
            if edited:
                if participant.id is None:
                    yield "New participant (#%s): %s\n" % (participant.ticket, participant.fullname)
                else:
                    yield "Edited participant (#%s): %s\n" % (participant.ticket, participant.fullname)
        # Check for deleted participants
        removed_tickets = local_tickets - upstream_tickets
        for ticket in removed_tickets:
            participant = by_ticket.get(ticket)
            if participant:
                yield "Removed participant (#%s): %s\n" % (ticket, participant.fullname)
                db.session.delete(participant)
        db.session.commit()
        yield '\nAll done.'
Code example #16
    def test_parse_isoformat(self):
        assert parse_isoformat(
            '1882-12-11T00:00:00.1234Z'
        ) == datetime.datetime(1882, 12, 11, 0, 0, 0, 123400)
        assert parse_isoformat('1882-12-11T00:00:00Z') == datetime.datetime(
            1882, 12, 11, 0, 0)
        assert parse_isoformat(
            '1882-12-11T00:00:00.1234Z', naive=False
        ) == datetime.datetime(1882, 12, 11, 0, 0, 0, 123400, tzinfo=UTC)
        assert parse_isoformat(
            '1882-12-11T00:00:00Z', naive=False
        ) == datetime.datetime(1882, 12, 11, 0, 0, tzinfo=UTC)

        assert parse_isoformat(
            '1882-12-11T00:00:00-06:39', naive=False
        ) == datetime.datetime(1882, 12, 11, 0, 0, 0, tzinfo=TestTZ())

        assert parse_isoformat(
            '1882-12-11T00:00:00-06:39', naive=True
        ) == datetime.datetime(1882, 12, 11, 6, 39, 0)

        assert parse_isoformat(
            '1882-12-11T00:00:00', naive=True
        ) == datetime.datetime(1882, 12, 11, 0, 0, 0)

        with pytest.raises(ValueError):
            # lacking the T delimiter
            assert parse_isoformat(
                '1882-12-11 00:00:00.1234Z'
            ) == datetime.datetime(1882, 12, 11, 0, 0, 0, 123400)

        # will pass with delimiter
        assert parse_isoformat(
            '1882-12-11 00:00:00.1234Z', delimiter=' '
        ) == datetime.datetime(1882, 12, 11, 0, 0, 0, 123400)

        assert parse_isoformat(
            '2012-05-21 23:06:08', naive=False, delimiter=' '
        ) == datetime.datetime(2012, 5, 21, 23, 6, 8)

        with pytest.raises(ParseError):
            parse_isoformat('2019-05-03T05:02:26.340937Z\'')

        with pytest.raises(ParseError):
            parse_isoformat('2019-05-03T05:02:26.340937Z\'', naive=False)