Example No. 1
    def _on_first_grace(self, config):
        should_ping_reviewer = False
        assignee_comments = filter(lambda d: d['who'] == self.data['assignee'],
                                   self.data['comments'])
        author_comments = filter(lambda d: d['who'] == self.data['author'],
                                 self.data['comments'])
        if not assignee_comments:
            # Reviewer hasn't commented at all!
            should_ping_reviewer = True
        else:
            last_review = datetime_parse(assignee_comments[-1]['when'])
            last_push = datetime_parse(self.data['last_push'])
            if last_review < last_push:
                # Reviewer hasn't looked at this since the last push
                should_ping_reviewer = True
            elif not author_comments:
                # Author hasn't commented at all!
                comment = self.api.rand_choice(config['pr_ping'])
                self.api.post_comment(
                    comment.format(author=self.data['author']))
                self.data['status'] = 'commented'
            else:
                # It could be waiting on the assignee or the author. Right now, we just poke them both.
                self.api.post_comment(
                    self.api.rand_choice(config['pr_anon_ping']))

        if should_ping_reviewer:
            # Right now, we just ping the reviewer until he takes a look at this or assigns someone else
            comment = self.api.rand_choice(config['review_ping'])
            self.api.post_comment(
                comment.format(reviewer=self.data['assignee']))
    def test_contributors_update(self):
        '''
        The contributors list (cache) lives only for an hour (by default). Once it's outdated,
        the next call to `get_contributors` calls `fetch_contributors`, writes it to the store
        and returns the list. Any calls within the next hour will return the existing contributors
        without calling the API.
        '''
        class TestAPI(APIProvider):
            fetched = False

            def fetch_contributors(self):
                self.fetched = True
                return []

        config = create_config()
        api = TestAPI(config=config, payload={}, store=None)
        self.assertFalse(api.fetched)
        api.get_contributors()
        # No store. This will always call the API.
        self.assertTrue(api.fetched)

        store = TestStore()
        api = TestAPI(config=config, payload={}, store=store)
        self.assertFalse(api.fetched)
        now = datetime.now()
        api.get_contributors()
        data = store.get_object(CONTRIBUTORS_STORE_KEY)
        updated_time = datetime_parse(data['last_update_time'])
        # Store doesn't have contributors. It's been updated for the first time.
        self.assertTrue(updated_time >= now)
        self.assertTrue(api.fetched)

        store = TestStore()
        store.write_object(CONTRIBUTORS_STORE_KEY, {
            'last_update_time': str(now),
            'list': ['booya']
        })
        api = TestAPI(config=config, payload={}, store=store)
        self.assertFalse(api.fetched)
        api.get_contributors()
        data = store.get_object(CONTRIBUTORS_STORE_KEY)
        updated_time = datetime_parse(data['last_update_time'])
        # Called within a cycle - no fetch occurs.
        self.assertEqual(updated_time, now)
        self.assertFalse(api.fetched)

        store = TestStore()
        store.write_object(CONTRIBUTORS_STORE_KEY, {
            'last_update_time': str(now),
            'list': ['booya']
        })
        api = TestAPI(config=config, payload={}, store=store)
        self.assertFalse(api.fetched)
        api.get_contributors(fetch=True)
        # When `fetch` is enabled, API is called regardless.
        self.assertTrue(api.fetched)
        data = store.get_object(CONTRIBUTORS_STORE_KEY)
        updated_time = datetime_parse(data['last_update_time'])
        self.assertTrue(updated_time > now)
Example No. 3
def get_fulfillable_course_runs_for_entitlement(entitlement, course_runs):
    """
    Takes a list of course runs and returns only the course runs, sorted by start date, that:

    1) Are currently running or in the future
    2) The user can enroll in
    3) The user can upgrade in
    4) Are published
    5) The user is not already enrolled in for an active session

    These are the only sessions that can be selected for an entitlement.
    """

    enrollable_sessions = []

    enrollments_for_user = CourseEnrollment.enrollments_for_user(
        entitlement.user).filter(mode=entitlement.mode)
    enrolled_sessions = frozenset(
        [str(e.course_id) for e in enrollments_for_user])

    # Only show published course runs that can still be enrolled and upgraded
    now = datetime.datetime.now(UTC)
    for course_run in course_runs:

        # Only courses that have not ended will be displayed
        run_start = course_run.get('start')
        run_end = course_run.get('end')
        is_running = run_start and (not run_end
                                    or datetime_parse(run_end) > now)

        # Only courses that can currently be enrolled in will be displayed
        enrollment_start = course_run.get('enrollment_start')
        enrollment_end = course_run.get('enrollment_end')
        can_enroll = (
            (not enrollment_start or datetime_parse(enrollment_start) < now)
            and (not enrollment_end or datetime_parse(enrollment_end) > now)
            and course_run.get('key') not in enrolled_sessions)

        # Only upgrade-able courses will be displayed
        can_upgrade = False
        for seat in course_run.get('seats', []):
            if seat.get('type') == entitlement.mode:
                upgrade_deadline = seat.get('upgrade_deadline', None)
                can_upgrade = not upgrade_deadline or (
                    datetime_parse(upgrade_deadline) > now)
                break

        # Only published courses will be displayed
        is_published = course_run.get('status') == 'published'

        if is_running and can_upgrade and can_enroll and is_published:
            enrollable_sessions.append(course_run)

    enrollable_sessions.sort(key=lambda session: session.get('start'))
    return enrollable_sessions
 def test_screens_after_earliest_datetime(self):
     datetime = datetime_parse('2018-01-01T00:00:01+00:00')
     video = AnnotatedScreenVideo(INSTITUTION_ID, ROOM_ID, CAMERA_ID,
                                  datetime)
     frame = next(iter(video))
     self.assertEqual(
         set([
             ('aspect_ratio', datetime_parse('2018-01-01T00:00:00+00:00')),
             ('no_from_no_until',
              datetime_parse('2018-01-01T00:00:00+00:00')),
             ('early_from_no_until',
              datetime_parse('2018-01-01T00:00:00+00:00')),
             ('equal_from_no_until',
              datetime_parse('2018-01-01T00:00:00+00:00')),
             ('late_from_no_until',
              datetime_parse('2018-01-01T00:00:00+00:00')),
             ('no_from_early_until',
              datetime_parse('2018-01-01T00:00:00+00:00')),
             ('no_from_equal_until',
              datetime_parse('2018-01-01T00:00:00+00:00')),
             ('no_from_late_until',
              datetime_parse('2018-01-01T00:00:00+00:00')),
         ]),
         set([(screen.screen_id, screen.datetime)
              for screen in self.screen_detector.detect(frame)]),
     )
Example No. 5
def parse_bounding_dates(data):
    if '_start_at' in data:
        start_at = datetime_parse(data['_start_at'])
        if '_end_at' in data:
            end_at = datetime_parse(data['_end_at'])
        else:
            end_at = datetime.datetime.now(pytz.UTC).replace(microsecond=0)
    else:
        return (None, None)

    return (start_at, end_at)
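A minimal usage sketch, assuming `datetime_parse` is `dateutil.parser.parse` and the `datetime`/`pytz` imports used above are in place; the payload keys and values are illustrative:

from dateutil.parser import parse as datetime_parse

start_at, end_at = parse_bounding_dates({'_start_at': '2020-01-01T00:00:00+00:00'})
# start_at comes from the payload; end_at falls back to the current UTC time
# (microseconds stripped) because '_end_at' is missing.

assert parse_bounding_dates({}) == (None, None)  # no '_start_at' key at all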
Example No. 7
 def parse_post_expires(self, response):
     meta = response.xpath('//meta[@name="robots"]')
     if meta:
         for robot_param in meta.attrib["content"].split(","):
             if robot_param.startswith("unavailable_after"):
                 dt = robot_param.replace("unavailable_after: ", "")
                 return datetime_parse(dt)
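For reference, the directive this method looks for typically appears in a robots meta tag. A standalone sketch of the same string handling without Scrapy, assuming `datetime_parse` is `dateutil.parser.parse` (the example markup and date are illustrative):

from dateutil.parser import parse as datetime_parse

# e.g. <meta name="robots" content="noindex,unavailable_after: 25-Aug-2024 15:00:00 GMT">
content = "noindex,unavailable_after: 25-Aug-2024 15:00:00 GMT"
for robot_param in content.split(","):
    if robot_param.startswith("unavailable_after"):
        expires = datetime_parse(robot_param.replace("unavailable_after: ", ""))
        # expires -> datetime.datetime(2024, 8, 25, 15, 0, tzinfo=tzutc())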
Example No. 8
    def get_contributors(self, fetch=False):
        '''
        If `fetch` is disabled and a store exists, this gets the contributors list from the
        store. Otherwise, it calls the overridable `fetch_contributors` method, writes the
        list to the store (if it exists) and returns it.
        '''

        now = datetime.now()

        if self.store:
            contributors = self.store.get_object(CONTRIBUTORS_STORE_KEY)
            if contributors:
                # Force a fetch if the last update was too long ago.
                timestamp = contributors.get('last_update_time', str(now))
                last_update = datetime_parse(timestamp)
                if (now - last_update).total_seconds() >= CONTRIBUTORS_UPDATE_INTERVAL_HOURS * 3600:
                    fetch = True

            if contributors.get('list') and not fetch:
                return contributors['list']

        self.logger.info('Updating contributors list...')
        contributors = {
            'last_update_time': str(now),
            'list': self.fetch_contributors()
        }

        if self.store:
            self.store.write_object(CONTRIBUTORS_STORE_KEY, data=contributors)

        return contributors['list']
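A minimal sketch of the store interface this method appears to assume (`get_object` / `write_object` keyed by name); the in-memory implementation here is illustrative, not the project's actual store:

class InMemoryStore(object):
    """Illustrative key/value store exposing the two methods used above."""

    def __init__(self):
        self._objects = {}

    def get_object(self, key):
        # Missing keys yield an empty dict so callers can use `.get(...)` safely.
        return self._objects.get(key, {})

    def write_object(self, key, data):
        self._objects[key] = data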
Example No. 9
def _clean_metadata(metadata):
    meta = copy.deepcopy(DEFAULT_META)
    meta.update(metadata or {})  # handles None metadata
    meta['tags'] = set(meta.get('tags', []))  # tags should be a set
    if 'added' in meta and isinstance(meta['added'], ustr):
        meta['added'] = datetime_parse(meta['added'])
    return meta
    def sync_token(self):
        '''
        Prove ownership of the integration to GitHub and request a token.
        This token is used for subsequent API requests. It has a limited
        lifetime and needs to be synced again later.
        '''
        now = datetime.now(
            self.next_token_sync.tzinfo)  # timezone-aware version
        if now < self.next_token_sync:
            return

        self.logger.debug('Getting auth token with JWT from PEM key...')
        # https://developer.github.com/apps/building-github-apps/authentication-options-for-github-apps/
        since_epoch = int(time.time())
        auth_payload = {
            'iat': since_epoch,
            'exp': since_epoch + 600,  # 10 mins expiration for JWT
            'iss': self.config.integration_id,
        }

        auth = 'Bearer %s' % jwt.encode(auth_payload, self.config.pem_key,
                                        'RS256')
        resp = self._request('POST', self.installation_url, auth=auth)
        self.token = resp.data[
            'token']  # installation token (expires in 1 hour)
        self.logger.debug('Token expires on %s', resp.data['expires_at'])
        self.next_token_sync = datetime_parse(resp.data['expires_at'])
 def test_no_screens_before_earliest_datetime(self):
     datetime = datetime_parse('2017-12-31T23:59:59+00:00')
     video = AnnotatedScreenVideo(INSTITUTION_ID, ROOM_ID, CAMERA_ID,
                                  datetime)
     frame = next(iter(video))
     screens = set(self.screen_detector.detect(frame))
     self.assertEqual(screens, set())
Example No. 12
    def score_document(self, doc):
        """
		doc:
			created_at
			url
			title
			keywords
		"""
        max_id = None
        last_created_date = doc.created_at
        score = 0

        while last_created_date >= doc.created_at:
            results = self.api.get_keyword_tweets(doc.keywords,
                                                  max_id)['statuses']
            print 'got {} tweets'.format(len(results))
            print 'last date', last_created_date

            for tweet in results:
                score += self.compute_score(doc, tweet)
                max_id = int(tweet['id'])
                last_created_date = datetime_parse(tweet['created_at'])
                if last_created_date < doc.created_at:
                    return score

            if len(results) == 0:
                return score

            max_id -= 1
            time.sleep(4)  # rate limit 450 per 30 minutes

        return score
Example No. 13
    def _prepare_for_update(self):
        config = self.get_matched_subconfig()
        if not config:
            return

        self.logger.info('Preparing to post weekly update...')
        assignees = self.join_names(map(lambda n: '@' + n,
                                        config['assignees']))
        week_end = datetime_parse(self.data['post_date'])
        week_start = str((week_end - timedelta(days=7)).date())
        week_end = str(week_end.date())
        newcomers = '\n'.join(map(lambda c: ' - @' + c,
                                  self.data['newcomers']))

        title = config['issue_title'].format(date=week_end)
        body = TEMPLATE.format(assignees=assignees,
                               newcomers=newcomers,
                               start=week_start,
                               end=week_end,
                               owner=self.data['owner'],
                               repo=self.data['repo'])

        owner, repo = config['repo'].split('/')
        with Modifier(self.api, owner=owner, repo=repo):
            self.api.create_issue(title=title,
                                  body=body,
                                  assignees=config.get('assignees', []),
                                  labels=config.get('labels', []))

        # Removing the data, so that next time a PR is opened, we'll start afresh
        self.logger.info('Removing existing data...')
        self.remove_object()
Example No. 14
def datetime_interpretation(raw):
    """Converts human-readable datetime into normal datetime instance.

    Args:
        raw (str or unicode): source string with datetime data.

    Returns:
        datetime: result datetime instance.
        None: if the value couldn't be parsed.
    """
    if isinstance(raw, (unicode, str)) and len(raw):
        if isinstance(raw, str):
            try:
                raw = unicode(raw, 'utf-8')
            except UnicodeDecodeError:
                return None
        try:
            for ru, en in RU_EN_MONTHS:
                if ru in raw:
                    raw = raw.replace(ru, en)
                    break
            return datetime_parse(raw)
        except:
            return None
    else:
        return None
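The `RU_EN_MONTHS` table is not shown in this example; it is presumably an iterable of (Russian, English) month-name pairs. A purely hypothetical sketch of how the function might be exercised under that assumption (Python 2, matching the `unicode` checks above):

# Hypothetical subset of the (Russian, English) month-name pairs assumed above.
RU_EN_MONTHS = ((u'января', u'January'), (u'февраля', u'February'))

print(datetime_interpretation(u'15 января 2018'))  # -> datetime.datetime(2018, 1, 15, 0, 0)
print(datetime_interpretation('not a date'))       # -> None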
Example No. 15
    def _check_pulls(self):
        config = self.get_matched_subconfig()
        if not config:
            return

        for number in self.pr_list['pulls']:
            self.data = self.get_object(key=number)
            self.old_data = deepcopy(self.data)
            last_active = self.data.get('last_active')
            if not last_active:
                continue

            last_active = datetime_parse(last_active)
            now = datetime.now(last_active.tzinfo)
            if (now - last_active).days <= config['grace_period_days']:
                self.logger.debug('PR #%s is still in grace period', number)
                continue

            self.logger.info(
                "PR #%s has had its time. Something's gonna happen.", number)
            self.data['last_active'] = str(now)

            with Modifier(self.api, number=number):
                self._handle_indiscipline_pr(config)

            if self.old_data != self.data:
                self.write_object(self.data, key=number)
Example No. 16
    def _update_data(self, data={}):
        '''Update the data in this object.'''
        
        # Store the changes to prevent this update from affecting them
        pending_changes = self._changes or {}
        try:
            del self._changes
        except AttributeError:
            pass
        
        # Map custom fields into our custom fields object
        try:
            custom_field_data = data.pop('custom_fields')
        except KeyError:
            pass
        else:
            self.custom_fields = Custom_Fields(custom_field_data)

        # Map all other dictionary data to object attributes
        for key, value in data.iteritems():
            lookup_key = self._field_type.get(key, key)
            
            # if it's a datetime object, turn into proper DT object
            if lookup_key == 'datetime' or lookup_key == 'date':
                self.__dict__[key] = datetime_parse(value)
            else:
                # Check to see if there's cache data for this item.
                # Will return an object if it's recognized as one.
                self.__dict__[key] = self._redmine.check_cache(lookup_key, value)
        #self.__dict__.update(data)


        # Set the changes dict to track all changes from here on out
        self._changes = pending_changes
 def test_produces_single_frame(self):
     datetime = datetime_parse('2018-01-01T00:00:00+00:00')
     video = AnnotatedScreenVideo(INSTITUTION_ID, ROOM_ID, CAMERA_ID,
                                  datetime)
     frame_iterator = iter(video)
     next(frame_iterator)
     with self.assertRaises(StopIteration):
         next(frame_iterator)
 def test_aspect_ratio(self):
     datetime = datetime_parse('2018-01-01T00:00:00+00:00')
     video = AnnotatedScreenVideo(INSTITUTION_ID, ROOM_ID, CAMERA_ID,
                                  datetime)
     frame = next(iter(video))
     self.assertIn(('aspect_ratio', 133, 100),
                   set([(screen.screen_id, screen.width, screen.height)
                        for screen in self.screen_detector.detect(frame)]))
Example No. 19
    def _isTokenExpired(self):
        # static auth does not expire
        if len(self.api_token_expires) == 0:
            return False

        expiration = datetime_parse(self.api_token_expires)

        return expiration < datetime.datetime.now()
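One caveat worth noting: if `api_token_expires` carries a UTC offset (e.g. an ISO-8601 string ending in `Z` or `+00:00`), `datetime_parse` returns a timezone-aware datetime, and comparing it against the naive `datetime.datetime.now()` raises a `TypeError`. A hedged sketch of a comparison that works in both cases, assuming `datetime_parse` is `dateutil.parser.parse`:

import datetime
from dateutil.parser import parse as datetime_parse
from dateutil.tz import tzutc

expiration = datetime_parse('2030-01-01T00:00:00+00:00')   # timezone-aware
now = datetime.datetime.now(tzutc()) if expiration.tzinfo else datetime.datetime.now()
expired = expiration < now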
Example No. 20
def convert_string_for_numerical_filter(value: str):
    try:
        return float(value)
    except ValueError:
        try:
            return datetime_parse(value)
        except ParserError:
            raise ValueError("neither a float nor a datetime")
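A short usage sketch, assuming `datetime_parse` is `dateutil.parser.parse` (whose `ParserError` is raised for unparseable input):

from dateutil.parser import ParserError, parse as datetime_parse

convert_string_for_numerical_filter("3.14")        # -> 3.14
convert_string_for_numerical_filter("2021-06-01")  # -> datetime.datetime(2021, 6, 1, 0, 0)
convert_string_for_numerical_filter("not a value") # raises ValueError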
Example No. 21
    def __load_data(self):
        """
        Loads in the aerodynamic data from a file.

        :raises ValueError: If an unknown model was defined in the file.
        """

        if "r" in self.mode:
            # Load raw data from file.
            raw_data = flex_load(file_path=self.path,
                                 default_serializer=msgpack,
                                 default_is_gzipped=True)

            # Validate data, raise error if the data is invalid.
            valid, error = self.validate_data(raw_data)
            if not valid:
                raise ValueError("Invalid aerodynamic file.\n" + yaml.dump(error))

            # Read in parameters.
            self.case = raw_data['case']
            self.description = raw_data['description']
            self.lref = raw_data['lref']
            self.sref = raw_data['sref']
            self.latref = raw_data['latref']
            self.mrc = np.array(raw_data['mrc']).reshape((3, 1))

            # Load in the created datetime.
            # The datetime should be stored as a string in the data file, but
            # some serializers, like the YAML loader, will automatically parse it into a
            # datetime object.
            if issubclass(type(raw_data['created']), datetime):
                self.created = raw_data['created']
            else:
                self.created = datetime_parse(raw_data['created'])

            # Loop through the coefficient entries and create an instance of AeroMethodBase for each.
            for c_data in raw_data['coefficients']:
                # Get the class for the specified model.
                model_class = aero_models_registry.get(c_data['model'])

                # Check whether a class for the model was found. If not raise an exception.
                if model_class is None:
                    raise ValueError("Aerodynamic model '{}' unknown.".format(c_data['model']))
                else:
                    # Validate the properties dictionary.
                    result = model_class.validate_data(c_data['properties'])
                    if result is not None:
                        raise ValueError("Invalid aerodynamic file.\n" + result)

                    # If a class was found, create an instance of it and pass the parameters to the
                    # constructor and store the resulting object in the coefficients table.
                    coefficient_model = model_class(**c_data['properties'])

                    # Check whether the parameters are valid.
                    # TODO: Finish writing this.

                    self.add_coefficient(c_data['name'], coefficient_model)
Example No. 22
 def get_download_metadata(self):
     """
     Returns basic metadata about the current CAL-ACCESS snapshot,
     like its size and the last time it was updated while stopping
     short of actually downloading it.
     """
     request = requests.head(self.url)
     return {
         'content-length': int(request.headers['content-length']),
         'last-modified': datetime_parse(request.headers['last-modified'])
     }
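The `last-modified` header uses the standard HTTP date format, which `dateutil.parser.parse` handles directly; a small illustrative check, assuming that is what `datetime_parse` refers to:

from dateutil.parser import parse as datetime_parse

last_modified = datetime_parse('Tue, 15 Nov 1994 12:45:26 GMT')
# -> datetime.datetime(1994, 11, 15, 12, 45, 26, tzinfo=tzutc())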
Example No. 24
    def fetch_shares(self, limit=1000, **kwargs):
        """
        Retrieve and save all shares of post
        """
        from facebook_api.models import MASTER_DATABASE  # imported here because of a circular import

        ids = []

        response = api_call('%s/sharedposts' % self.graph_id, **kwargs)
        if response:
            timestamps = dict(
                [(int(post['from']['id']), datetime_parse(post['created_time'])) for post in response['data']])
            ids_new = timestamps.keys()
            # because we should use the local pk instead of the remote one; remove this after pk -> graph_id
            ids_current = map(int, User.objects.filter(pk__in=self.shares_users.get_query_set(
                only_pk=True).using(MASTER_DATABASE).exclude(time_from=None)).values_list('graph_id', flat=True))
            ids_add = set(ids_new).difference(set(ids_current))
            ids_add_pairs = []
            ids_remove = set(ids_current).difference(set(ids_new))

            log.debug('response objects count=%s, limit=%s, after=%s' %
                      (len(response['data']), limit, kwargs.get('after')))
            for post in response['data']:
                graph_id = int(post['from']['id'])
                if sorted(post['from'].keys()) == ['id', 'name']:
                    try:
                        user = get_or_create_from_small_resource(post['from'])
                        ids += [user.pk]
                        # this id is in the add list and not yet in add_pairs (the response sometimes contains duplicates)
                        if graph_id in ids_add and graph_id not in map(lambda i: i[0], ids_add_pairs):
                            # because we should use the local pk instead of the remote one
                            ids_add_pairs += [(graph_id, user.pk)]
                    except UnknownResourceType:
                        continue

            m2m_model = self.shares_users.through
            # '(album|post)_id'
            field_name = [f.attname for f in m2m_model._meta.local_fields
                          if isinstance(f, models.ForeignKey) and f.name != 'user'][0]

            # remove old shares without time_from
            self.shares_users.get_query_set_through().filter(time_from=None).delete()

            # in case some ids_add already left
            self.shares_users.get_query_set_through().filter(
                **{field_name: self.pk, 'user_id__in': map(lambda i: i[1], ids_add_pairs)}).delete()

            # add new shares with specified `time_from` value
            get_share_date = lambda id: timestamps[id] if id in timestamps else self.created_time
            m2m_model.objects.bulk_create([m2m_model(
                **{field_name: self.pk, 'user_id': pk, 'time_from': get_share_date(graph_id)}) for graph_id, pk in ids_add_pairs])

        return User.objects.filter(pk__in=ids), response
Example No. 25
    def test_dump(self):
        aero_file = AeroFile(test_path / "data_files" / "aero_1.yaml")
        aero_file.dump(test_path / "data_files" / "aero_2.i.yaml")

        aero_1_data = flex_load(test_path / "data_files" / "aero_1.yaml")
        aero_2_data = flex_load(test_path / "data_files" / "aero_2.i.yaml")

        # Check whether the data in the aero_2_data dictionary is the same as in aero_1_data.
        # Since aero_1_data was written by hand, it doesn't contain all the optional parameters
        # for the coefficient models, while aero_2_data does. So we only check the parameters
        # present in aero_1_data and ignore any parameters unique to aero_2_data.

        def check_dictionaries(d1, d2):
            for key, value in d1.items():
                # print(f"dict key='{key}, v1='{value}, v2={d2[key]}")
                if isinstance(value, dict):
                    self.assertIsInstance(d2[key], dict)
                    check_dictionaries(value, d2[key])
                elif isinstance(value, float):
                    self.assertAlmostEqual(value, d2[key])
                elif isinstance(value, (list, tuple)):
                    self.assertIsInstance(d2[key], (list, tuple))
                    check_list(value, d2[key])
                elif isinstance(value, set):
                    # It's assumed that sets do not contain lists, dictionaries or
                    # other sets and that both sets should contain the same elements.
                    self.assertIsInstance(d2[key], set)
                    self.assertEqual(len(d2[key] - value), 0)
                else:
                    self.assertEqual(value, d2[key])

        def check_list(l1, l2):
            for i, value in enumerate(l1):
                # print(f"list i='{i}, v1='{value}, v2={l2[i]}")
                if isinstance(value, dict):
                    self.assertIsInstance(l2[i], dict)
                    check_dictionaries(value, l2[i])
                elif isinstance(value, float):
                    self.assertAlmostEqual(value, l2[i])
                elif isinstance(value, (list, tuple)):
                    self.assertIsInstance(l2[i], (list, tuple))
                    check_list(value, l2[i])
                elif isinstance(value, set):
                    # It's assumed that sets do not contain lists, dictionaries or
                    # other sets and that both sets should contain the same elements.
                    self.assertIsInstance(l2[i], set)
                    self.assertEqual(len(l2[i] - value), 0)
                else:
                    self.assertEqual(value, l2[i])

        aero_2_data['created'] = datetime_parse(aero_2_data['created'])

        check_dictionaries(aero_1_data, aero_2_data)
Example No. 26
    def get(self, request):

        url_logs = URLStatusLog.objects.all().values('publication').order_by('publication', '-last_modified'). \
            annotate(last_modified=Max('last_modified')). \
            values_list('publication', 'type', 'publication__date_published_text').order_by('publication')
        all_records = Counter()
        years = []
        if url_logs:
            start_year = 1900
            end_year = 2100
            if request.query_params.get('start_date'):
                start_year = int(request.query_params.get('start_date'))
            if request.query_params.get('end_date'):
                end_year = int(request.query_params.get('end_date'))

            for pub, category, date in url_logs:
                try:
                    date_published = int(datetime_parse(str(date)).year)
                except:
                    date_published = None
                if date_published is not None and start_year <= date_published <= end_year:
                    years.append(date_published)
                    all_records[(date_published, category)] += 1
        else:
            sqs = SearchQuerySet()
            sqs = sqs.filter(**visualization_query_filter(request))
            filtered_pubs = queryset_gen(sqs)
            pubs = Publication.api.primary(pk__in=filtered_pubs)
            for pub in pubs:
                if pub.code_archive_url != '' and pub.year_published is not None:
                    years.append(pub.year_published)
                    all_records[(pub.year_published,
                                 categorize_url(pub.code_archive_url))] += 1

        group = []
        data = [['x']]
        for name in URLStatusLog.PLATFORM_TYPES:
            group.append(name[0])
            data.append([name[0]])

        for year in sorted(set(years)):
            data[0].append(year)
            index = 1
            for name in URLStatusLog.PLATFORM_TYPES:
                data[index].append(all_records[(year, name[0])])
                index += 1

        return Response(
            {
                "aggregated_data": json.dumps(data),
                "group": json.dumps(group)
            },
            template_name="visualization/code_archived_url_staged_bar.html")
Example No. 27
def _detect_autoscaler(self, minutes=10):
    """Grep syslog for autoscaler.

    Also get remote's date (time), to avoid another (slow) ssh."""

    command = """grep CRON /var/log/syslog | grep autoscaler | tail; date"""
    output = self.remote_cmd(command).splitlines()
    recent_crons, now = output[:-1], output[-1]

    if not recent_crons:
        return False

    # Get timestamp of last cron job
    last_cron = recent_crons[-1].split(self.name)[0]
    log_time = datetime_parse(last_cron).replace(tzinfo=timezone.utc)
    now = datetime_parse(now)
    pause = timedelta(minutes=minutes)

    if log_time + pause < now:
        return False

    return True
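Note that classic syslog timestamps (e.g. `Jan  5 04:17:01`) carry no year. A small illustrative call, assuming `datetime_parse` is `dateutil.parser.parse`:

from dateutil.parser import parse as datetime_parse

log_time = datetime_parse('Jan  5 04:17:01')
# The missing year defaults to the current year: dateutil fills unspecified
# fields from today's date, which is fine for a comparison spanning minutes.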
    def main():
        """Shows basic usage of the Sheets API.

        Creates a Sheets API service object and prints the names and majors of
        students in a sample spreadsheet:
        https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit
        """
        discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'
                        'version=v4')
        service = discovery.build(
            'sheets',
            'v4',
            developerKey="AIzaSyCZ41Zj4_eChwYANXzbDBajEWX8dt3a2Ao",
            discoveryServiceUrl=discoveryUrl)

        spreadsheetId = '1zcCmtkVP4Hjy5uDuN4veMTqYquMRcF-82mMt2kRhnmU'
        rangeName = 'Sheet1!A2:E'
        result = service.spreadsheets().values().get(
            spreadsheetId=spreadsheetId, range=rangeName).execute()
        values = result.get('values', [])

        if not values:
            print('No data found.')
        else:
            for row in fix_rows(values):
                row[0] = datetime_parse(row[0])
                if row[2]:
                    row[2] = datetime_parse(row[2])
                print(
                    'new Milestone("{name}", "{deadline}", {remove}, {marker}, {shift}),'
                    .format(
                        name=row[1],
                        deadline=row[0].strftime("%Y-%m-%d %H:%M"),
                        remove=row[2].strftime('"%Y-%m-%d %H:%M"')
                        if row[2] else "undefined",
                        marker=f'{row[3]}' if row[3] else "undefined",
                        shift=f'{row[4]}' if row[4] else "undefined",
                    ))
Example No. 29
def attribute_list(request):
    organism_id = request.REQUEST.get('organism_id', '')

    if not organism_id:
        organisms = cache.get('organisms', [])
        if organisms:
            organism_id = organisms[0][0]
        else:
            query_organism = {
                "method" : "get_organisms",
                "key": request.user.sessionkey,
            }
            organisms_dict = api_request(query_organism)

            try:
                organism_id = organisms_dict['result']['organisms'][0]['id']
            except KeyError:
                if organisms_dict.has_key('error'):
                    msg = organisms_dict['error']['message']
                    messages.error(request, 'API ERROR: {}. {}'.format(msg, organisms_dict['error']['data']))
                else:
                    raise Http404

    query_dict = {
        "method" : "get_attributes",
        "key": request.user.sessionkey,
        "params" : {
            "query" : "organism = {}".format(organism_id),
            # "limit" : int,
            # "skip" : int,
            "orderby" : [["name", "asc"]]
        }
    }

    content_dict = api_request(query_dict)
    if content_dict.has_key('result'):
        attr_list = content_dict['result']['attributes']

        for attr in attr_list:
            time_value = datetime_parse(attr['created'])
            attr['created'] = time_value.strftime("%Y-%m-%d %H:%M:%S")

        template_context = {'item_list': attr_list}
    else:
        template_context ={}
        msg = content_dict['error']['message']
        messages.error(request, 'API ERROR: {}. {}'.format(msg, content_dict['error']['data']))
        
    return render_to_response("attribute_list.html", template_context, context_instance=RequestContext(request))
    def get_local_metadata(self):
        """
        Retrieves a local file that records past downloads and returns
        a dictionary with a timestamp marking the last update.

        If no file exists it returns the dictionary with null values.
        """
        metadata = {
            'last-download': None
        }
        if os.path.isfile(self.zip_metadata_path):
            with open(self.zip_metadata_path) as f:
                metadata['last-download'] = datetime_parse(f.readline())
        return metadata
Example No. 31
    def get(self, request):

        url_logs = URLStatusLog.objects.all().values('publication').order_by('publication', '-last_modified'). \
            annotate(last_modified=Max('last_modified')). \
            values_list('publication', 'type', 'publication__date_published_text').order_by('publication')
        all_records = Counter()
        years = []
        if url_logs:
            start_year = 1900
            end_year = 2100
            if request.query_params.get('start_date'):
                start_year = int(request.query_params.get('start_date'))
            if request.query_params.get('end_date'):
                end_year = int(request.query_params.get('end_date'))

            for pub, category, date in url_logs:
                try:
                    date_published = int(datetime_parse(str(date)).year)
                except:
                    date_published = None
                if date_published is not None and start_year <= date_published <= end_year:
                    years.append(date_published)
                    all_records[(date_published, category)] += 1
        else:
            sqs = SearchQuerySet()
            sqs = sqs.filter(**visualization_query_filter(request))
            filtered_pubs = queryset_gen(sqs)
            pubs = Publication.api.primary(pk__in=filtered_pubs)
            for pub in pubs:
                if pub.code_archive_url != '' and pub.year_published is not None:
                    years.append(pub.year_published)
                    all_records[(pub.year_published, CodeArchiveUrl.categorize_url(pub.code_archive_url))] += 1

        group = []
        data = [['x']]
        for name in URLStatusLog.PLATFORM_TYPES:
            group.append(name[0])
            data.append([name[0]])

        for year in sorted(set(years)):
            data[0].append(year)
            index = 1
            for name in URLStatusLog.PLATFORM_TYPES:
                data[index].append(all_records[(year, name[0])])
                index += 1

        return Response({"aggregated_data": json.dumps(data), "group": json.dumps(group)},
                        template_name="visualization/code_archived_url_staged_bar.html")
Example No. 32
def parse_sunset(sunset, date=None, **kwargs):
    if isinstance(sunset, datetime):
        return sunset

    def f(v, date=date):
        if not date:
            date = datetime.utcnow()
        return date + timedelta(seconds=int(sunset))

    if isinstance(sunset, str) and sunset.strip().isdigit():
        return f(sunset.strip())

    if isinstance(sunset, int):
        return f(sunset)

    return datetime_parse(sunset)
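A brief usage sketch, assuming `datetime_parse` is `dateutil.parser.parse` and the `datetime`/`timedelta` imports used above are in place; the values are illustrative:

from datetime import datetime, timedelta
from dateutil.parser import parse as datetime_parse

parse_sunset(60)                                # 60 seconds from utcnow()
parse_sunset('60', date=datetime(2021, 1, 1))   # -> datetime(2021, 1, 1, 0, 1)
parse_sunset('2021-06-01T12:00:00')             # parsed directly into a datetime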
    def process_value(cls, value, python_type):
        """
        process_value returns a datetime, date
        or time object according to a given string
        value and a python type.
        """
        if not dateutil:
            raise ImproperlyConfigured(
                "'python-dateutil' is required to process datetimes"
            )

        return_value = datetime_parse(value)

        if issubclass(python_type, datetime.datetime):
            return return_value
        elif issubclass(python_type, datetime.time):
            return return_value.time()
        elif issubclass(python_type, datetime.date):
            return return_value.date()
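Illustrative calls, assuming `datetime_parse` is `dateutil.parser.parse`; the dispatch on `python_type` reduces to the following conversions:

import datetime
from dateutil.parser import parse as datetime_parse

value = '2021-06-01T12:30:00'
datetime_parse(value)          # python_type datetime.datetime -> datetime.datetime(2021, 6, 1, 12, 30)
datetime_parse(value).date()   # python_type datetime.date     -> datetime.date(2021, 6, 1)
datetime_parse(value).time()   # python_type datetime.time     -> datetime.time(12, 30)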
Example No. 34
    def upload(self, blob):
        '''
        Upload a file to the system
        @param blob: file content to be stored
        @return: file id
        '''
        res = ServiceResult()
        log.info("uploading")
        h = blob_hash(blob)
        dst = os.path.join(self.upload_dir, h)

        with self.get_lock():
            with open(dst, 'wb') as f:
                f.write(blob)

        creation_time = datetime_parse(time.ctime(os.path.getctime(dst)))
        # TODO: remove, use dircache instead
        valid_until = creation_time + self.file_lifetime

        res.result = FileId.construct(self, dst, h)
        return res
Example No. 35
 def get_valid_date(self):
     if self._file_id['valid_until'] == -1:
         return float("infinity")
     else:
         return datetime_parse(self._file_id['valid_until'])
Example No. 36
def get_events(url, api_key, limit, since=None, to=None):
    """
    Get issue's events by URL.
    Handle pagination automatically -
    https://docs.getsentry.com/on-premise/api/pagination/.

    :param url: URL to get events
    :type: str
    :param api_key: API key
    :type: str
    :param limit: Maximum number of events to return
    :type: int
    :param since: event's min creation datetime
    :type: datetime
    :param to: event's max creation datetime
    :type: datetime
    :rtype: list
    """
    response = requests.get(url, auth=(api_key, ''))

    if not response.ok:
        code = response.status_code

        if code == 404:
            logger.error('Issue not found')
            return []

        detail = response.json()['detail']
        logger.error('Server returned %d: %s', code, detail)
        return []

    if (since or to) is not None:
        events = []

        for event in response.json():
            created = datetime_parse(event['dateCreated'])

            if since is not None and created < since:
                # Set limit to avoid asking for more pages.
                limit = len(events)
                break

            if to is not None and created > to:
                continue

            events.append(event)
    else:
        events = response.json()

    limit -= len(events)

    if limit < 0:
        events = events[:limit]
        limit = 0

    if limit > 0 and response.links['next']['results'] == 'true':
        events.extend(get_events(url=response.links['next']['url'],
                                 api_key=api_key,
                                 limit=limit,
                                 since=since,
                                 to=to))

    return events
Example No. 37
	def __prepare_date(self, date):
		if not date:
			return ""
		if type(date) is str:
			date = datetime_parse(date)
		return date.strftime("%Y-%m-%d")
Example No. 38
def get_objects(request):
    paginate_by = 10
    template_context = {}
    field_filters_dict_sort = {'': []}
    raw_query_str=''
    fields = OBJECT_FIELDS
    saved_query_list = SavedQuery.objects.filter(user=request.user)
    query_history = []

    if request.method == 'POST':
        form = SelectObjects(request=request, data = request.POST)
        query_history = ast.literal_eval(request.POST['query_history'])

        # Load from history
        query_history_step = request.POST['query_history_step']
        if query_history_step:
            where_search = ''
            field_filters_dict_sort={}
            all_attr_type_dict = { key: atype for (key, item, atype) in form.fields['attributes_list'].choices }
            all_attr_type_dict.update(OBJECT_FIELDS_CHOICES_WITH_TYPE)
            if re.findall('\(.+\)', query_history_step):
                old_raw_query_str, query_step_st = re.findall('(\(.+\)) AND (.+)', query_history_step)[0]
            else:
                old_raw_query_str = ''
                query_step_st = query_history_step

            raw_query_str = query_history_step

            if query_step_st:
                if ' AND ' in query_step_st:
                    logic_rel = ' AND '
                    logic_operation = 'ALL'
                else:
                    logic_rel = ' OR '
                    logic_operation = 'ANY'

                for i, attr in enumerate(query_step_st.split(logic_rel)):
                    if attr.find('"') != -1:
                        attr_name, operation, attr_value = re.findall('(.+) (.+) (".+")', attr)[0]
                    else:
                        attr_name, operation, attr_value = attr.split()
                    _attr_name = attr_name.replace('attr.', '') if attr_name.startswith('attr.') else attr_name
                    field_filters_dict_sort[i] = (attr_name, operation, attr_value.replace('"', ''), all_attr_type_dict[_attr_name])

        else:
            where_search = request.POST.get('where_search')
            logic_operation = request.POST.get('select_operand_in') if where_search == 'search_in_results' else request.POST.get('select_operand')
            raw_query_str = request.POST['raw_query_str']
            old_raw_query_re = re.findall('\(.+\)', raw_query_str)
            old_raw_query_str = old_raw_query_re[0] if old_raw_query_re else ''
            field_filters_dict = ast.literal_eval(request.POST['field_filters_dict'])
            if field_filters_dict:
                field_filters_dict_sort={}
                for key, q in field_filters_dict.items():
                    if key:
                        q[1] = mark_safe(q[1])
                        field_filters_dict_sort[int(key)] = [ s.decode('utf8') for s in q ]

        template_context.update({
            'logic_operation': logic_operation,
            'raw_query_str': raw_query_str,
            'old_raw_query_str': old_raw_query_str,
            'query_history_step': query_history_step
        })

        if form.is_valid():
            cd = form.cleaned_data
            paginate_by = int(cd['paginate_by'])
            raw_query = 'organism = {}'.format(cd['organism'])
            if cd['tags']:
                raw_query = raw_query + ' & tags.contains("{}")'.format('","'.join(cd['tags']))

            if raw_query_str:
                prep_raw_query_str = raw_query_str.replace(' AND ', ' & ').replace(' OR ', ' | ')
                raw_query = raw_query + ' & (' + prep_raw_query_str + ')'

            attr_list=[]
            attr_list = cd['attributes_list']

            order_field = request.GET.get('order_by', 'name')
            order_field = cd['sort_by'] if cd['sort_by'] in OBJECT_FIELDS else 'attr.' + cd['sort_by']

            query_dict = {
                "method" : 'get_objects',
                "key": request.user.sessionkey,
                "params" : {
                    "count": 'true',
                    "query" : raw_query,
                    "nulls_filler": "n/a",
                #     "limit" : int,
                #     "skip": int,
                    "orderby" : [(order_field, "asc"),],
                    "attributes_list": cd['attributes_list']
                }
            }

            try:
                content_dict = get_pagination_page(page=1, paginate_by=paginate_by, query_dict=query_dict)
            except socket.error:
                # TODO
                messages.error(request, 'Oops! Not connected to server.')
                return render_to_response("select_objects.html", template_context, context_instance=RequestContext(request))


            if content_dict.has_key('result'):
                if len(cd['display_fields']):
                    display_fields = cd['display_fields']
                else:
                    display_fields = fields

                objects_count = content_dict['result']['total_count']

                object_list = []
                for obj in content_dict['result']['objects']:
                    object_fields=[]
                    for field in display_fields:
                        if field in ('created', 'modified'):
                            time_value = datetime_parse(obj[field])
                            field_value = time_value.strftime("%Y-%m-%d %H:%M:%S")
                            object_fields.append( (field, field_value) )
                        else:
                            object_fields.append( (field, obj[field]) )
                    
                    if obj.has_key('attributes'):
                        object_attrs = [ None for i in attr_list ]
                        for obj_attr in obj['attributes']:
                            attr_index = attr_list.index(obj_attr['name'])
                            object_attrs[attr_index] = obj_attr
                    else:
                        object_attrs = []

                    object_list.append(
                        {'object_name': obj['name'],
                        'url': reverse('update_object', kwargs={'object_id': obj['id']}),
                        'fields': object_fields,
                        # 'attrs': [ d for attr in attr_list for d in obj['attributes'] if d['name'] == attr ]
                        'attrs': object_attrs,
                    })

                previous_page = False
                next_page = True if int(objects_count) > paginate_by else False

                pages_count = int(math.ceil(objects_count/paginate_by))
                if pages_count < 12:
                    pages = range(1, pages_count+1)
                else:
                    pages = range(1, 4)
                    pages.append('...')
                    pages.extend(range(pages_count-2, pages_count+1))

                if query_history:
                    if query_history_step:
                        # Loading from history
                        for step in query_history:
                            if step['query'] == query_history_step.encode('utf8'):
                                step['count'] = objects_count
                                idx = query_history.index(step)
                                query_history = query_history[:idx+1]
                                break
                    elif where_search == 'search_in_results' and query_history[-1]['query'] != raw_query_str.encode('utf8'):
                        # Search in result
                        query_history.append({'query': raw_query_str, 'count': objects_count})
                    else:
                        # Change current query or new query
                        query_history[-1] = {'query': raw_query_str, 'count': objects_count}
                else:
                    query_history.append({'query': raw_query_str, 'count': objects_count})

                display_fields_str = mark_safe(json.dumps(display_fields))
                template_context.update({
                    'display_fields': display_fields,
                    'display_fields_str': display_fields_str,
                    'attributes': attr_list,
                    'object_list': object_list,
                    'object_download_form': DownloadForm(),

                    'has_next': next_page,
                    'has_previous': previous_page,
                    'pages': pages,
                    'this_page': 1,
                    'next_page_number': 2,
                    'paginate_by': paginate_by,
                    'items_count': objects_count,  
                })
            else:   
                msg = content_dict['error']['message']
                messages.error(request, 
                    'API ERROR: {}. {}'.format(msg, content_dict['error']['data']))

            query_dict_str = mark_safe(json.dumps(query_dict))
            template_context.update({
                'query_dict_str': query_dict_str
                })
        else:
            # TODO: print validation errors
            pass

    # For saved query
    elif request.method == 'GET' and request.GET.get('saved_query', None):
        query_name = request.GET['saved_query']
        saved_query = SavedQuery.objects.get(name=query_name)
        form_data = {
            'organism': saved_query.organism_id, 
            'display_fields': saved_query.display_fields, 
            'attributes_list': saved_query.attributes_list,
            'paginate_by': saved_query.paginate_by, 
            'sort_by': saved_query.sort_by
            }
        form = SelectObjects(request=request, data=form_data)

        if saved_query.filter_fields.items():
            field_filters_dict_sort = {}
            for key, q in saved_query.filter_fields.items():
                if key:
                    q[1] = mark_safe(q[1])
                    field_filters_dict_sort[int(key)] = q

        raw_query_str = saved_query.query_str
        old_raw_query_re = re.findall('\(.+\)', raw_query_str)
        old_raw_query_str = old_raw_query_re[0] if old_raw_query_re else ''

        if raw_query_str:
            step = True
            raw_query_str_iter = raw_query_str
            query_history.append({'query': raw_query_str, 'count': ''})
            while step:
                _query_re = re.findall('\((.+)\)', raw_query_str_iter)
                step = _query_re[0] if _query_re else None
                if step:
                    query_history.append({'query': step, 'count': ''})
                    raw_query_str_iter = step

            query_history.reverse()

        template_context.update({
            'logic_operation': "ALL",
            'raw_query_str': raw_query_str,
            'old_raw_query_str': old_raw_query_str,
        })

    else:
        form = SelectObjects(request=request)

    attributes_from_organism = form.fields['attributes_list'].choices

    template_context.update({
        'form': form, 
        'method': 'get_objects',
        'saved_query_list': saved_query_list,
        'fields': fields,
        'fields_with_type': OBJECT_FIELDS_CHOICES_WITH_TYPE,
        'field_filters_dict': field_filters_dict_sort,
        'attributes_from_organism': attributes_from_organism,
        'query_history': query_history
        })

    return render_to_response("select_objects.html", template_context, context_instance=RequestContext(request))
Example No. 39
def pagination(request, page, paginate_by, items_count, data):
    data = json.loads(data)
    display_fields = data['display_fields']
    query_dict = data['query_dict']
    query_dict['params']['count'] = False
    if query_dict['params'].has_key('attributes_list'):
        attributes = [ attr for attr in query_dict['params']['attributes_list'] ]
    else:
        attributes = []

    content_dict = get_pagination_page(page=page, paginate_by=paginate_by, query_dict=query_dict)
    if content_dict.has_key('result'):
        object_list = []
        for obj in content_dict['result']['objects']:
            object_fields=[]
            for field in display_fields:
                if field in ('created', 'modified'):
                    time_value = datetime_parse(obj[field])
                    field_value = time_value.strftime("%Y-%m-%d %H:%M:%S")
                    object_fields.append( (field, field_value) )
                else:
                    object_fields.append( (field, obj[field]) )

            if obj.has_key('attributes'):
                object_attrs = [ None for i in attributes ]
                for obj_attr in obj['attributes']:
                    attr_index = attributes.index(obj_attr['name'])
                    object_attrs[attr_index] = obj_attr
            else:
                object_attrs = []

            object_list.append(
                {'object_name': obj['name'],
                'url': reverse('update_object', kwargs={'object_id': obj['id']}),
                'fields': object_fields,
                'attrs': object_attrs
                }
            )

        next_page = True if paginate_by * page < items_count else False
        previous_page = True if page > 1 else False

        pages_count = int(math.ceil(items_count/paginate_by))
        if pages_count <= 11:
            pages = range(1, pages_count+1)
        elif page == 1 or page == pages_count:
            pages = range(1, 4)
            pages.append('...')
            pages.extend(range(pages_count-2, pages_count+1))
        elif 1 < page <= 6:
            # page in begin
            pages = range(1, page+2)
            pages.append('...')
            pages.extend(range(pages_count-2, pages_count+1))
        elif pages_count-5 <= page < pages_count:
            # page in end
            pages = range(1, 4)
            pages.append('...')
            pages.extend(range(page-1, pages_count+1))
        else:
            # page in middle
            pages = range(1, 4)
            pages.append('...')
            pages.extend(range(page-1, page+2))
            pages.append('...')
            pages.extend(range(pages_count-2, pages_count+1))

        query_dict_str = mark_safe(simplejson.dumps(query_dict))
        display_fields_str = mark_safe(simplejson.dumps(display_fields))
        template_context = {
            'has_next': next_page,
            'has_previous': previous_page,
            'next_page_number': page+1,
            'previous_page_number': page-1,
            'paginate_by': paginate_by,
            'pages': pages,
            'this_page': page,
            'items_count': items_count,

            'display_fields': display_fields,
            'display_fields_str': display_fields_str,
            'attributes': attributes,
            'object_list': object_list,
            'query_dict_str': query_dict_str
        }

    render = render_to_string('object_list.html', template_context)

    dajax = Dajax()
    dajax.assign('#js_object_result_table', 'innerHTML', render)
    dajax.script('stop_show_loading();')
    return dajax.json()
Example No. 40
 def get(self, instance, owner):
     val_str = instance.element.get(self.attrib_name)
     if val_str is None:
         return self.get_optional(instance)
     else:
         return datetime_parse(val_str)