Example #1
File: run.py Project: gmc-dev/hours
def main(argv):
    if len(argv) == 1:
        print('Command missing')
        sys.exit(1)

    elif argv[1] == 'update_jira':
        date = None
        if len(argv) >= 3:
            date = date_parse(argv[2]).date()
        update_jira(date)

    elif argv[1] == 'send_email':
        end_date = datetime.datetime.now().date()
        email = config['email']['default_to']

        start_date = end_date - datetime.timedelta(days=1)
        while start_date.isoweekday() > 5:
            start_date -= datetime.timedelta(days=1)

        if len(argv) > 2:
            start_date = date_parse(argv[2]).date()

            if len(argv) > 3:
                end_date = date_parse(argv[3]).date()

                if len(argv) > 4:
                    email = argv[4]

        send_email(start_date, end_date, email)

    elif argv[1] == 'get_companies':
        print(get_companies())
Example #2
 def output(self):
     try:
         parts = os.path.basename(self.src).split("-")
         ext = parts[-1]
         # XXX this parsing stuff is insane...
         if ext.startswith("probe") or \
                 ext.startswith("backend"):
             date = date_parse('-'.join(parts[-5:-2]))
             asn = parts[-2]
             test_name = '-'.join(parts[:-5])
         elif parts[0].startswith("report"):
             date = date_parse('-'.join(parts[-3:-1]+parts[-1].split(".")[:1]))
             asn = "ASX"
             test_name = '-'.join(parts[1:-3])
             ext = "probe."+'.'.join(parts[-1].split(".")[1:])
         else:
             date = date_parse('-'.join(parts[-4:-1]))
             asn = parts[-1].split(".")[0]
             ext = "probe."+'.'.join(parts[-1].split(".")[1:])
             test_name = '-'.join(parts[:-4])
         # To facilitate sorting and splitting around "-" we convert the
         # date to be something like: 20150101T000015Z
         timestamp = date.strftime("%Y%m%dT%H%M%SZ")
         filename = "{date}-{asn}-{test_name}-{df_version}-{ext}".format(
             date=timestamp,
             asn=asn,
             test_name=test_name,
             df_version="v1",
             ext=ext.replace(".gz", "").replace(".yamloo", ".yaml")
         )
         uri = os.path.join(self.dst, date.strftime("%Y-%m-%d"), filename)
         return S3Target(uri)
     except Exception:
         return S3Target(os.path.join(self.dst, "failed",
                                      os.path.basename(self.src)))
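The comment above boils down to a single normalisation step: date_parse turns the hyphen-joined filename fragments into a datetime, and strftime renders it as a compact, sortable timestamp. A minimal sketch with an invented filename (not taken from the project above):

from dateutil.parser import parse as date_parse

parts = "web_connectivity-2015-01-01-AS1234-probe.yamloo.gz".split("-")
date = date_parse("-".join(parts[1:4]))   # datetime(2015, 1, 1, 0, 0)
print(date.strftime("%Y%m%dT%H%M%SZ"))    # 20150101T000000Z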
Example #3
def verify_certificate(environ, verify_key, validity_start_key,
                       validity_end_key):
    """
    Checks if the client certificate is valid. Start and end data is optional,
    as not all SSL mods give that information.

    :param environ: The WSGI environment.
    :param verify_key: The key for the value in the environment where it was
        stored if the certificate is valid or not.
    :param validity_start_key: The key for the value in the environment with
        the encoded datetime that indicates the start of the validity range.
    :param validity_end_key: The key for the value in the environment with the
        encoded datetime that indicates the end of the validity range.
    """
    verified = environ.get(verify_key)
    validity_start = environ.get(validity_start_key)
    validity_end = environ.get(validity_end_key)
    if verified != 'SUCCESS':
        return False

    if validity_start is None or validity_end is None:
        return True

    validity_start = date_parse(validity_start)
    validity_end = date_parse(validity_end)

    if validity_start.tzinfo != _TZ_UTC or validity_end.tzinfo != _TZ_UTC:
        # Can't consider other timezones
        return False

    now = datetime.utcnow().replace(tzinfo=_TZ_UTC)
    return validity_start <= now <= validity_end
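A hedged usage sketch: the SSL_CLIENT_* key names below are assumptions (typical mod_ssl variable names), not taken from the source, and whether the range check passes also depends on the module-level _TZ_UTC constant matching the tzinfo that date_parse returns.

environ = {
    'SSL_CLIENT_VERIFY': 'SUCCESS',                      # hypothetical key names
    'SSL_CLIENT_V_START': '2024-01-01T00:00:00+00:00',
    'SSL_CLIENT_V_END': '2034-01-01T00:00:00+00:00',
}
print(verify_certificate(environ, 'SSL_CLIENT_VERIFY',
                         'SSL_CLIENT_V_START', 'SSL_CLIENT_V_END'))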
Example #4
def parse_playlist_form(fp):
    show_date = None
    doc = html_parse(fp)
    inputs = {}
    for input in doc.getroot().body.xpath('//input[@type="text"]'):
        if input.attrib['name'] == 'date':
            show_date = date_parse(input.attrib['value'])
        elif PATTERN.match(input.attrib['name']):
            field,num = PATTERN.match(input.attrib['name']).groups()
            if field == 'timestamp':
                try:
                    time = date_parse(input.attrib['value'])
                    real_date = date_parse(show_date.strftime('%Y-%m-%d') + ' ' + time.strftime('%H:%M:%S'))
                    d = inputs.setdefault(int(num),{})
                    d['unix_time'] = mktime(real_date.timetuple())
                    d['date_str'] = real_date.strftime('%Y-%m-%d')
                    d['time_str'] = real_date.strftime('%H:%M:%S')
                except: pass # 'auto' probably
            else:
                try:
                    value = input.attrib['value']
                    inputs.setdefault(int(num),{})[field] = value
                except KeyError: pass
                
    show = []
    for k in sorted(inputs):
        d = dict((key,inputs[k].get(key,None)) for key in KEEPER_KEYS)
        if filter(None,d.values()):
            show.append(d)
    return (show[0]['date_str'],show)
Example #5
def is_date(string):
    try:
        date_parse(string)
        if string == "at" or string == "on" or string == "." or string == ",":
            return False
        return True
    except ValueError:
        return False
Example #6
def get_entries_in_month(month_to_check, entries):
    time_periods_entries = []
    month_as_date = date_parse(month_to_check)
    for entry in entries:
        entry_start = date_parse(entry['start_date'])
        if month_as_date.month == entry_start.month and month_as_date.year == entry_start.year:
            time_periods_entries.append(entry)
    return time_periods_entries
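A small usage sketch with invented entries; only entries whose start_date falls in the same month and year as month_to_check are kept.

entries = [
    {'start_date': '2021-03-05'},   # kept
    {'start_date': '2021-04-01'},   # dropped
]
print(get_entries_in_month('2021-03-01', entries))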
Example #7
 def parse(cls, raw):
     try:
         start_date = date_parse(raw['start_date'])
         end_date = date_parse(raw['end_date'])
     except (AttributeError, ValueError, TypeError):
         raise ValueError("'start_date' or 'end_date' invalid.")
     if start_date >= end_date:
         raise ValueError("'start_date' must be < 'end_date'.")
     return cls(start_date, end_date)
Example #8
def load_resources(bucket, prefix, region, account_config, accounts,
                   assume, start, end, resources, store, db, verbose, debug):
    """load resources into resource database."""
    logging.basicConfig(level=(verbose and logging.DEBUG or logging.INFO))
    logging.getLogger('botocore').setLevel(logging.WARNING)
    logging.getLogger('s3transfer').setLevel(logging.WARNING)
    start = date_parse(start)
    end = date_parse(end)

    if not resources:
        resources = ['NetworkInterface', 'Instance', 'LoadBalancer']

    account_map = {}
    data = yaml.safe_load(account_config.read())
    for a in data.get('accounts', ()):
        if accounts and (a['name'] in accounts or a['account_id'] in accounts):
            account_map[a['account_id']] = a
        elif not accounts:
            account_map[a['account_id']] = a
    account_ids = list(account_map)

    executor = ProcessPoolExecutor
    if debug:
        from c7n.executor import MainThreadExecutor
        MainThreadExecutor.c7n_async = False
        executor = MainThreadExecutor

    stats = Counter()
    t = time.time()
    with executor(max_workers=multiprocessing.cpu_count()) as w:
        futures = {}
        for a in account_ids:
            for r in resources:
                futures[w.submit(
                    process_account_resources, a, bucket, prefix,
                    region, store, start, end, r)] = (a, r)

        indexer = RESOURCE_FILE_INDEXERS[r]
        for f in as_completed(futures):
            a, r = futures[f]
            if f.exception():
                log.error("account:%s error:%s", a, f.exception())
                continue
            files, dl_stats = f.result()
            idx_stats = indexer(db, resource_config_iter(files))
            log.info(
                "loaded account:%s files:%d bytes:%s events:%d resources:%d idx-time:%d dl-time:%d",
                account_map[a]['name'], len(files),
                human_size(dl_stats['DownloadSize'] + dl_stats['CacheSize']),
                idx_stats['Records'],
                idx_stats['RowCount'],
                idx_stats['IndexTime'],
                dl_stats['FetchTime'])
            stats.update(dl_stats)
            stats.update(idx_stats)
    log.info("Loaded %d resources across %d accounts in %0.2f",
             stats['RowCount'], len(account_ids), time.time() - t)
Example #9
    def process(self, user_handle, **kwargs):
        """
            Determine the edit rate of user(s).  The parameter *user_handle*
            can be either a string or an integer or a list of these types.
            When the *user_handle* type is integer it is interpreted as a user
            id, and as a user_name for string input.  If a list of users is
            passed to the *process* method then a dict object with edit rates
            keyed by user handles is returned.

            - Parameters:
                - **user_handle** - String or Integer (optionally lists).
                    Value or list of values representing user handle(s).

            - Return:
                - Dictionary. key(string): user handle, value(Float):
                edit counts
        """

        # Extract edit count for given parameters
        edit_rate = list()
        ec_kwargs = deepcopy(self.__dict__)
        e = ec.EditCount(**ec_kwargs).process(user_handle, **kwargs)

        # Compute time difference between datetime objects and get the
        # integer number of seconds

        if self.group == umpt.REGISTRATION:
            time_diff_sec = self.t * 3600.0
        elif self.group == umpt.INPUT:
            try:
                start_ts_obj = date_parse(
                    format_mediawiki_timestamp(self.datetime_start))
                end_ts_obj = date_parse(
                    format_mediawiki_timestamp(self.datetime_end))
            except (AttributeError, ValueError):
                raise um.UserMetricError()

            time_diff_sec = (end_ts_obj - start_ts_obj).total_seconds()
        else:
            raise um.UserMetricError('group parameter not specified.')

        # Normalize the time interval based on the measure
        if self.time_unit == self.TIME_UNIT_TYPE.DAY:
            time_diff = time_diff_sec / (24 * 60 * 60)
        elif self.time_unit == self.TIME_UNIT_TYPE.HOUR:
            time_diff = time_diff_sec / (60 * 60)
        else:
            time_diff = time_diff_sec

        # Build the list of edit rate metrics
        for i in e.__iter__():
            new_i = i[:]  # Make a copy of the edit count element
            new_i.append(new_i[1] / (time_diff * self.time_unit_count))
            new_i.append(time_diff)
            edit_rate.append(new_i)
        self._results = edit_rate
        return self
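The INPUT branch above reduces to parsing the two boundary timestamps and converting the gap into the requested unit. A self-contained sketch with invented ISO timestamps (the real code first runs them through format_mediawiki_timestamp):

from dateutil.parser import parse as date_parse

start_ts_obj = date_parse("2021-01-01 00:00:00")
end_ts_obj = date_parse("2021-01-02 12:00:00")
time_diff_sec = (end_ts_obj - start_ts_obj).total_seconds()
print(time_diff_sec / (24 * 60 * 60))   # 1.5 days
print(time_diff_sec / (60 * 60))        # 36.0 hours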
Example #10
    def process_events(self, events):
        enrollments = []
        for event in events['Events']:
            section_data = event['Section']
            course_data = section_data['Course']

            section = Section()
            section.term = Term(quarter=course_data['Quarter'],
                                year=course_data['Year'])
            section.curriculum_abbr = course_data['CurriculumAbbreviation']
            section.course_number = course_data['CourseNumber']
            section.section_id = section_data['SectionID']
            section.is_primary_section = True
            section.linked_section_urls = []

            if ('PrimarySection' in event and
                    'Course' in event['PrimarySection']):
                primary_course = event['PrimarySection']['Course']
                if primary_course:
                    section.is_primary_section = False
                    section.primary_section_curriculum_abbr = \
                        primary_course['CurriculumAbbreviation']
                    section.primary_section_course_number = \
                        primary_course['CourseNumber']
                    section.primary_section_id = \
                        event['PrimarySection']['SectionID']

            try:
                data = {
                    'Section': section,
                    'Role': EnrollmentModel.STUDENT_ROLE,
                    'UWRegID': event['Person']['UWRegID'],
                    'Status': self._enrollment_status(event, section),
                    'LastModified': date_parse(event['LastModified']),
                    'InstructorUWRegID': event['Instructor']['UWRegID'] if (
                        'Instructor' in event and event['Instructor'] and
                        'UWRegID' in event['Instructor']) else None
                }

                if 'Auditor' in event and event['Auditor']:
                    data['Role'] = EnrollmentModel.AUDITOR_ROLE

                if 'RequestDate' in event:
                    data['RequestDate'] = date_parse(event['RequestDate'])

                enrollments.append(data)
            except UnhandledActionCodeException:
                self._log.warning("%s UNKNOWN %s for %s at %s" % (
                    log_prefix,
                    event['Action']['Code'],
                    event['Person']['UWRegID'],
                    event['LastModified']))
                pass

        self.load_enrollments(enrollments)
Example #11
def _process_help(args):

    # Unpack args
    state = args[1]
    users = args[0]

    thread_args = um.UserMetric._unpack_params(state)

    # Log progress
    if thread_args.log_:
        logging.debug(__name__ + '::Computing live account. (PID = %s)' %
                                 getpid())

    # Extract edit button click from edit_page_tracking table (namespace,
    # article title, timestamp) of first click and registration timestamps
    # (join on logging table)
    #
    # Query will return: (user id, time of registration, time of first
    # edit button click)
    query_args = namedtuple('QueryArgs', 'namespace')(thread_args.namespace)
    query_results = query_mod.live_account_query(users, thread_args.project,
                                                 query_args)

    # Iterate over results to determine boolean indicating whether
    # account is "live"

    results = {str(user): -1 for user in users}

    user_reg = query_mod.user_registration_date_logging(
        users, thread_args.project, None)

    # uid: diff_time
    user_reg = {str(r[0]): (datetime.now() - date_parse(r[1])).
                            total_seconds() / 3600 for r in user_reg}

    # Flag all users alive longer than t hours as "not invalid"
    for user in results:
        if user in user_reg and user_reg[user] >= thread_args.t:
                results[user] = 0

    for row in query_results:
        user = str(row[0])
        try:
            # get the difference in hours
            diff = (date_parse(row[2]) - date_parse(row[1])).total_seconds()
            diff /= 3600
        except Exception:
            continue

        if diff <= thread_args.t:
            results[user] = 1
        else:
            results[user] = 0

    return [(str(key), results[key]) for key in results]
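The registration-age check above is a parse-and-subtract expressed in hours; a minimal sketch with an invented registration timestamp standing in for r[1]:

from datetime import datetime
from dateutil.parser import parse as date_parse

registered = "2021-01-01 00:00:00"   # invented value
hours_alive = (datetime.now() - date_parse(registered)).total_seconds() / 3600
print(hours_alive >= 24)             # True once the account is at least a day old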
Example #12
 def to_datetime(self, v, format=None):
     if v == "NULL":
         return None
     elif format is None:
         return date_parse(v) #much slower than strptime, avoid for large jobs
     else:
         try:
             return datetime.datetime.strptime(v, format)
         except ValueError:
             if self._log:
                 self._log.warning("Using the slower date parse: parsing value '%s' with format '%s' failed",
                                   v, format)
             return date_parse(v)
Example #13
def __get_file_names(start_dt, end_dt=None):

    if not end_dt:
        start_dt = date_parse(start_dt)
        return [start_dt.strftime("%Y-%m-%d")], __get_time_range(start_dt, start_dt)

    start_dt = date_parse(start_dt)
    end_dt = date_parse(end_dt)

    dates = __daterange(start_dt, end_dt)
    time_tuple = __get_time_range(start_dt, end_dt)

    return [d.strftime("%Y-%m-%d") for d in dates], time_tuple
Example #14
    def test_implicit_date_filters(self):
        john = Person.objects.get(name="John")
        # Mark was created at least one second after John.
        mark = Person.objects.get(name="Mark")

        from rest_framework import serializers
        from rest_framework.renderers import JSONRenderer

        class PersonSerializer(serializers.ModelSerializer):
            class Meta:
                model = Person

        # Figure out what the date strings should look like based on the
        # serializer output.
        data = PersonSerializer(john).data

        date_str = JSONRenderer().render(data['date_joined']).decode('utf-8').strip('"')

        # Adjust for imprecise rendering of time
        datetime_str = JSONRenderer().render(date_parse(data['datetime_joined']) + datetime.timedelta(seconds=0.6)).decode('utf-8').strip('"')

        # Adjust for imprecise rendering of time
        dt = datetime.datetime.combine(datetime.date.today(), date_parse(data['time_joined']).time()) + datetime.timedelta(seconds=0.6)
        time_str = JSONRenderer().render(dt.time()).decode('utf-8').strip('"')

        # DateField
        GET = {
            'date_joined__lte': date_str,
        }
        f = AllLookupsPersonDateFilter(GET, queryset=Person.objects.all())
        self.assertEqual(len(list(f)), 2)
        p = list(f)[0]

        # DateTimeField
        GET = {
            'datetime_joined__lte': datetime_str,
        }
        f = AllLookupsPersonDateFilter(GET, queryset=Person.objects.all())
        self.assertEqual(len(list(f)), 1)
        p = list(f)[0]
        self.assertEqual(p.name, "John")

        # TimeField
        GET = {
            'time_joined__lte': time_str,
        }
        f = AllLookupsPersonDateFilter(GET, queryset=Person.objects.all())
        self.assertEqual(len(list(f)), 1)
        p = list(f)[0]
        self.assertEqual(p.name, "John")
Example #15
def get_dates(start, end, tz):
    mytz = tz and zoneinfo.gettz(tz) or tzutc()
    start = date_parse(start).replace(tzinfo=mytz)
    if end:
        end = date_parse(end).replace(tzinfo=mytz)
    else:
        end = datetime.now().replace(tzinfo=mytz)
    if tz:
        start = start.astimezone(tzutc())
        if end:
            end = end.astimezone(tzutc())
    if start > end:
        start, end = end, start
    return start, end
Example #16
 def _row(self, item):
     photopage_url = None
     for url in item["urls"]["url"]:
         if url["type"] == "photopage":
             photopage_url = url["_content"]
     return (item["id"],
             # date posted is gmt epoch time, convert it to the same format as date taken
             # detail as https://www.flickr.com/services/api/misc.dates.html
             date_parse(time.strftime("%Y-%m-%d %H:%M:%S",
                                      time.gmtime(float(item["dates"]["posted"])))).replace(tzinfo=tzutc()),
             date_parse(item["dates"]["taken"]), item["license"], item["safety_level"],
             item.get("originalformat"), item["owner"]["nsid"], item["owner"]["username"],
             item["title"]["_content"].replace('\n', ' '),
             item["description"]["_content"].replace('\n', ' '), item["media"], photopage_url)
Example #17
def getHours():
    session = Session()
    try:
        start = date_parse(request.args.get("start","1979-04-01T07:00:00.000Z"))
    except:
        start = date_parse("1979-04-01T07:00:00.000Z")
    end = request.args.get("stop",None)
    try:
        if end:
            end = date_parse(end)
    except:
        end = None
    print(start)
    user_id = current_user.id
    return HoursDict(TimeLogged.times_between(start,end,user_id=user_id))
Example #18
def image_to_dict(image, detail=True):
    d = dict(id=image['id'], name=image['name'])
    if detail:
        d['updated'] = utils.isoformat(date_parse(image['updated_at']))
        d['created'] = utils.isoformat(date_parse(image['created_at']))
        d['status'] = 'DELETED' if image['deleted_at'] else 'ACTIVE'
        d['progress'] = 100 if image['status'] == 'available' else 0
        d['user_id'] = image['owner']
        d['tenant_id'] = image['owner']
        d['links'] = util.image_to_links(image["id"])
        if image["properties"]:
            d['metadata'] = image['properties']
        else:
            d['metadata'] = {}
    return d
Example #19
 def when(self, when):
     if 'time' in when:
         self.start = self.end = time_parse(when['time'])
         self.all_day = False
     elif 'start_time' in when:
         self.start = time_parse(when['start_time'])
         self.end = time_parse(when['end_time'])
         self.all_day = False
     elif 'date' in when:
         self.start = self.end = date_parse(when['date'])
         self.all_day = True
     elif 'start_date' in when:
         self.start = date_parse(when['start_date'])
         self.end = date_parse(when['end_date'])
         self.all_day = True
Example #20
def image_to_dict(image, detail=True):
    d = dict(id=image["id"], name=image["name"])
    if detail:
        d["updated"] = utils.isoformat(date_parse(image["updated_at"]))
        d["created"] = utils.isoformat(date_parse(image["created_at"]))
        d["status"] = "DELETED" if image["deleted_at"] else "ACTIVE"
        d["progress"] = 100 if image["status"] == "available" else 0
        d["user_id"] = image["owner"]
        d["tenant_id"] = image["owner"]
        d["links"] = util.image_to_links(image["id"])
        if image["properties"]:
            d["metadata"] = image["properties"]
        else:
            d["metadata"] = {}
    return d
Example #21
def generate_selector(data):
    first_time = date_parse(data[1][0])
    last_time = date_parse(data[-1][0])
    time_length = last_time-first_time-SUBSET_SIZE
    subset_start = first_time +\
        datetime.timedelta(
            seconds=random.random() * time_length.total_seconds())
    if subset_start < first_time:
        subset_start = first_time
    subset_end = subset_start+SUBSET_SIZE

    def selector(timestamp):
        return (subset_start <= timestamp) & (timestamp <= subset_end)

    return selector
Example #22
def _get_timeseries(date_start, date_end, interval):
    """
        Generates a series of timestamps given a start date,
        end date, and interval
    """

    # Ensure the dates are string representations
    date_start = format_mediawiki_timestamp(date_start)
    date_end = format_mediawiki_timestamp(date_end)

    c = date_parse(date_start) + datetime.timedelta(hours=-int(interval))
    e = date_parse(date_end)
    while c < e:
        c += datetime.timedelta(hours=int(interval))
        yield c
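A self-contained sketch of the same interval walk, with the mediawiki formatting helper left out and plain ISO strings assumed; note that both endpoints end up being yielded.

import datetime
from dateutil.parser import parse as date_parse

def timeseries(date_start, date_end, interval):
    c = date_parse(date_start) + datetime.timedelta(hours=-int(interval))
    e = date_parse(date_end)
    while c < e:
        c += datetime.timedelta(hours=int(interval))
        yield c

print(list(timeseries("2021-01-01 00:00", "2021-01-01 06:00", 2)))
# 00:00, 02:00, 04:00 and 06:00 on 2021-01-01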
Example #23
def format_response(request):
    """
        Populates data for response to metrics requests.

        Parameters
        ~~~~~~~~~~

            request : RequestMeta
                RequestMeta object that stores request data.
    """

    args = ParameterMapping.map(request)

    metric_class = get_metric_type(request.metric)
    metric_obj = metric_class(**args)

    # Prepare metrics output for json response
    response = OrderedDict()

    response['type'] = get_request_type(request)
    response['header'] = metric_obj.header()

    # Get metric object params
    for key in metric_obj.__dict__:
        if not search(r'^_.*', key) and str(key) not in response:
            response[str(key)] = metric_obj.__dict__[key]

    response['cohort'] = str(request.cohort_expr)
    response['cohort_last_generated'] = str(request.cohort_gen_timestamp)
    response['time_of_response'] = datetime.now().strftime(DATETIME_STR_FORMAT)
    response['aggregator'] = str(request.aggregator)
    response['metric'] = str(request.metric)
    response['interval_hours'] = request.interval

    if request.group:
        response['group'] = REVERSE_GROUP_MAP[int(request.group)]
    else:
        # @TODO get access to the metric default for this attribute
        response['group'] = 'default'

    response['datetime_start'] = date_parse(metric_obj.datetime_start).\
        strftime(DATETIME_STR_FORMAT)
    response['datetime_end'] = date_parse(metric_obj.datetime_end).\
        strftime(DATETIME_STR_FORMAT)

    response['data'] = OrderedDict()

    return response, metric_class, metric_obj
Example #24
def insert_text_data(data_source, source_url, text, time_posted, session):
    """Adds the base entry for a text data source to the database and returns
    the newly created model


    Keyword arguments:
    data_source -- An enum indicating source. The enum is located in
    interns.models.models.AllowedSources
    source_url -- A string indicating the url the text was pulled from
    text -- the raw text data pulled from the url
    time_posted -- either a datetime object or a datetime string
    session -- active db session
    """
    if not isinstance(time_posted, datetime):
        time_posted = date_parse(time_posted)

    eleanor_logger.debug('Inserting text data into postgres')

    TextModel = models.TextSource(source_key=data_source,
                                  source_url=source_url,
                                  written_text=text,
                                  time_posted=time_posted)

    session.add(TextModel)
    return TextModel
Example #25
    def process_json(self, pjson):
        """
        Process a single proposal into a JSON.
        """
        proposal = {}

        for kp, kj in self.copy_keys.items():
            if kj in pjson:
                proposal[kp] = pjson[kj]

        proposal["region_name"] = "Cambridge, MA"
        proposal["source"] = "data.cambridgema.gov"

        tz = pytz.timezone("US/Eastern")
        updated_datestr = pjson.get("decisiondate", pjson["applicationdate"])
        updated_naive = date_parse(updated_datestr)
        proposal["updated_date"] = tz.localize(updated_naive)
        proposal["complete"] = self.match_complete(pjson["status"])
        proposal["description"] = pjson.get("reason_for_petition_other", "")

        if "location" in pjson and not pjson["location"]["needs_recoding"]:
            location = pjson["location"]
            try:
                human_address = json.loads(location["human_address"])
                proposal["address"] = human_address["address"]
                proposal["long"] = float(location["longitude"])
                proposal["lat"] = float(location["latitude"])
            except:
                proposal["location"] = None

        proposal["attributes"] = [(pk, pjson.get(k))
                                  for pk, k in self.remap_attributes.items()]

        return proposal
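date_parse returns a naive datetime here, which is why the result is localized afterwards; a minimal sketch of that step with an invented date string:

import pytz
from dateutil.parser import parse as date_parse

tz = pytz.timezone("US/Eastern")
updated_naive = date_parse("2019-07-15")   # tzinfo is None
print(tz.localize(updated_naive))          # 2019-07-15 00:00:00-04:00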
Example #26
def _date_or_none(thing):
    # datetime is a subclass of date, so check for datetime first
    if isinstance(thing, dt.datetime):
        return thing.date()
    elif isinstance(thing, dt.date):
        return thing
    else:
        return default(lambda: date_parse(thing))
Example #27
    def refresh_next_events(self,
                            lookahead_time: timedelta) -> List[RoutineEvent]:
        """Refreshes the list of scheduled events.
        
        Parses summary events that are returned by the calendar fetcher, in the time range 
        between utc now and utc now + lookahead_time.   
        """
        # TODO: should we handle events with the same id but that were rescheduled?
        events = []
        for event in self.gcal.fetch(lookahead_time):
            summary = event['summary']
            parse_match = self.re_extract.search(summary)

            if not parse_match:
                logging.warning(
                    'Event "%s" does not match a routine description, skipping!',
                    summary)
                continue

            start_date = date_parse(event['start']['dateTime'])
            routine_name = parse_match.group(1)
            routine_event = RoutineEvent(event['id'], routine_name, start_date)
            events.append(routine_event)

        return events
Example #28
def get_git_info():
    """
    Get the abbreviated commit version (not provided by get_git_info())
    Returns a dictionary
    """
    LAST_COMMIT = ['git', 'log', '-1']
    COMMANDS = {
        'short_commit': ['git', 'rev-parse', '--short', 'HEAD'],
        'commit': ['git', 'rev-parse', 'HEAD'],
        'tag': ['git', 'describe', '--tags'],
        'author': LAST_COMMIT + ["--pretty=format:%an"],
        'author_email': LAST_COMMIT + ["--pretty=format:%ae"],
        'committer': LAST_COMMIT + ["--pretty=format:%cn"],
        'committer_email': LAST_COMMIT + ["--pretty=format:%ce"],
        # %cd is the commit date
        'date_ISO': LAST_COMMIT + ['--pretty=format:%cd'],
        'message': LAST_COMMIT + ["--pretty=format:%B"],
        'raw': LAST_COMMIT,
        'root_dir': ['git', 'rev-parse', '--show-toplevel']
    }

    # always return a date, even in case of failure
    r = {'status': False, 'date': None}
    try:
        for var, command in COMMANDS.items():
            # NOTE: The 'text' argument is clearer,
            #       but for Python < 3.7, only `universal_newlines`
            #       is accepted
            try:
                r[var] = subprocess.check_output(
                    command,
                    universal_newlines=True,
                    stderr=subprocess.DEVNULL).strip()
                # keep first part
                if var == 'tag':
                    r[var] = r[var].split('-')[0]
                elif var == 'date_ISO':
                    r['date'] = date_parse(r[var])
                r['status'] = True
            except subprocess.CalledProcessError as e:
                if e.returncode == 128:
                    # generally means "unexpected error"
                    # git status (no repo),
                    # git tag (no tag)
                    r[var] = ''
                else:
                    # should be 1, type whatever that is
                    r[var] = "# Cannot execute '%s': %s" % (command, e)
            except Exception as e:
                # any other error, it's probably meaningless at this point
                r[var] = "# Unexpected error '%s': %s" % (command, e)
        # convert
        return r
    except FileNotFoundError as e:
        # no git command available
        r.update({
            'status': False,
            'diagnosis': 'Git command not found',
            'error': str(e)
        })
        return r
Example #29
def insert_non_retweet_data(tweet_data):
    """Takes the passed in JSON tweet_data and inserts into the database"""
    eleanor_logger.debug('Inserting tweet data')
    with GetDBSession() as db_session:
        tweetTextModel = insert_text_data(
            models.AllowedSources.twitter.name,
            tweet_data['url'], tweet_data['tweet_text'],
            date_parse(tweet_data['tweet_created']), db_session)

        tweetModel = twitter_models.TwitterSource(
            tweeter_user_name=tweet_data['user_name'],
            tweet_id=tweet_data['tweet_id'],
            is_retweet=False)
        tweetTextModel.twitter_source = tweetModel

        add_user_mentions(tweet_data, tweetModel)
        add_hashtags(tweet_data, tweetModel)
        add_urls(tweet_data, tweetModel)

        try:
            db_session.commit()
        except IntegrityError as e:
            if 'duplicate key value' in str(e):
                # We've already captured this so, moving on
                eleanor_logger.info(
                    'Duplicate tweet is already in the database, skipping')
            else:
                eleanor_logger.critical(
                    ('A database error occurred while attempting '
                     'to insert tweet %s'), e)
        except Exception as e:
            # Something real bad happened
            eleanor_logger.critical(
                ('An error has occurred while inserting a tweet into '
                 'the database %s'), e)
Example #30
def request_age(request):
    if isinstance(request, Request):
        created = request.statehistory[0].when
    else:
        created = request.find('history').get('when')
    created = date_parse(created)
    return datetime.utcnow() - created
Example #31
def return_point_timestamp(dev_id, unit, period, measurement=None, channel=None):
    dbcon = InfluxDBClient(
        INFLUXDB_HOST,
        INFLUXDB_PORT,
        INFLUXDB_USER,
        INFLUXDB_PASSWORD,
        INFLUXDB_DATABASE)

    query_str = query_string(
        unit,
        dev_id,
        measure=measurement,
        channel=channel,
        value='LAST',
        past_sec=period)
    if query_str == 1:
        return [None, None]

    try:
        raw_data = dbcon.query(query_str).raw
        number = len(raw_data['series'][0]['values'])
        time_raw = raw_data['series'][0]['values'][number - 1][0]
        value = raw_data['series'][0]['values'][number - 1][1]
        value = '{:.3f}'.format(float(value))
        # Convert date-time to epoch (potential bottleneck for data)
        dt = date_parse(time_raw)
        timestamp = calendar.timegm(dt.timetuple()) * 1000
        return [timestamp, value]
    except KeyError:
        return [None, None]
    except Exception:
        return [None, None]
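The epoch conversion flagged in the comment can be checked in isolation; the sample point below is invented.

import calendar
from dateutil.parser import parse as date_parse

time_raw = "2021-06-01T12:00:00Z"
dt = date_parse(time_raw)
print(calendar.timegm(dt.timetuple()) * 1000)   # 1622548800000 (epoch milliseconds)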
Example #32
def check_update_needed(db_table_object, repository_name, pushed_at):
    """
    Returns True if there is a need to clone the github repository
    """
    logger.info(
        f"This is the repo name from check_update <<{repository_name}>> and db_table <<{db_table_object}>>"
    )
    result = get_single_repository(db_table_object, repository_name)

    logger.info(result)

    if not result:
        logger.info("result not found")
        return True
    else:
        logger.info("result found")
        logger.info(f"This is the result {result}")

        # the pushed_at timestamp available in the repo right now
        epoch = date_parse(pushed_at).timestamp()
        logger.info(
            f"Comparing {int(epoch)} and {result['downloaded_at']} for {repository_name}"
        )
        if int(epoch) > int(result["downloaded_at"]):
            return True

    return False
Example #33
def fetch_data(url):
    """Helper method to fetch data from url."""
    page_tree = get_tree(url)
    if page_tree is None:
        return
    date = \
        page_tree.xpath("//div[@class='meta-data']/div/h3/a/text()")[0]
    date = date_parse(date)
    if YEAR:
        if date.year < YEAR:
            sys.exit(0)

    image_link = page_tree.xpath("//img[@id='main-comic']/@src")[0]
    image_link = "http:{}".format(image_link) \
        if image_link.startswith("//") else \
        image_link
    permalink = page_tree.xpath("//input[@id='permalink']/@value")[0]
    author = page_tree.xpath(
        "//small[@class='author-credit-name']/text()"
    )[0].strip("by ")
    comic_number = permalink.strip("/").split("/")[-1]
    return {
        "number": comic_number,
        "image": image_link,
        "permalink": permalink,
        "metadata": {
            "date": (date.year, date.month, date.day),
            "author": author
        }
    }
Example #34
def pg_sanitize_value(value, pg_datatype, max_length):
    '''Attempt to sanitize the value so it is better suited for casting
       to the desired datatype in postgres.

       If parsing fails, the value is returned unchanged.'''
    if value is not None:
        if pg_datatype in ('date', 'timestamptz', 'timestamp'):
            try:
                return value.isoformat()
            except AttributeError:
                try:
                    return date_parse(value).isoformat()
                except:
                    pass # let postgresql try its best at parsing :(
        elif pg_datatype in ('char', 'text', 'varchar'):
            # truncate texts when there is a character limit in the db. Cast to
            # string so it also works for values sent as int/float/...
            if max_length is not None:
                return str(value)[:max_length]
        elif pg_datatype in ('bytea', 'geometry'):
            return Binary(value)
        elif pg_datatype == 'json':
            # serialize to json to use value with postgresql json type
            return Json(value)
    return value
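In the date branch above, anything without an isoformat() attribute is routed through date_parse first; a short sketch of that fallback:

from dateutil.parser import parse as date_parse

print(date_parse("25 Dec 2020 18:30").isoformat())   # 2020-12-25T18:30:00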
Example #35
 def __parse_config(self, filtered_config, db_config):
     self._isolines_provider = filtered_config[
         self.ISOLINES_PROVIDER_KEY].lower()
     if not self._isolines_provider:
         self._isolines_provider = self.DEFAULT_PROVIDER
     self._geocoder_provider = filtered_config[
         self.GEOCODER_PROVIDER_KEY].lower()
     self._period_end_date = date_parse(
         filtered_config[self.PERIOD_END_DATE])
     self._isolines_quota = self._get_effective_monthly_quota(
         self.QUOTA_KEY)
     if filtered_config[self.SOFT_LIMIT_KEY].lower() == 'true':
         self._soft_isolines_limit = True
     else:
         self._soft_isolines_limit = False
     if self._isolines_provider == self.HEREMAPS_PROVIDER:
         self._heremaps_app_id = db_config.heremaps_isolines_app_id
         self._heremaps_app_code = db_config.heremaps_isolines_app_code
         self._heremaps_apikey = db_config.heremaps_isolines_apikey
         self._heremaps_use_apikey = db_config.heremaps_isolines_use_apikey
         self._heremaps_service_params = db_config.heremaps_isolines_service_params
     elif self._isolines_provider == self.MAPZEN_PROVIDER:
         self._mapzen_matrix_api_key = self._db_config.mapzen_matrix_api_key
         self._mapzen_matrix_service_params = db_config.mapzen_matrix_service_params
         self._mapzen_isochrones_service_params = db_config.mapzen_isochrones_service_params
     elif self._isolines_provider == self.MAPBOX_PROVIDER:
         self._mapbox_matrix_api_keys = self._db_config.mapbox_matrix_api_keys
         self._mapbox_matrix_service_params = db_config.mapbox_matrix_service_params
         self._mapbox_isochrones_service_params = db_config.mapbox_isochrones_service_params
     elif self._isolines_provider == self.TOMTOM_PROVIDER:
         self._tomtom_isolinesx_api_keys = self._db_config.tomtom_isolines_api_keys
         self._tomtom_isolines_service_params = db_config.tomtom_isolines_service_params
Example #36
    def future_event(self):
        today = date.today()

        if isinstance(self.date, str):
            return today <= date_parse(self.date).date()

        return today <= self.date
Example #37
def list_images(request, detail=False):
    # Normal Response Codes: 200, 203
    # Error Response Codes: computeFault (400, 500),
    #                       serviceUnavailable (503),
    #                       unauthorized (401),
    #                       badRequest (400),
    #                       overLimit (413)

    log.debug('list_images detail=%s', detail)
    since = utils.isoparse(request.GET.get('changes-since'))
    with backend.PlanktonBackend(request.user_uniq) as b:
        images = b.list_images()
        if since:
            updated_since = lambda img: date_parse(img["updated_at"]) >= since
            images = ifilter(updated_since, images)
            if not images:
                return HttpResponse(status=304)

    images = sorted(images, key=lambda x: x['id'])
    reply = [image_to_dict(image, detail) for image in images]

    if request.serialization == 'xml':
        data = render_to_string('list_images.xml',
                                dict(images=reply, detail=detail))
    else:
        data = json.dumps(dict(images=reply))

    return HttpResponse(data, status=200)
Example #38
def request_when_staged(request, project, first=False):
    when = None
    for history in request.statehistory:
        if project in history.comment:
            when = history.when

    return date_parse(when)
Example #39
def last_data(sensor_measure, sensor_id, sensor_period):
    """Return the most recent time and value from influxdb"""
    current_app.config['INFLUXDB_USER'] = INFLUXDB_USER
    current_app.config['INFLUXDB_PASSWORD'] = INFLUXDB_PASSWORD
    current_app.config['INFLUXDB_DATABASE'] = INFLUXDB_DATABASE
    dbcon = influx_db.connection
    try:
        raw_data = dbcon.query("""SELECT last(value)
                                  FROM {}
                                  WHERE device_id='{}'
                                        AND time > now() - {}m
                               """.format(sensor_measure, sensor_id,
                                          sensor_period)).raw
        number = len(raw_data['series'][0]['values'])
        time_raw = raw_data['series'][0]['values'][number - 1][0]
        value = raw_data['series'][0]['values'][number - 1][1]
        value = '{:.3f}'.format(float(value))
        # Convert date-time to epoch (potential bottleneck for data)
        dt = date_parse(time_raw)
        timestamp = calendar.timegm(dt.timetuple()) * 1000
        live_data = '[{},{}]'.format(timestamp, value)
        return Response(live_data, mimetype='text/json')
    except KeyError:
        logger.debug("No Data returned form influxdb")
        return '', 204
    except Exception as e:
        logger.exception("URL for 'last_data' raised and error: "
                         "{err}".format(err=e))
        return '', 204
Example #40
def framework_agreement(framework_slug):
    framework = get_framework(data_api_client, framework_slug, allowed_statuses=['standstill', 'live'])
    supplier_framework = return_supplier_framework_info_if_on_framework_or_abort(data_api_client, framework_slug)

    if supplier_framework['agreementReturned']:
        date_formatter = DateFormatter(current_app.config['DM_TIMEZONE'])
        supplier_framework['agreementReturnedAt'] = date_formatter.datetimeformat(
            date_parse(supplier_framework['agreementReturnedAt'])
        )

    # if there's a frameworkAgreementVersion key, it means we're on G-Cloud 8 or higher
    if framework.get('frameworkAgreementVersion'):
        drafts, complete_drafts = get_drafts(data_api_client, framework_slug)
        lots_with_completed_drafts = [
            lot for lot in framework['lots'] if count_drafts_by_lot(complete_drafts, lot['slug'])
        ]

        return render_template(
            'frameworks/contract_start.html',
            signature_page_filename=SIGNATURE_PAGE_FILENAME,
            framework=framework,
            lots=[{
                'name': lot['name'],
                'has_completed_draft': (lot in lots_with_completed_drafts)
            } for lot in framework['lots']],
            supplier_framework=supplier_framework,
        ), 200

    return render_template_with_csrf(
        "frameworks/agreement.html",
        framework=framework,
        supplier_framework=supplier_framework,
        agreement_filename=AGREEMENT_FILENAME
    )
Example #41
 def _format_mediawiki_timestamp(self, timestamp_repr):
     """ Convert to mediawiki timestamps """
     if hasattr(timestamp_repr, 'strftime'):
         return timestamp_repr.strftime(MEDIAWIKI_TIMESTAMP_FORMAT)
     else:
         return date_parse(timestamp_repr).strftime(
             MEDIAWIKI_TIMESTAMP_FORMAT)
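A sketch of the fallback branch; MEDIAWIKI_TIMESTAMP_FORMAT is not shown above, so the 14-digit YYYYMMDDHHMMSS format MediaWiki uses is assumed here.

from dateutil.parser import parse as date_parse

MEDIAWIKI_TIMESTAMP_FORMAT = '%Y%m%d%H%M%S'   # assumed value
print(date_parse("2021-06-01 10:20:30").strftime(MEDIAWIKI_TIMESTAMP_FORMAT))
# 20210601102030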
Example #42
def return_point_timestamp(dev_id, unit, period, measurement=None, channel=None):
    current_app.config['INFLUXDB_USER'] = INFLUXDB_USER
    current_app.config['INFLUXDB_PASSWORD'] = INFLUXDB_PASSWORD
    current_app.config['INFLUXDB_DATABASE'] = INFLUXDB_DATABASE
    current_app.config['INFLUXDB_TIMEOUT'] = 5
    dbcon = influx_db.connection

    query_str = query_string(
        unit,
        dev_id,
        measure=measurement,
        channel=channel,
        value='LAST',
        past_sec=period)
    if query_str == 1:
        return [None, None]

    try:
        raw_data = dbcon.query(query_str).raw
        number = len(raw_data['series'][0]['values'])
        time_raw = raw_data['series'][0]['values'][number - 1][0]
        value = raw_data['series'][0]['values'][number - 1][1]
        value = '{:.3f}'.format(float(value))
        # Convert date-time to epoch (potential bottleneck for data)
        dt = date_parse(time_raw)
        timestamp = calendar.timegm(dt.timetuple()) * 1000
        return [timestamp, value]
    except KeyError:
        return [None, None]
    except Exception:
        return [None, None]
Example #43
    def _printable_cert(self, docs):
        archive = docs['archive']
        cipher = archive.meta.cipher
        if hasattr(cipher, 'mode'):
            cipher = cipher.mode
        created = archive.meta.created
        try:
            created = date_parse(created)
        except:
            pass
        expires = created + date_delta(years=30)
        md5 = b2a_hex(docs['auth'].md5).upper()
        key = b2a_hex(docs['cert'].key).upper()
        hk = pairs(fours(pairs(iter(key))), " . ")

        return resource_string(__name__, "data/certificate.html").format(
            json=as_json(docs),
            aid=docs['signature'].aid,
            keyB=next(hk),
            keyC=next(hk),
            keyD=next(hk),
            keyE=next(hk),
            name=archive.meta.name,
            email=archive.meta.email,
            uploaded=created.strftime("%c"),
            expires=expires.strftime("%c"),
            title=archive.title,
            desc=archive.description,
            md5=" . ".join(fours(pairs(iter(md5)))),
            fmt=archive.meta.format,
            cipher=cipher)
Example #44
 def setUp(self):
     data = load_data('report.json')
     self.records = data['asg']['records']
     self.headers = data['asg']['headers']
     self.rows = data['asg']['rows']
     for rec in self.records.values():
         rec['CustodianDate'] = date_parse(rec['CustodianDate'])
Example #45
def insert_text_data(data_source, source_url, text, time_posted, session):
    """Adds the base entry for a text data source to the database and returns
    the newly created model


    Keyword arguments:
    data_source -- An enum indicating source. The enum is located in
    interns.models.models.AllowedSources
    source_url -- A string indicating the url the text was pulled from
    text -- the raw text data pulled from the url
    time_posted -- either a datetime object or a datetime string
    session -- active db session
    """
    if not isinstance(time_posted, datetime):
        time_posted = date_parse(time_posted)

    eleanor_logger.debug('Inserting text data into postgres')

    TextModel = models.TextSource(
        source_key=data_source,
        source_url=source_url,
        written_text=text,
        time_posted=time_posted
    )

    session.add(TextModel)
    return TextModel
Example #46
def search_count_of_user_tweets_on_day(username, date, search_term):
    """When given a username, datetime, and search_term return the number of
    times search term was tweeted by username on the day of datetime"""
    return_data = {}
    date = date_parse(date)
    start = datetime(year=date.year, month=date.month, day=date.day)
    end = start + timedelta(days=1)
    with GetDBSession() as db_session:
        user_query = db_session.query(
            twitter_models.TwitterSource
            ).filter(
                twitter_models.TwitterSource.tweeter_user_name == username
            ).join(
                twitter_models.TwitterSource.text_source, aliased=True
            ).filter(
                and_(
                    and_(
                        models.TextSource.time_posted > start,
                        models.TextSource.time_posted < end
                    ),
                    models.TextSource.written_text.contains(search_term)
                )
            )

        return_data[username] = {
            search_term: user_query.count(),
            'date': start.strftime('%Y-%m-%d')
        }
    return return_data
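The day window is built by parsing the incoming date and flooring it to midnight; a small sketch with an invented input:

from datetime import datetime, timedelta
from dateutil.parser import parse as date_parse

date = date_parse("2021-02-14 18:45")
start = datetime(year=date.year, month=date.month, day=date.day)
end = start + timedelta(days=1)
print(start, end)   # 2021-02-14 00:00:00  2021-02-15 00:00:00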
Example #47
def ensure_latest_package(package_name: str,
                          pip_args=[],
                          md_file: str = 'VERSION.md',
                          update_pkg_pip_args=[]):
    """Toss this in main to perform a check that the user is always running latest

    pip_args will be passed as a list to add things like trusted-host or extra-index-url.

    Example:
        pip_args = ['--extra-index-url', 'https://artifactory.com/api/pypi/eg/simple',
                    '--trusted-host', 'artifactory.com']

    :param package_name:
    :param pip_args:
    :param md_file: Display this file if the package was updated
    :param update_pkg_pip_args: pip args pass to update_package on out of date package e.g. --extra-index-url
    :return:
    """
    config = Config('~/.double_click/package_versions.ini')

    if not config.has_section(package_name):
        config.add_section(package_name)
        last_checked = None
    else:
        last_checked = config.get(package_name, 'last_checked')

    if not last_checked or date_parse(last_checked) < dt.utcnow() - timedelta(
            hours=1):
        try:
            latest_version = None
            current_version = pkg_resources.get_distribution(
                package_name).version
            version_output = update_package(f'{package_name}==DoesNotExist',
                                            pip_args=update_pkg_pip_args)
            version_output = [
                line for line in version_output.split('\n')
                if 'DoesNotExist (from versions:' in line
            ]
            if version_output:
                versions = re.findall(r'([0-9][^,)]+)', version_output[0])
                latest_version = versions[-1]

            if latest_version and current_version != latest_version:
                update_pkg_pip_args = update_pkg_pip_args if update_pkg_pip_args else pip_args
                update_package(package_name, pip_args=update_pkg_pip_args)
                config.set(package_name, 'last_checked', str(dt.utcnow()))
                update_msg = f'#An update to {package_name} was retrieved that prevented your command from running.'
                if md_file:
                    display_version(package_name, md_file)
                    update_msg += f'\nPlease review changes and re-run your command.'
                else:
                    update_msg += f'\nPlease re-run your command.'
                echo(update_msg)
                sys.exit(0)
        except pkg_resources.DistributionNotFound:
            # This should only occur during testing
            echo(f'{package_name} not found')

        config.set(package_name, 'last_checked', str(dt.utcnow()))
        config.save()
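The once-an-hour throttle above round-trips the stored timestamp string through date_parse before comparing it with "an hour ago"; a minimal sketch:

from datetime import datetime as dt, timedelta
from dateutil.parser import parse as date_parse

last_checked = str(dt.utcnow() - timedelta(minutes=30))              # as stored in the config
print(date_parse(last_checked) < dt.utcnow() - timedelta(hours=1))   # False: checked recently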
Example #48
def parse_query(query):
    """
    Parses the query for a chart and returns the relevant parameters
    that should be used to make the query on the database.
    @return: a tuple with all the relevant query information
    """
    # Date range
    start_time = datetime.min
    if query.stats_to is not None and query.stats_to != "":
        start_time = date_parse(query.stats_to)

    end_time = datetime.now().replace(minute=0, second=0, microsecond=0)
    if query.stats_from is not None and query.stats_from != "":
        end_time = date_parse(query.stats_from)

    date_range = [start_time, end_time]

    # Exercises filter
    exercise_list = [
        name.strip() for name in query.filter_exercises.split(",")
        if name.strip() != ""
    ]
    # Tags filter
    tag_list = [
        tag.strip() for tag in query.filter_tags.split(",")
        if tag.strip() != ""
    ]

    # Min-max grades
    min_grade = 0
    if query.min_submission_grade is not None and query.min_submission_grade != "":
        percentage = query.min_submission_grade
        if percentage.endswith('%'):
            percentage = percentage[:-1]
        min_grade = float(percentage)

    max_grade = 100
    if query.max_submission_grade is not None and query.max_submission_grade != "":
        percentage = query.max_submission_grade
        if percentage.endswith('%'):
            percentage = percentage[:-1]
        max_grade = float(percentage)

    grade_bounds = (min_grade, max_grade)

    return (query.chart_type, date_range, exercise_list, tag_list,
            grade_bounds, query.submissions_filter)
Example #49
    def _process_barcelona_rows(self, table):
        """Process rows for barcelona port.

        Decode all rows as unicode strings since tabula outputs byte strings by default.
        Extract matching date for each table section based on the section's description.
        Yield only rows that contain table data, skipping table section description.

        Known table section headers (each section has a different matching date):
            - "Buques que efectuaron operaciones durante la noche del día anterior"
            - "Buques que efectuaron operaciones durante las 20 horas del viernes 16.3.18 hata el domingo 18.3.18"  # noqa
            - "Buques que efectuaron operaciones durante el lunes 18.3.18"
            - "Ultima hora"

        Args:
            table (List[List[str]]): list of table rows from pdf

        Yields:
            List[str]:

        """
        matching_date = None
        for idx, row in enumerate(table):
            # tabula stores string data as bytes by default
            row = [cell for cell in row]

            # try deciphering matching_date of subsequent rows
            if any('Buques que efectuaron' in cell for cell in row):
                raw_matching_date = ''.join(row)
                date_match = re.search(r'(\d{1,2}\.\d{1,2}\.\d{2})',
                                       raw_matching_date)
                # matching date is mentioned explicitly in table section description
                if date_match:
                    matching_date = to_isoformat(date_match.group(1))
                    logger.debug(
                        'Found matching date: {}'.format(matching_date))

                # sometimes matching date is described implicitly in words in the pdf
                elif 'anterior' in raw_matching_date:
                    matching_date = to_isoformat(
                        str(
                            date_parse(self.reported_date, dayfirst=False) -
                            timedelta(days=1)),
                        dayfirst=False,
                    )
                    logger.debug(
                        'Found matching date: {}'.format(matching_date))
                else:
                    raise ValueError('Unable to find matching date: {}'.format(
                        raw_matching_date))
            elif any('ltima hora' in cell for cell in row):
                matching_date = self.reported_date
                logger.debug('Found matching date: {}'.format(matching_date))

            # do not yield table section headers
            if not ('/EXPORT' in row or any('Buques que efectuaron' in cell
                                            for cell in row)
                    or any('ltima hora' in cell for cell in row)):
                row.append('matching_date' if idx == 0 else matching_date)
                yield row
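For the "día anterior" branch, the matching date is simply the reported date minus one day; a sketch with an invented reported_date and plain isoformat() standing in for the unshown to_isoformat helper:

from datetime import timedelta
from dateutil.parser import parse as date_parse

reported_date = "2018-03-19"
print((date_parse(reported_date, dayfirst=False) - timedelta(days=1)).date().isoformat())
# 2018-03-18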
Example #50
 def iso_date(date_str):
     """
     this method will make sure that dates are formatted properly
     as with isoformat
     :param date_str:
     :return: YYYY-MM-DD date formatted
     """
     return date_parse(date_str).date().isoformat()
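Usage note: without dayfirst, dateutil reads ambiguous slashed dates month-first, so (calling the helper as a plain function) it normalises as follows.

print(iso_date("03/04/2021"))    # '2021-03-04' (month first by default)
print(iso_date("4 July 2021"))   # '2021-07-04'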
Example #51
def date_query(expr, value):
    operator_func, value = parse_query_value(value)
    try:
        date_value = date_parse(value)
    except ValueError:
        date_value = datetime.datetime.now()

    return operator_func(expr, date_value)
Example #52
 def test_parse(self):
     """Test the parse_raw method."""
     data = copy.deepcopy(TEST_CHECK)
     check = Check().parse(data)
     assert check.onfido_id == TEST_CHECK["id"]
     assert check.created_at == date_parse(TEST_CHECK["created_at"])
     assert check.status == "in_progress"
     assert check.result is None
Example #53
def get_launch_by_slug(launch_slug):
    launch = launches[launch_slug]
    data = get_weather(launch['lat'], launch['lng'])
    log.debug(data)
    interval = request.args.get("interval")

    if interval is None or interval == "daily":
        result = []
        grouped = itertools.groupby(
            data["properties"]["periods"],
            lambda x: date_parse(x["startTime"]).date().isoformat(),
        )
        for day, hours in grouped:
            wind_speed = []
            wind_direction = []
            for hour in hours:
                start = date_parse(hour["startTime"])
                if 6 < start.hour < 16:
                    wind = make_wind_dict(hour)
                    wind_speed.append(wind["speed"])
                    wind_direction.append(wind["direction"])
            try:
                speed_average = statistics.mean(wind_speed)
                direction_average = statistics.mean(wind_direction)
            except statistics.StatisticsError as _:
                pass
            finally:
                result.append(
                    {day: make_time_unit_dict(speed_average, direction_average, launch)}
                )

        return jsonify(result)
    elif interval == "hourly":
        result = []
        for hour in data["properties"]["periods"]:
            wind = make_wind_dict(hour)
            result.append(
                {
                    date_parse(hour["startTime"]).isoformat(): make_time_unit_dict(
                        wind["speed"], wind["direction"], launch
                    )
                }
            )
        return jsonify(result)
    else:
        return "Invalid interval", 400
Example #54
def db_date(value, context):

    try:
        value = date_parse(value)
    except ValueError as e:
        raise Invalid(str(e))

    return value
Example #55
 def request_as_comment_dict(self, request):
     return {
         'who': request.creator,
         'when': date_parse(request.statehistory[0].when),
         'id': '-1',
         'parent': None,
         'comment': request.description,
     }
Example #56
def start_date(sync_db: sqlalchemy.engine.base.Engine,
               env_start_date: str) -> datetime:
    last_date: Optional[datetime] = last_sync_date(sync_db)
    if last_date is not None:
        return last_date + timedelta(days=1)
    if env_start_date == "":
        return datetime.today()
    return date_parse(env_start_date)
Example #57
 def filter_expiry_before(value):
     if value:
         try:
             date = date_parse(value, dayfirst=True).date()
             return Q(end_date__lte=date)
         except:
             pass
     return None
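dayfirst=True is what makes this filter read day-first dates correctly; a short sketch of the difference (the Django Q object is left out):

from dateutil.parser import parse as date_parse

print(date_parse("01/02/2020").date())                 # 2020-01-02 (month first)
print(date_parse("01/02/2020", dayfirst=True).date())  # 2020-02-01 (day first)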
Example #58
 def create(self, date_str):
     '''
     Convert string to datetime.datetime
     '''
     try:
         return date_parse(date_str)
     except:
         return date_str
Example #59
 def dict_from_parsed(cls, parsed_point, dataset_id, training_selector):
     timestamp = date_parse(parsed_point[0])
     return dict(created_at=datetime.datetime.now(),
                 timestamp=timestamp,
                 unit=parsed_point[1],
                 value=float(parsed_point[2]),
                 dataset_id=dataset_id,
                 training=training_selector(timestamp))
Example #60
def test_output(bucket, client, request):
    bucket().blob.return_value = key = mock.MagicMock()
    with mock_datetime_now(date_parse('2020/06/10 13:00'), datetime):
        output = get_blob_output(request)
        assert output.key_prefix == 'policies/xyz/2020/06/10/13'
        output.upload_file('resources.json',
                           f"{output.key_prefix}/resources.json")
        key.upload_from_filename.assert_called_once()