Example #1
def test_map_fields_with_validity_end_and_start(app):
    from lemur.plugins.lemur_digicert.plugin import map_fields

    names = [u'one.example.com', u'two.example.com', u'three.example.com']

    options = {
        'common_name': 'example.com',
        'owner': '*****@*****.**',
        'description': 'test certificate',
        'extensions': {
            'sub_alt_names': {
                'names': [x509.DNSName(x) for x in names]
            }
        },
        'validity_end': arrow.get(2017, 5, 7),
        'validity_start': arrow.get(2016, 10, 30)
    }

    data = map_fields(options, CSR_STR)

    assert data == {
        'certificate': {
            'csr': CSR_STR,
            'common_name': 'example.com',
            'dns_names': names,
            'signature_hash': 'sha256'
        },
        'organization': {'id': 111111},
        'custom_expiration_date': arrow.get(2017, 5, 7).format('YYYY-MM-DD')
    }
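A quick illustration of the custom_expiration_date value the test expects (a standalone sketch, not part of the test module above): arrow.get() with year/month/day integers builds a UTC Arrow instance, and format() renders the given token pattern.

import arrow

validity_end = arrow.get(2017, 5, 7)       # 2017-05-07T00:00:00+00:00
print(validity_end.format('YYYY-MM-DD'))   # prints 2017-05-07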
Example #2
def stats_package(package=None):
    """
    Return a dictionary of statistics for a package name (optional), or all packages

    :param string package: The package name to provide p_stats for
    :query string stime: The lower bound of the time period to filter on
    :query string ftime: The upper bound of the time period to filter on

    stime and ftime both filter on the release start time.
    Note that standard orlo API filters can be used here as well, not just stime/ftime
    """
    s_stime = request.args.get('stime')
    s_ftime = request.args.get('ftime')

    stime, ftime = None, None
    try:
        if s_stime:
            stime = arrow.get(s_stime)
        if s_ftime:
            ftime = arrow.get(s_ftime)
    except RuntimeError:  # superclass of arrow's ParserError, which is not importable
        raise InvalidUsage("A badly formatted datetime string was given")

    if package:
        package_list = [package]
    else:
        package_result = queries.package_list().all()
        # Flatten query result
        package_list = [u[0] for u in package_result]

    package_stats = build_stats_dict('package', package_list, stime=stime, ftime=ftime)

    return jsonify(package_stats)
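Why the view catches RuntimeError: in the arrow releases this code was written against, the parser's ParserError subclassed RuntimeError and was awkward to import. A minimal sketch under that assumption (newer arrow derives ParserError from ValueError instead):

import arrow

try:
    arrow.get("not-a-date")
except RuntimeError as exc:  # older arrow: ParserError is a RuntimeError subclass
    print("rejected:", exc)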
Example #3
def stats_user(username=None):
    """
    Return a dictionary of statistics for a username (optional), or all users

    :param string username: The username to provide u_stats for
    :query string stime: The lower bound of the time period to filter on
    :query string ftime: The upper bound of the time period to filter on

    stime and ftime both filter on the release start time.
    Note that standard orlo API filters can be used here as well, not just stime/ftime
    """
    s_stime = request.args.get('stime')
    s_ftime = request.args.get('ftime')

    stime, ftime = None, None
    try:
        if s_stime:
            stime = arrow.get(s_stime)
        if s_ftime:
            ftime = arrow.get(s_ftime)
    except RuntimeError:  # superclass of arrow's ParserError, which is not importable
        raise InvalidUsage("A badly formatted datetime string was given")

    if username:
        user_list = [username]
    else:
        app.logger.debug("Fetching user list")
        user_result = queries.user_list().all()
        # Flatten query result
        user_list = [u[0] for u in user_result]

    app.logger.debug("Building stats dict")
    user_stats = build_stats_dict('user', user_list, stime=stime, ftime=ftime)

    return jsonify(user_stats)
Example #4
    def read_data(self, **kwargs):
        """
            get the data from the service
            as the pocket service does not expose any date
            linked to the note in its API,
            add the triggered date to the dict data
            so the service will be triggered when data is found

            :param kwargs: contain keyword args : trigger_id at least
            :type kwargs: dict

            :rtype: list
        """
        self.date_triggered = arrow.get(kwargs.get('date_triggered'))
        self.trigger_id = kwargs.get('trigger_id')
        self.user = kwargs.get('user') if kwargs.get('user') else ''

        responses = self._get_wall_data()

        data = []
        try:
            json_data = responses.json()

            for d in json_data['_embedded']['items']:
                created_at = arrow.get(d.get('created_at'))
                date_triggered = arrow.get(self.date_triggered)

                if created_at > date_triggered:
                    data.append({'title': d.get('title'),
                                 'content': d.get('content')})
            if len(data) > 0:
                cache.set('th_wallabag_' + str(self.trigger_id), data)
        except Exception as e:
            logger.critical(e)
        return data
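A small sketch (hypothetical dates) of the filtering step above: Arrow objects parsed from ISO strings compare chronologically, so only entries created after the last trigger are kept.

import arrow

date_triggered = arrow.get('2020-01-01T00:00:00+00:00')
created_at = arrow.get('2020-01-02T08:30:00+00:00')
print(created_at > date_triggered)  # True, so this entry would be appended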
Example #5
def stats_team(team=None):
    """
    Return a dictionary of statistics for a team (optional), or all teams

    :param string team: The team name to provide t_stats for
    :query string stime: The lower bound of the time period to filter on
    :query string ftime: The upper bound of the time period to filter on

    stime and ftime both filter on the release start time.
    """
    s_stime = request.args.get('stime')
    s_ftime = request.args.get('ftime')

    stime, ftime = None, None
    try:
        if s_stime:
            stime = arrow.get(s_stime)
        if s_ftime:
            ftime = arrow.get(s_ftime)
    except RuntimeError:  # superclass of arrow's ParserError, which is not importable
        raise InvalidUsage("A badly formatted datetime string was given")

    if team:
        team_list = [team]
    else:
        team_result = queries.team_list().all()
        # Flatten query result
        team_list = [u[0] for u in team_result]

    team_stats = build_stats_dict('team', team_list, stime=stime, ftime=ftime)

    return jsonify(team_stats)
Example #6
def main():
    args = docopt.docopt(__doc__)

    atxutils.logging_setup('DEBUG')

    if args['--till']:
        till = arrow.get(args['--till']).timestamp
    else:
        till = arrow.now().timestamp

    if args['--since'].startswith('-'):
        if args['--since'].endswith('d'):
            since = till - int(args['--since'][1:-1]) * 3600 * 24
        else:
            since = till - int(args['--since'][1:])
    else:
        since = arrow.get(args['--since']).timestamp

    assert (since <= till)

    logging.info('since: %s, till: %s' % (since, till))

    if args['--incoming']:
        path_incoming = args['--incoming']
    else:
        logging.info('--incoming not specified, defaulting to /home/incoming')
        path_incoming = '/home/incoming'

    mjs = args['<mj>']

    res = process_many(mjs, since, till, path_incoming)
    import pprint
    pprint.pprint(res)
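A hedged note on the .timestamp attribute used here (and in several snippets below): in arrow releases before 1.0 it is an integer property, while arrow 1.0+ turns it into a float-returning method, so newer code would use .int_timestamp or int(x.timestamp()).

import arrow

till = arrow.now()
# pre-1.0 arrow:  till.timestamp       (property, as used above)
# arrow >= 1.0:   till.int_timestamp   or int(till.timestamp())
print(till.int_timestamp)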
Example #7
    def gmail2slack(self):
        try:
            label_id = self.getLabelIdByName(self.label_name)
            if not label_id:
                raise Exception("target label name not found")
            response = self.gmail_service.users().messages().list(userId=self.user_id, labelIds=label_id).execute()
        except AccessTokenRefreshError:
            return

        message_ids = []
        if 'messages' in response:
            message_ids.extend(response['messages'])
        for msg_id in message_ids:
            message = self.gmail_service.users().messages().get(userId=self.user_id, id=msg_id['id']).execute()
            headers = dict()
            for header in message['payload']['headers']:
                headers[header['name']] = header['value']

            try:  # due to issue @ https://github.com/crsmithdev/arrow/issues/176
                from_ts = arrow.get(headers['Date'], 'ddd, D MMM YYYY HH:mm:ss ZZ').timestamp
            except:
                continue

            if from_ts < self.state['timestamp']:
                break
            from_date = arrow.get(from_ts).to('US/Eastern').format('YYYY-MM-DD HH:mm:ss ZZ')
            say = "New Email\n>From: %s\n>Date: %s\n>Subject: %s\n>\n>%s" % \
                  (headers['From'], from_date, headers['Subject'], message['snippet'])
            self.slack.direct_message(say, self.config['slack_user_id'], self.config['slack_from'])
        self.save_state()
Example #8
    def assert_date(self, field_name, close_to_now=False, **kwargs):
        """Assert if the date field matches the timedelta.

        :param field_name: Name of the field: due_at or alarm_at.
        :param close_to_now: Assert both chore dates are close to the current time
            (since we cannot assert the exact time).
        :param kwargs: Arguments to the timedelta() function.
        """
        if close_to_now:
            expected = right_now() + timedelta(**kwargs)
            begin = expected - timedelta(seconds=self.NOW_PRECISION_SECONDS)
            end = expected + timedelta(seconds=self.NOW_PRECISION_SECONDS)
            value = getattr(self.chore, field_name)

            assert begin <= value <= end, "Field {} with value {} should be between {} and {}".format(
                field_name, value, arrow.get(begin).format(DATETIME_FORMAT), arrow.get(end).format(DATETIME_FORMAT)
            )
        else:
            expected = self.previous[field_name]
            if kwargs:
                expected += timedelta(**kwargs)
            actual = getattr(self.chore, field_name)
            assert actual == expected, "Expected {} {}, got {}".format(
                field_name, arrow.get(expected).format(DATETIME_FORMAT), arrow.get(actual).format(DATETIME_FORMAT)
            )

        self.previous[field_name] = expected
Example #9
    def compare_values(self, x, y):
        if isinstance(x, datetime) or isinstance(x, int):
            x = arrow.get(x)
        if isinstance(y, datetime) or isinstance(y, int):
            y = arrow.get(y)

        return x == y
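A sketch of what the normalization in compare_values buys (epoch example): arrow.get() maps a naive datetime (assumed UTC) and an integer Unix timestamp onto the same instant, so the comparison is meaningful.

from datetime import datetime
import arrow

print(arrow.get(datetime(1970, 1, 1)) == arrow.get(0))  # True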
Example #10
def export_timeline(user_id, start_day_str, end_day_str, file_name):
    logging.info("Extracting timeline for user %s day %s -> %s and saving to file %s" %
                 (user_id, start_day_str, end_day_str, file_name))

    # day_dt = pydt.datetime.strptime(day_str, "%Y-%m-%d").date()
    start_day_ts = arrow.get(start_day_str).timestamp
    end_day_ts = arrow.get(end_day_str).timestamp
    logging.debug("start_day_ts = %s (%s), end_day_ts = %s (%s)" % 
        (start_day_ts, arrow.get(start_day_ts),
         end_day_ts, arrow.get(end_day_ts)))

    ts = esta.TimeSeries.get_time_series(user_id)
    loc_time_query = estt.TimeQuery("data.ts", start_day_ts, end_day_ts)
    loc_entry_list = list(ts.find_entries(key_list=None, time_query=loc_time_query))
    trip_time_query = estt.TimeQuery("data.start_ts", start_day_ts, end_day_ts)
    trip_entry_list = list(ts.find_entries(key_list=None, time_query=trip_time_query))
    place_time_query = estt.TimeQuery("data.enter_ts", start_day_ts, end_day_ts)
    place_entry_list = list(ts.find_entries(key_list=None, time_query=place_time_query))

    combined_list = loc_entry_list + trip_entry_list + place_entry_list
    logging.info("Found %d loc entries, %d trip-like entries, %d place-like entries = %d total entries" % 
        (len(loc_entry_list), len(trip_entry_list), len(place_entry_list), len(combined_list)))

    validate_truncation(loc_entry_list, trip_entry_list, place_entry_list)

    unique_key_list = set([e["metadata"]["key"] for e in combined_list])
    logging.info("timeline has unique keys = %s" % unique_key_list)
    if len(combined_list) == 0 or unique_key_list == set(['stats/pipeline_time']):
        logging.info("No entries found in range for user %s, skipping save" % user_id)
    else:
        combined_filename = "%s_%s.gz" % (file_name, user_id)
        json.dump(combined_list,
            gzip.open(combined_filename, "wb"), default=bju.default, allow_nan=False, indent=4)
Example #11
def recurring_event(db, default_namespace, request):
    params = request.param
    all_day = params.get('all_day', False)

    rrule = ["RRULE:FREQ=WEEKLY", "EXDATE:20150324T013000,20150331T013000Z"]
    cal = db.session.query(Calendar).filter_by(
        namespace_id=default_namespace.id).order_by('id').first()
    ev = Event(namespace_id=default_namespace.id,
               calendar=cal,
               title='recurring-weekly',
               description='',
               uid='recurapitest',
               location='',
               busy=False,
               read_only=False,
               reminders='',
               recurrence=rrule,
               start=arrow.get(2015, 3, 17, 1, 30, 00),
               end=arrow.get(2015, 3, 17, 1, 45, 00),
               all_day=all_day,
               is_owner=True,
               participants=[],
               provider_name='inbox',
               raw_data='',
               original_start_tz='America/Los_Angeles',
               original_start_time=None,
               master_event_uid=None,
               source='local')
    db.session.add(ev)
    db.session.commit()
    return ev
Example #12
def get_times(service, calendars):
	busyTimes = [] # Busy Time List
	
	time_min = flask.session["begin_date"]
	time_max = arrow.get(flask.session["end_date"]).replace(hours=+24, seconds=-1).isoformat()
	
	# Iterate through selected calendars
	for cal in calendars:
		# Get items from google
		calendar_items = service.events().list(calendarId=cal, timeMin=time_min, timeMax=time_max, singleEvents=True).execute()['items']
		for item in calendar_items:
			# Add to proper list
			try:
				t_start = item["start"]["dateTime"]
			except:
				t_start = arrow.get(item["start"]["date"], "YYYY-MM-DD").isoformat()
			try:
				t_end = item["end"]["dateTime"]
			except:
				t_end = arrow.get(item["end"]["date"], "YYYY-MM-DD").isoformat()
			item_range = {"start": t_start, "end": t_end, "desc": item["summary"]}
			if "transparency" in item and item["transparency"] == "transparent":
				print()# testing transparency property
			else:
				busyTimes.append(item_range)
	
	return busyTimes
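One note on .replace(hours=+24, seconds=-1) above: the arrow versions this snippet targets accepted plural, signed keywords in replace() as a relative shift; current releases move that to shift(). A sketch of the equivalent in newer arrow:

import arrow

end_date = arrow.get("2015-06-01")
time_max = end_date.shift(hours=+24, seconds=-1).isoformat()
print(time_max)  # 2015-06-01T23:59:59+00:00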
Example #13
    def getManualData(self, forceRefresh = False):
        repo = requests.get(self.baseURL,auth=(self.github_user, self.github_password))
        repo_json = json.loads(repo.text)
        #Should new data be loaded or cache returned

        if self.last_update < arrow.get(repo_json["updated_at"]) or forceRefresh:
            print("Fetching new data from GitHub")
            self.last_update = arrow.get(repo_json["updated_at"])
            rJSON = {}
            rJSON["files"] = []

            #Find all files
            filesDir = requests.get(self.baseURL + "/contents/system/docs/manual",auth=(self.github_user, self.github_password))
            if filesDir.ok:
                filesJSON = json.loads(filesDir.text)
                j = 0
                for i, item in enumerate(filesJSON):

                    if filesJSON[i]["type"] == "file" and filesJSON[i]["name"] != "Readme.md":
                        rJSON["files"].append({})
                        rJSON["files"][j].update({"name": filesJSON[i]["name"]})

                        fContents = requests.get(filesJSON[i]["url"],auth=(self.github_user, self.github_password))
                        if fContents.ok:
                            contentsJSON = json.loads(fContents.text)
                            rJSON["files"][j].update({"mdB64": contentsJSON['content']})
                        j += 1

            #setup cache for faster responses
            self.cached_return_json = rJSON

            return rJSON
        else:
            print("Serving cached data")
            return self.cached_return_json
Example #14
 def run(*args):
     msg = {
         'type': 'auth',
         'time': arrow.get().timestamp,
         'source': pat_id,
         'message': self.auth_token
         }
     ws.send(json.dumps(msg))
     time.sleep(5)
     tests = 10000
     start = arrow.get()
     for i in range(tests):
         #time.sleep(10)
         msg = {
             'type': 'ping',
             'time': arrow.get().timestamp,
             'to': 'server',
             'source': pat_id,
             'message': 'socket ping'
             }
         ws.send(json.dumps(msg))
         #print 'sent ping {}'.format(i)
     time.sleep(1)
     while total_cnt < tests:
         time.sleep(1)
     print('tests: {}'.format(total_cnt))
     ws.close()
     print("thread terminating...")
     print('takes: {}'.format((arrow.get() - start).total_seconds()))
Example #15
def parse_xue_qiu_comment_last_day(stock='SH600029', access_token=xq_a_token):
    url = 'http://xueqiu.com/statuses/search.json?count=15&comment=0&symbol={}&hl=0&source=all&sort=time&page=1&_=1439801060661'
    url = url.format(stock)
    payload = {'access_token': access_token}

    r = requests.get(url, params=payload, headers=headers)
    # print r
    # print r.json()
    comments = r.json().get('list')
    now = arrow.now()
    # print now
    today = now.date()
    # print str(today)

    today_begin = arrow.get(str(today)+'T00:00+08:00')
    today_end = arrow.get(str(today)+'T23:59+08:00')

    count = 0
    for comment in comments:
        timestamp = int(comment.get('created_at'))/1000
        utc = arrow.get(timestamp)
        local = utc.to('local')
        # print local
        if today_begin < utc < today_end:
            # print '***comment when trading***{}'.format(local)
            count += 1
        else:
            print('comment not when trading:{}'.format(local))
    # print 'stock {} comment:{}'.format(stock, count)
    return count
Example #16
File: utils.py Project: I-GV/ics.py
def iso_to_arrow(time_container, available_tz={}):
    if time_container is None:
        return None

    # TODO : raise if not iso date
    tz_list = time_container.params.get('TZID')
    # TODO : raise if len(tz_list) > 1 or if tz is not a valid tz
    # TODO : see if timezone is registered as a VTIMEZONE
    if tz_list and len(tz_list) > 0:
        tz = tz_list[0]
    else:
        tz = None
    if ('T' not in time_container.value) and \
            'DATE' in time_container.params.get('VALUE', []):
        val = time_container.value + 'T0000'
    else:
        val = time_container.value

    if tz and not (val[-1].upper() == 'Z'):
        naive = arrow.get(val).naive
        selected_tz = gettz(tz)
        if not selected_tz:
            selected_tz = available_tz.get(tz, 'UTC')
        return arrow.get(naive, selected_tz)
    else:
        return arrow.get(val)
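A sketch of the timezone-attachment branch (hypothetical zone name): arrow.get(naive_datetime, tz) pins the naive value to that zone rather than converting it, which is what the TZID handling above relies on.

from dateutil.tz import gettz
import arrow

naive = arrow.get("2020-01-15T09:30:00").naive   # plain datetime, no tzinfo
print(arrow.get(naive, gettz("Europe/Paris")))   # 2020-01-15T09:30:00+01:00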
Example #17
def request_data(server_url, from_ts, to_ts, phone_id, key_list, debug):
    url = server_url + "/datastreams/find_entries/time_type"
    request_body = {
        "user": phone_id,
        "key_list": key_list,
        "start_time": from_ts,
        "end_time": to_ts
    }
    headers = {'Content-Type': 'application/json'}

    r = requests.post(url, data=json.dumps(request_body), headers=headers)

    r.raise_for_status()

    dic = json.loads(r.text, object_hook=bju.object_hook)
    entry_list = dic['phone_data']

    if debug:
        logging.debug("first entry (in local time):")

        if len(entry_list) == 0:
            logging.debug("...has no data...")
        else:
            logging.debug(str(
                entry_list[0].get('metadata').get('write_fmt_time')))

    logging.debug("returning %d entries for batch %s (%s) -> %s (%s)" % 
        (len(entry_list),
        arrow.get(from_ts).to('local'), from_ts,
        arrow.get(to_ts).to('local'), to_ts))

    return entry_list
Example #18
    def stats_between(self, start, stop):
        if not isinstance(start, arrow.Arrow):
            start = arrow.get(start)
        if not isinstance(stop, arrow.Arrow):
            stop = arrow.get(stop)

        return filter(lambda stat: start <= stat.date <= stop, self.stats)
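Worth noting (an assumption about the factory, not something the method depends on): arrow.get() also accepts an existing Arrow instance and returns an equal one, so the isinstance guards mainly avoid an unnecessary conversion.

import arrow

start = arrow.get("2021-03-01")
print(arrow.get(start) == start)  # True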
Example #19
def build_completed_backups(backup_dir):
    domain_names = ("a", "b", "vm-10", "matching", "matching2")
    backup_properties = (
        (arrow.get("2016-07-08 19:40:02").to("local"), None),
        (arrow.get("2016-07-08 18:40:02").to("local"), None),
        (arrow.get("2016-07-08 18:30:02").to("local"), None),
        (arrow.get("2016-07-08 17:40:02").to("local"), None),
        (arrow.get("2016-07-07 19:40:02").to("local"), None),
        (arrow.get("2016-07-07 21:40:02").to("local"), None),
        (arrow.get("2016-07-06 20:40:02").to("local"), None),
        (arrow.get("2016-04-08 19:40:02").to("local"), None),
        (arrow.get("2014-05-01 00:30:00").to("local"), "tar"),
        (arrow.get("2016-03-08 14:28:13").to("local"), "xz"),
    )
    conn = MockConn()
    for domain_id, domain_name in enumerate(domain_names):
        domain_bdir = os.path.join(backup_dir, domain_name)
        os.mkdir(domain_bdir)
        domain = MockDomain(conn, name=domain_name, id=domain_id)
        dbackup = DomBackup(
            domain, domain_bdir, dev_disks=("vda", "vdb")
        )

        for bakdate, compression in backup_properties:
            dbackup.compression = compression
            definition = build_complete_backup_files_from_domainbackup(
                dbackup, bakdate
            )
            dbackup._dump_json_definition(definition)
        # create a bad json file
        with open(os.path.join(domain_bdir, "badfile.json"), "w"):
            pass

    return (domain_names, (bp[0] for bp in backup_properties))
Example #20
 def test_get_summonses_from_webpage_without_url(self):
     actual_summons = undertest.get_summonses_from_webpage(TestWebpageParsing.webpage)
     expected_summons = {
         arrow.get("August 2012", 'MMMM-YYYY'): "../../random_url1.shtml",
         arrow.get("December 2011", 'MMMM-YYYY'): "random_url3.shtml"
     }
     self.assertEqual(expected_summons, actual_summons)
Example #21
 def test_get_collisions_from_webpage_without_url(self):
     actual_collision = undertest.get_collisions_from_webpage(TestWebpageParsing.webpage)
     expected_collision = {
         arrow.get("June 2013", 'MMMM-YYYY'): "../../random_url4.shtml",
         arrow.get("August 2015", 'MMMM-YYYY'): "random_url2.shtml"
     }
     self.assertEqual(expected_collision, actual_collision)
Example #22
def func1():
    
        
    utc = arrow.utcnow()
    local = utc.to('Asia/Shanghai')
    ts = local.timestamp
    print(arrow.get(ts))
    #print local.format('YYYY-MM-DD HH:mm:ss ZZ')
    
    """function and heartbeat"""
    
    ex = TimeoutException("timeout ex")
    
    #gevent timeout
    timeout = Timeout(6, ex)
    #start
    timeout.start()
    try:
        
        # exception will be raised here, after *seconds* 
        # passed since start() call
        
        gevent.sleep(3 * random.randint(1,4))
        #print "f1 heart beat"
        heartbeat("f1")

    except TimeoutException as ex:
        print(ex)
    finally:
        #cancel timeout
        timeout.cancel()
Example #23
    def appand_general_info(self, user_data, name, user_amount):
        '''
        user_data: ((user_id1, register_date1),
                    (user_id2, register_date2),
                    (user_id3, register_date3),
                    ......)
        '''
        if len(user_data) > 10000:
            raise RuntimeError('处理数据过多,请分批处理,'
                               '单次处理数据应小于10000条')
        if len(user_data) == 0:
            raise RuntimeError('未找到注册用户')

        unzip_data = zip(*user_data)
        self.ids = next(unzip_data)
        r_dates = next(unzip_data)

        reg_date_str = '[{0} ~ {1}]'\
                       ''.format(arrow.get(min(r_dates)).format('YYYY-MM-DD'),
                                 arrow.get(max(r_dates)).format('YYYY-MM-DD'))
        general_info = {
                            'name': '{0} <br> {1}'
                            ''.format(name, reg_date_str),
                            'value': str(user_amount)
                        }

        # add to list
        self.result['res_data'].append(general_info)
Example #24
def test_new_instance_cancelled(db, default_account, calendar):
    # Test that if we receive a cancelled override from Google, we save it
    # as an override with cancelled status rather than deleting it.
    event = recurring_event(db, default_account, calendar, TEST_EXDATE_RULE)
    override_uid = event.uid + "_20140814T203000Z"
    override = Event(title='CANCELLED',
                     description='',
                     uid=override_uid,
                     location='',
                     busy=False,
                     read_only=False,
                     reminders='',
                     recurrence=None,
                     start=arrow.get(2014, 8, 14, 22, 15, 00),
                     end=arrow.get(2014, 8, 14, 23, 15, 00),
                     all_day=False,
                     is_owner=False,
                     participants=[],
                     provider_name='inbox',
                     raw_data='',
                     original_start_tz='America/Los_Angeles',
                     original_start_time=arrow.get(2014, 8, 14, 21, 30, 00),
                     master_event_uid=event.uid,
                     cancelled=True,
                     source='local')
    handle_event_updates(default_account.namespace.id,
                         calendar.id,
                         [override], log, db.session)
    db.session.commit()
    # Check the event got saved with the cancelled flag
    find_override = db.session.query(Event).filter_by(
        uid=override_uid, namespace_id=default_account.namespace.id).one()
    assert find_override.cancelled is True
Example #25
def overlap(event_sdt, event_edt):
    """
    This function returns true IFF the input event overlaps the desired meeting
    time and date range.
    Arguments:
        event_sdt: arrow object representing the event's start date and time
        event_edt: arrow object representing the event's end date and time
    Returns: true IFF the input event overlaps the desired meeting
    time and date range.
    """
    # sdt = start date time
    # edt = end date time
    event_sd = event_sdt.date()
    event_ed = event_edt.date()
    event_st = event_sdt.time()
    event_et = event_edt.time()
    desired_sd= arrow.get(flask.session['begin_date']).date()
    desired_ed = arrow.get(flask.session['end_date']).date()
    desired_st = arrow.get(flask.session['begin_time']).time()
    desired_et = arrow.get(flask.session['end_time']).time()
    if not (desired_sd <= event_sd <= desired_ed) or not (desired_sd <= event_ed <= desired_ed):
        return False 
    elif (event_et <= desired_st):
        return False 
    elif (event_st >= desired_et):
        return False
    else:
        return True
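A small sketch of the accessors the overlap test leans on (hypothetical instant): Arrow exposes date() and time() just like datetime, so the checks above are ordinary date and time comparisons.

import arrow

meeting = arrow.get("2019-11-05T14:30:00")
print(meeting.date())  # 2019-11-05
print(meeting.time())  # 14:30:00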
Example #26
def test_all_day_rrule_parsing(db, default_account, calendar):
    event = recurring_event(db, default_account, calendar, ALL_DAY_RRULE,
                            start=arrow.get(2014, 8, 7),
                            end=arrow.get(2014, 8, 7),
                            all_day=True)
    g = get_start_times(event)
    assert len(g) == 6
Example #27
def test_non_recurring_events_behave(db, default_account, calendar):
    event = Event(namespace_id=default_account.namespace.id,
                  calendar=calendar,
                  title='not recurring',
                  description='',
                  uid='non_recurring_uid',
                  location='',
                  busy=False,
                  read_only=False,
                  reminders='',
                  recurrence=None,
                  start=arrow.get(2014, 7, 7, 13, 30),
                  end=arrow.get(2014, 7, 7, 13, 55),
                  all_day=False,
                  is_owner=False,
                  participants=[],
                  provider_name='inbox',
                  raw_data='',
                  original_start_tz='America/Los_Angeles',
                  original_start_time=None,
                  master_event_uid=None,
                  source='local')
    assert isinstance(event, Event)
    with pytest.raises(AttributeError):
        event.inflate()
Example #28
def lab_env_db():
	temperatures, humidities, timezone, from_date_str, to_date_str = get_records()

	# Create new record tables so that datetimes are adjusted back to the user browser's time zone.
	time_adjusted_temperatures = []
	time_adjusted_humidities   = []
	for record in temperatures:
		local_timedate = arrow.get(record[0], "YYYY-MM-DD HH:mm").to(timezone)
		time_adjusted_temperatures.append([local_timedate.format('YYYY-MM-DD HH:mm'), round(record[2],2)])

	for record in humidities:
		local_timedate = arrow.get(record[0], "YYYY-MM-DD HH:mm").to(timezone)
		time_adjusted_humidities.append([local_timedate.format('YYYY-MM-DD HH:mm'), round(record[2],2)])

	print "rendering lab_env_db.html with: %s, %s, %s" % (timezone, from_date_str, to_date_str)

	return render_template("lab_env_db.html",	timezone		= timezone,
												temp 			= time_adjusted_temperatures,
												hum 			= time_adjusted_humidities, 
												from_date 		= from_date_str, 
												to_date 		= to_date_str,
												temp_items 		= len(temperatures),
												query_string	= request.query_string, #This query string is used
																						#by the Plotly link
												hum_items 		= len(humidities))
Example #29
    def ticks(self, domain_min, domain_max):
        interval = self._interval

        # If the caller didn't specify an interval, find one that yields the number of locations closest to the requested count
        if interval is None:
            if self._count is None:
                self._count = 7

            closest_difference = None

            for match in _intervals:
                count = (domain_max - domain_min) / match["duration"].total_seconds()
                difference = numpy.abs(self._count - count)

                if (closest_difference is None) or (difference < closest_difference):
                    interval = match
                    closest_difference = difference

        # Generate ticks
        generator = interval["generator"]
        label_format = interval["format"] if self._format is None else self._format

        locations = [location for location in generator(arrow.get(domain_min), arrow.get(domain_max), self._timezone)]
        labels = [label_format.format(arrow.get(location).to(self._timezone)) for location in locations]
        titles = numpy.repeat(None, len(labels))
        return locations, labels, titles
Example #30
def test_avail_resource_scalability(client):
    u1 = Unit.objects.create(name='Unit 1', id='unit_1', time_zone='Europe/Helsinki')
    rt = ResourceType.objects.create(name='Type 1', id='type_1', main_type='space')
    p1 = Period.objects.create(start='2015-06-01', end='2015-09-01', unit=u1, name='')
    Day.objects.create(period=p1, weekday=0, opens='08:00', closes='22:00')
    Day.objects.create(period=p1, weekday=1, opens='08:00', closes='16:00')
    # make reservations for the whole day
    begin_res = arrow.get('2015-06-01T08:00:00Z').datetime
    end_res = arrow.get('2015-06-01T16:00:00Z').datetime

    perf_res_list = open('perf_res_list.csv', 'w')
    perf_res_avail = open('perf_res_avail.csv', 'w')
    perf_reservation = open('perf_reservation.csv', 'w')
    perf_res_list.write('Resource listing\n')
    perf_res_list.write('resources, time (s)\n')
    perf_res_avail.write('Availability listing\n')
    perf_res_avail.write('resources, time (s)\n')
    perf_reservation.write('Single resource availability\n')
    perf_reservation.write('Total reservations, time (s)\n')
    for n in [1, 10, 100, 1000]:
        Resource.objects.all().delete()
        for i in range(n):
            resource = Resource.objects.create(name=('Resource ' + str(i)), id=('r' + str(i)), unit=u1, type=rt)
            Reservation.objects.create(resource=resource, begin=begin_res, end=end_res)

        # Time the general availability for n resources and reservations
        start = datetime.now()
        response = client.get('/test/availability?start_date=2015-06-01&end_date=2015-06-30')
        end = datetime.now()
        perf_res_list.write(str(n) + ', ' + str(end - start) + '\n')
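A sketch of the .datetime conversion used for begin_res and end_res (same literal, outside the test): the property yields a timezone-aware stdlib datetime, which is what the Django model fields expect.

import arrow

begin_res = arrow.get('2015-06-01T08:00:00Z').datetime
print(begin_res)         # 2015-06-01 08:00:00+00:00
print(begin_res.tzinfo)  # a UTC tzinfo (exact repr depends on the arrow version)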
Example #31
 def published_day(self):
     return arrow.get(self.published).format('ddd, MMM D YYYY')
Example #32
import json
import os
import time
from datetime import date, timedelta

import arrow
from dateutil.parser import parse
import re
import pandas as pd

ignore_subjects = [u'WordBuild Elements 1',
                   u'Personal and Family Finance',
                   u'Orientation - High School Stud',
                   u'Chinese Mandarin Course 2',
                   u'HS Physical Education',
                   u'State History Report']
today = date.today()
offset = (today.weekday() - 3) % 7
start_week = today - timedelta(days=offset)
end_week = start_week + timedelta(days=6)
file = 'data.json'
last_modified = arrow.get(parse(time.ctime(os.path.getmtime(file)))).replace(tzinfo='local')

with open(file) as f:
    data = json.load(f)
for s in ignore_subjects:
    if s in data:
        del data[s]
subjects = data.keys()
grades = []
completion = []
done = {}
today_d = []
week_d = []
total_plus_b = 0
total_units_this_week = 0
total_units_today = 0
Example #33
def edit_my_work_time_api_view(request):
    """
    API to edit or remove my working time
    :param request:
    :return:
    """
    user_id = request.user.id
    work_time_id = request.data.get('work_time_id')
    work_date = request.data.get('date')
    start_in_day = request.data.get('start_in_day')
    end_in_day = request.data.get('end_in_day')
    rest_time = request.data.get('rest_time')
    work_time_request = request.data.get('work_time_request')

    if request.user.is_authenticated:
        if not work_time_id:
            return Response({
                'ok': False,
                'msg': 'Không tồn tại giờ làm việc này!',
            })

        if not can_be_integer(work_time_id):
            return Response({
                'ok': False,
                'msg': 'Không tìm thấy giờ làm việc của bạn!',
            })

        if not work_time_request:
            return Response({
                'ok': False,
                'msg': 'Không xác định được yêu cầu!',
            })

        if work_time_request != 'edit' and work_time_request != 'remove':
            return Response({
                'ok': False,
                'msg': 'Yêu cầu không hợp lệ!',
            })

        if work_time_request == 'remove':
            try:
                profile = Profile.objects.get(user__id=user_id, removed=False)
                dpm = DepartmentMember.objects.get(department_member=profile, removed=False)
                wt = WorkTime.objects.get(id=work_time_id, user=dpm, removed=False)

                wt.removed = True
                wt.save()

                return Response({
                    'ok': True,
                    'msg': 'Xoá thành công!',
                })

            except (Profile.DoesNotExist, DepartmentMember.DoesNotExist, WorkTime.DoesNotExist):
                return Response({
                    'ok': False,
                    'msg': 'Lỗi dữ liệu!',
                })

        if work_time_request == 'edit':
            if not work_date:
                return Response({
                    'ok': False,
                    'msg': 'Ngày làm việc không được để trống!',
                })

            if not start_in_day:
                return Response({
                    'ok': False,
                    'msg': 'Thời gian bắt đầu không được để trống!',
                })

            if work_date:
                work_date = arrow.get(work_date).to(settings.TIME_ZONE).date()

            if start_in_day:
                start_in_day = arrow.get(start_in_day).to(settings.TIME_ZONE).time()

            if end_in_day:
                end_in_day = arrow.get(end_in_day).to(settings.TIME_ZONE).time()

            if start_in_day and end_in_day and end_in_day < start_in_day:
                return Response({
                    'ok': False,
                    'msg': 'Thời gian kết thúc không được trước thời gian bắt đầu!',
                })

            try:
                profile = Profile.objects.get(user__id=user_id, removed=False)
                dpm = DepartmentMember.objects.get(department_member=profile, removed=False)
                wt = WorkTime.objects.get(id=work_time_id, user=dpm, removed=False)

                total = 0
                if start_in_day and end_in_day:
                    start = datetime.combine(date.today(), start_in_day)
                    end = datetime.combine(date.today(), end_in_day)
                    total = end - start
                    total = total.total_seconds()
                    total = float(total) / 3600.0

                    if rest_time:
                        total = float(total) - float(rest_time)

                wt.date = work_date
                wt.start_in_day = start_in_day
                wt.end_in_day = end_in_day
                wt.rest_time = rest_time
                wt.time_total = total
                wt.save()

                return Response({
                    'ok': True,
                    'msg': 'Cập nhật thành công!',
                })
            except (Profile.DoesNotExist, DepartmentMember.DoesNotExist, WorkTime.DoesNotExist):
                return Response({
                    'ok': False,
                    'msg': 'Lỗi dữ liệu!',
                })

    return Response({
        'ok': False,
        'msg': 'Bạn chưa đăng nhập!',
    })
Example #34
def add_my_work_time_api_view(request):
    """
    API to add my working time
    :param request:
    :return:
    """
    user_id = request.user.id
    work_date = request.data.get('date')
    start_in_day = request.data.get('start_in_day')
    end_in_day = request.data.get('end_in_day')
    rest_time = request.data.get('rest_time')

    if request.user.is_authenticated:
        if not work_date:
            return Response({
                'ok': False,
                'msg': 'Ngày làm việc không được để trống!',
            })

        if not start_in_day:
            return Response({
                'ok': False,
                'msg': 'Thời gian bắt đầu không được để trống!',
            })
        if work_date:
            work_date = arrow.get(work_date).to(settings.TIME_ZONE).date()

        if start_in_day:
            start_in_day = arrow.get(start_in_day).to(settings.TIME_ZONE).time()

        if end_in_day:
            end_in_day = arrow.get(end_in_day).to(settings.TIME_ZONE).time()

        if start_in_day and end_in_day and end_in_day < start_in_day:
            return Response({
                'ok': False,
                'msg': 'Thời gian kết thúc không được trước thời gian bắt đầu!',
            })

        try:
            profile = Profile.objects.get(user__id=user_id, removed=False)
            dpm = DepartmentMember.objects.get(department_member=profile, removed=False)

            total = 0
            if start_in_day and end_in_day:
                start = datetime.combine(date.today(), start_in_day)
                end = datetime.combine(date.today(), end_in_day)
                total = end - start
                total = total.total_seconds()
                total = float(total) / 3600.0

                if rest_time:
                    total = float(total) - float(rest_time)

            worktime = WorkTime(
                user=dpm,
                date=work_date,
                start_in_day=start_in_day,
                end_in_day=end_in_day,
                rest_time=rest_time,
                time_total=total,
            )
            worktime.save()

            return Response({
                'ok': True,
                'msg': 'Báo cáo giờ làm việc thành công!',
            })
        except (Profile.DoesNotExist, DepartmentMember.DoesNotExist):
            return Response({
                'ok': False,
                'msg': 'Lỗi dữ liệu!',
            })

    return Response({
        'ok': False,
        'msg': 'Bạn chưa đăng nhập!',
    })
Example #35
def test_match_history_7():
    region = "NA"
    summoner = Summoner(name="Kalturi", region=region)
    match_history = cass.get_match_history(summoner=summoner, seasons={Season.season_8}, queues={Queue.ranked_solo_fives}, begin_time=arrow.get(2016, 12, 1))
    assert len(match_history) > 0
Example #36
def test_match_history_3():
    region = "NA"
    summoner = Summoner(name="Kalturi", region=region)
    match_history = cass.get_match_history(summoner=summoner, queues={Queue.ranked_solo_fives}, begin_time=arrow.get(2017, 2, 7), end_time=arrow.get(2017, 2, 14))
    assert len(match_history) == 16
Example #37
 def _serialize(self, value: datetime) -> int:
     return int(arrow.get(value).float_timestamp * 1000)
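A sketch of the millisecond conversion (epoch-relative example): float_timestamp is seconds since the Unix epoch, so scaling by 1000 gives the integer milliseconds many JavaScript-facing APIs expect.

import arrow

moment = arrow.get(1.5)                    # 1.5 s after the Unix epoch
print(int(moment.float_timestamp * 1000))  # 1500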
Example #38
def test_last_sync_with_nonexistent_file(mocker, watson):
    mocker.patch('builtins.open', side_effect=IOError)
    assert watson.last_sync == arrow.get(0)
Example #39
def test_last_sync(mocker, watson):
    now = arrow.get(4123)
    content = json.dumps(now.timestamp)

    mocker.patch('builtins.open', mocker.mock_open(read_data=content))
    assert watson.last_sync == now
Example #40
def format_arrow_date(date):
    try:
        normal = arrow.get(date)
        return normal.format("ddd MM/DD/YYYY")
    except:
        return "(bad date)"
Example #41
class InsightNonParametricData:
	DEFAULT_START_TIME = arrow.get('2106-01-01 00:00:00')
	DEFAULT_END_TIME = arrow.get('2006-01-01 00:00:00')
	# DEFAULT_DATE_FORMAT = 'YYYY-MM-DD HH:MM:SS'
	@staticmethod
	def is_insight(dat):
		if not isinstance(dat, list):
			return False
		if len(dat) < 7:
			return False
		for line in dat:
			if not BirdEyeUtil.is_str_collection(line):
				return False
		line_lens = set([len(line) for line in dat])
		if len(line_lens) > 2:
			return False
		x = sorted(list(line_lens))
		if x[0] != len(dat[0]) or x[0] != len(dat[2]):
			return False
		return True
	@staticmethod
	def extract(dat):
		if not InsightNonParametricData.is_insight(dat):
			return None
		n = len(dat[0])-1
		col = [[] for i in range(n)]
		for line in dat[7:]:
			for i in range(n):
				col[i].append(line[i])
		return {dat[1][i] : col[i] for i in range(n)}
	@staticmethod
	def get_start_time(meta):
		a = meta.get('StartTime', [])
		r = InsightNonParametricData.DEFAULT_START_TIME
		for time_str in a:
			try:
				t = arrow.get(time_str)
				if t < r:
					r = t
			except:
				pass
		return r
	@staticmethod
	def get_end_time(meta):
		a = meta.get('EndTime', [])
		r = InsightNonParametricData.DEFAULT_END_TIME
		for time_str in a:
			try:
				t = arrow.get(time_str)
				if t > r:
					r = t
			except:
				pass
		return r
	@staticmethod
	def get_sites(meta):
		return set(meta.get('Site', []))
	@staticmethod
	def extract_build(a):
		r = ''
		a = a.strip()
		if len(a) > 0:
			a = a.replace('_', '-')
			w = a.split('-')
			r = w[1] if len(w) > 1 else a
		return r
	@staticmethod
	def build(b, d):
		r = InsightNonParametricData.extract_build(b)
		if len(r) == 0:
			r = InsightNonParametricData.extract_build(d)
		return r
	@staticmethod
	def get_builds(meta):
		a = meta['Special Build Name']
		b = meta.get('Special Build Description', meta.get('Special Build Descripton', ''))
		c = [InsightNonParametricData.build(a[i], b[i]) for i in range(len(a))]
		return set(c)
	@staticmethod
	def get_meta_data_index(meta):
		r = dict()
		r['product'] = set(meta.get('Product', set()))
		r['build'] = InsightNonParametricData.get_builds(meta)
		r['site'] = InsightNonParametricData.get_sites(meta)
		r['starttime'] = InsightNonParametricData.get_start_time(meta)
		r['endtime'] =  InsightNonParametricData.get_end_time(meta)
		return r
	@staticmethod
	def index(data):
		if not InsightNonParametricData.is_insight(data):
			return dict()
		meta = InsightNonParametricData.extract(data)
		r = InsightNonParametricData.get_meta_data_index(meta)
		r['station'] = {data[0][0].strip()}
		w = data[0][1].split(';')
		w1 = [x.strip() for x in w]
		r['version'] = set(w1)
		return r
Example #42
def test_last_sync_with_empty_file(mocker, watson):
    mocker.patch('builtins.open', mocker.mock_open(read_data=""))
    mocker.patch('os.path.getsize', return_value=0)
    assert watson.last_sync == arrow.get(0)
Example #43
    def post(self):
        """
        ---
        description: Create new candidate(s) (one per filter).
        tags:
          - candidates
        requestBody:
          content:
            application/json:
              schema:
                allOf:
                  - $ref: '#/components/schemas/Obj'
                  - type: object
                    properties:
                      filter_ids:
                        type: array
                        items:
                          type: integer
                        description: List of associated filter IDs
                      passing_alert_id:
                        type: integer
                        description: ID of associated filter that created candidate
                        nullable: true
                      passed_at:
                        type: string
                        description: Arrow-parseable datetime string indicating when passed filter.
                        nullable: true
                    required:
                      - filter_ids
                      - passed_at
        responses:
          200:
            content:
              application/json:
                schema:
                  allOf:
                    - $ref: '#/components/schemas/Success'
                    - type: object
                      properties:
                        data:
                          type: object
                          properties:
                            ids:
                              type: array
                              items:
                                type: integer
                              description: List of new candidate IDs
        """
        data = self.get_json()
        obj_already_exists = Obj.query.get(data["id"]) is not None
        schema = Obj.__schema__()

        ra = data.get('ra', None)
        dec = data.get('dec', None)

        if ra is None and not obj_already_exists:
            return self.error("RA must not be null for a new Obj")

        if dec is None and not obj_already_exists:
            return self.error("Dec must not be null for a new Obj")

        passing_alert_id = data.pop("passing_alert_id", None)
        passed_at = data.pop("passed_at", None)
        if passed_at is None:
            return self.error("Missing required parameter: `passed_at`.")
        passed_at = arrow.get(passed_at).datetime
        try:
            filter_ids = data.pop("filter_ids")
        except KeyError:
            return self.error("Missing required filter_ids parameter.")
        user_accessible_filter_ids = [
            filtr.id for g in self.current_user.accessible_groups
            for filtr in g.filters if g.filters is not None
        ]
        if not all([fid in user_accessible_filter_ids for fid in filter_ids]):
            return self.error(
                "Insufficient permissions - you must only specify "
                "filters that you have access to.")

        try:
            obj = schema.load(data)
        except ValidationError as e:
            return self.error("Invalid/missing parameters: "
                              f"{e.normalized_messages()}")
        filters = Filter.query.filter(Filter.id.in_(filter_ids)).all()
        if not filters:
            return self.error("At least one valid filter ID must be provided.")

        update_redshift_history_if_relevant(data, obj,
                                            self.associated_user_object)

        DBSession().add(obj)
        candidates = [
            Candidate(
                obj=obj,
                filter=filter,
                passing_alert_id=passing_alert_id,
                passed_at=passed_at,
                uploader_id=self.associated_user_object.id,
            ) for filter in filters
        ]
        DBSession().add_all(candidates)
        self.finalize_transaction()
        if not obj_already_exists:
            obj.add_linked_thumbnails()

        return self.success(data={"ids": [c.id for c in candidates]})
Example #44
def test_last_sync_with_empty_given_state(config_dir, mocker):
    content = json.dumps(123)
    watson = Watson(last_sync=None, config_dir=config_dir)

    mocker.patch('builtins.open', mocker.mock_open(read_data=content))
    assert watson.last_sync == arrow.get(0)
Example #45
    def read_data(self, **kwargs):
        """
            get the data from the service

            :param kwargs: contain keyword args : trigger_id at least
            :type kwargs: dict
            :rtype: list
        """
        now = arrow.utcnow().to(settings.TIME_ZONE)
        my_toots = []
        search = {}
        since_id = None
        trigger_id = kwargs['trigger_id']
        date_triggered = arrow.get(kwargs['date_triggered'])

        def _get_toots(toot_api, toot_obj, search):
            """
                get the toots from mastodon and return the filters to use

                :param toot_obj: from Mastodon model
                :param search: filter used for MastodonAPI.search()
                :type toot_obj: Object ServiceMastodon
                :type search: dict
                :return: the filter named search, the toots
                :rtype: list
            """
            max_id = 0 if toot_obj.max_id is None else toot_obj.max_id
            since_id = 0 if toot_obj.since_id is None else toot_obj.since_id
            # get the toots for a given tag
            statuses = ''

            if toot_obj.tag:
                search['q'] = toot_obj.tag
                # do a search
                statuses = toot_api.search(**search)
                # just return the content of the statuses array
                statuses = statuses['statuses']

            # get the toots from a given user
            elif toot_obj.tooter:
                search['id'] = toot_obj.tooter
                # call the user timeline and get his toot
                if toot_obj.fav:
                    statuses = toot_api.favourites(max_id=max_id,
                                                   since_id=since_id)
                else:
                    user_id = toot_api.account_search(q=toot_obj.tooter)
                    statuses = toot_api.account_statuses(
                        id=user_id[0]['id'], max_id=toot_obj.max_id,
                        since_id=toot_obj.since_id)

            return search, statuses

        if self.token is not None:
            kw = {'app_label': 'th_mastodon', 'model_name': 'Mastodon', 'trigger_id': trigger_id}
            toot_obj = super(ServiceMastodon, self).read_data(**kw)

            us = UserService.objects.get(token=self.token, name='ServiceMastodon')
            try:
                toot_api = MastodonAPI(
                    client_id=us.client_id,
                    client_secret=us.client_secret,
                    access_token=self.token,
                    api_base_url=us.host,
                )
            except ValueError as e:
                logger.error(e)
                update_result(trigger_id, msg=e, status=False)

            if toot_obj.since_id is not None and toot_obj.since_id > 0:
                since_id = toot_obj.since_id
                search = {'since_id': toot_obj.since_id}

            # first request to Mastodon
            search, statuses = _get_toots(toot_api, toot_obj, search)

            if len(statuses) > 0:
                newest = None
                for status in statuses:
                    if newest is None:
                        newest = True
                        # first query ; get the max id
                        search['max_id'] = max_id = status['id']

                since_id = search['since_id'] = statuses[-1]['id'] - 1

                search, statuses = _get_toots(toot_api, toot_obj, search)

                newest = None
                if len(statuses) > 0:
                    my_toots = []
                    for s in statuses:
                        if newest is None:
                            newest = True
                            max_id = s['id'] - 1
                        toot_name = s['account']['username']
                        # get the text of the tweet + url to this one

                        title = _('Toot from <a href="{}">@{}</a>'.
                                  format(us.host, toot_name))

                        my_date = arrow.get(s['created_at']).to(
                            settings.TIME_ZONE)
                        published = arrow.get(my_date).to(settings.TIME_ZONE)
                        if date_triggered is not None and \
                           published is not None and \
                           now >= published >= date_triggered:
                            my_toots.append({'title': title,
                                             'content': s['content'],
                                             'link': s['url'],
                                             'my_date': my_date})
                            # digester
                            self.send_digest_event(trigger_id, title, s['url'])
                    cache.set('th_mastodon_' + str(trigger_id), my_toots)
                    Mastodon.objects.filter(trigger_id=trigger_id).update(
                        since_id=since_id, max_id=max_id)
        return my_toots
Example #46
    def get(self, obj_id=None):
        """
        ---
        single:
          description: Retrieve a candidate
          tags:
            - candidates
          parameters:
            - in: path
              name: obj_id
              required: true
              schema:
                type: string
          responses:
            200:
              content:
                application/json:
                  schema: SingleObj
            400:
              content:
                application/json:
                  schema: Error
        multiple:
          tags:
            - candidates
          description: Retrieve all candidates
          parameters:
          - in: query
            name: numPerPage
            nullable: true
            schema:
              type: integer
            description: |
              Number of candidates to return per paginated request. Defaults to 25
          - in: query
            name: pageNumber
            nullable: true
            schema:
              type: integer
            description: Page number for paginated query results. Defaults to 1
          - in: query
            name: totalMatches
            nullable: true
            schema:
              type: integer
            description: |
              Used only in the case of paginating query results - if provided, this
              allows for avoiding a potentially expensive query.count() call.
          - in: query
            name: savedStatus
            nullable: true
            schema:
                type: string
                enum: [all, savedToAllSelected, savedToAnySelected, savedToAnyAccessible, notSavedToAnyAccessible, notSavedToAnySelected, notSavedToAllSelected]
            description: |
                String indicating the saved status to filter candidate results for. Must be one of the enumerated values.
          - in: query
            name: startDate
            nullable: true
            schema:
              type: string
            description: |
              Arrow-parseable date string (e.g. 2020-01-01). If provided, filter by
              Candidate.passed_at >= startDate
          - in: query
            name: endDate
            nullable: true
            schema:
              type: string
            description: |
              Arrow-parseable date string (e.g. 2020-01-01). If provided, filter by
              Candidate.passed_at <= endDate
          - in: query
            name: groupIDs
            nullable: true
            schema:
              type: array
              items:
                type: integer
            explode: false
            style: simple
            description: |
              Comma-separated string of group IDs (e.g. "1,2"). Defaults to all of user's
              groups if filterIDs is not provided.
          - in: query
            name: filterIDs
            nullable: true
            schema:
              type: array
              items:
                type: integer
            explode: false
            style: simple
            description: |
              Comma-separated string of filter IDs (e.g. "1,2"). Defaults to all of user's
              groups' filters if groupIDs is not provided.
          - in: query
            name: annotationExcludeOrigin
            nullable: true
            schema:
              type: string
            description: |
              Only load objects that do not have annotations from this origin.
              If the annotationExcludeOutdatedDate is also given, then annotations with
              this origin will still be loaded if they were modified before that date.
          - in: query
            name: annotationExcludeOutdatedDate
            nullable: true
            schema:
              type: string
            description: |
              An Arrow parseable string designating when an existing annotation is outdated.
              Only relevant if giving the annotationExcludeOrigin argument.
              Will treat objects with outdated annotations as if they did not have that annotation,
              so it will load an object if it doesn't have an annotation with the origin specified or
              if it does have it but the annotation modified date < annotationExcludeOutdatedDate
          - in: query
            name: sortByAnnotationOrigin
            nullable: true
            schema:
              type: string
            description: |
              The origin of the Annotation to sort by
          - in: query
            name: sortByAnnotationKey
            nullable: true
            schema:
              type: string
            description: |
              The key of the Annotation data value to sort by
          - in: query
            name: sortByAnnotationOrder
            nullable: true
            schema:
              type: string
            description: |
              The sort order for annotations - either "asc" or "desc".
              Defaults to "asc".
          - in: query
            name: annotationFilterList
            nullable: true
            schema:
              type: array
              items:
                type: string
            explode: false
            style: simple
            description: |
              Comma-separated string of JSON objects representing annotation filters.
              Filter objects are expected to have keys { origin, key, value } for
              non-numeric value types, or { origin, key, min, max } for numeric values.
          - in: query
            name: includePhotometry
            nullable: true
            schema:
              type: boolean
            description: |
              Boolean indicating whether to include associated photometry. Defaults to
              false.
          - in: query
            name: includeSpectra
            nullable: true
            schema:
              type: boolean
            description: |
              Boolean indicating whether to include associated spectra. Defaults to false.
          - in: query
            name: classifications
            nullable: true
            schema:
              type: array
              items:
                type: string
            explode: false
            style: simple
            description: |
              Comma-separated string of classification(s) to filter for candidates matching
              that/those classification(s).
          - in: query
            name: redshiftRange
            nullable: true
            schema:
                type: string
            description: |
                Lowest and highest redshift to return, e.g. "(0,0.5)"
          responses:
            200:
              content:
                application/json:
                  schema:
                    allOf:
                      - $ref: '#/components/schemas/Success'
                      - type: object
                        properties:
                          data:
                            type: object
                            properties:
                              candidates:
                                type: array
                                items:
                                  allOf:
                                    - $ref: '#/components/schemas/Obj'
                                    - type: object
                                      properties:
                                        is_source:
                                          type: boolean
                              totalMatches:
                                type: integer
                              pageNumber:
                                type: integer
                              numPerPage:
                                type: integer
            400:
              content:
                application/json:
                  schema: Error
        """
        user_accessible_group_ids = [
            g.id for g in self.current_user.accessible_groups
        ]
        include_photometry = self.get_query_argument("includePhotometry",
                                                     False)
        include_spectra = self.get_query_argument("includeSpectra", False)

        if obj_id is not None:
            query_options = [
                joinedload(Candidate.obj).joinedload(Obj.thumbnails)
            ]
            if include_photometry:
                query_options.append(
                    joinedload(Candidate.obj).joinedload(
                        Obj.photometry).joinedload(Photometry.instrument))
            if include_spectra:
                query_options.append(
                    joinedload(Candidate.obj).joinedload(
                        Obj.spectra).joinedload(Spectrum.instrument))
            c = Candidate.get_obj_if_readable_by(
                obj_id,
                self.current_user,
                options=query_options,
            )
            if c is None:
                return self.error("Invalid ID")
            accessible_candidates = (
                DBSession().query(Candidate).join(Filter).filter(
                    Candidate.obj_id == obj_id,
                    Filter.group_id.in_(
                        [g.id for g in self.current_user.accessible_groups]),
                ).all())
            filter_ids = [cand.filter_id for cand in accessible_candidates]

            passing_alerts = [{
                "filter_id": cand.filter_id,
                "passing_alert_id": cand.passing_alert_id,
                "passed_at": cand.passed_at,
            } for cand in accessible_candidates]

            candidate_info = c.to_dict()
            candidate_info["filter_ids"] = filter_ids
            candidate_info["passing_alerts"] = passing_alerts
            candidate_info["comments"] = sorted(
                [
                    cmt.to_dict()
                    for cmt in c.get_comments_readable_by(self.current_user)
                ],
                key=lambda x: x["created_at"],
                reverse=True,
            )
            candidate_info["annotations"] = sorted(
                c.get_annotations_readable_by(self.current_user),
                key=lambda x: x.origin,
            )
            candidate_info["is_source"] = len(c.sources) > 0
            if candidate_info["is_source"]:
                candidate_info["saved_groups"] = (
                    DBSession().query(Group).join(Source).filter(
                        Source.obj_id == obj_id).filter(
                            Source.active.is_(True)).filter(
                                Group.id.in_(user_accessible_group_ids)).all())
                candidate_info[
                    "classifications"] = c.get_classifications_readable_by(
                        self.current_user)
            candidate_info["last_detected_at"] = c.last_detected_at
            candidate_info["gal_lon"] = c.gal_lon_deg
            candidate_info["gal_lat"] = c.gal_lat_deg
            candidate_info["luminosity_distance"] = c.luminosity_distance
            candidate_info["dm"] = c.dm
            candidate_info[
                "angular_diameter_distance"] = c.angular_diameter_distance
            self.verify_permissions()
            return self.success(data=candidate_info)

        page_number = self.get_query_argument("pageNumber", None) or 1
        n_per_page = self.get_query_argument("numPerPage", None) or 25
        saved_status = self.get_query_argument("savedStatus", "all")
        total_matches = self.get_query_argument("totalMatches", None)
        start_date = self.get_query_argument("startDate", None)
        end_date = self.get_query_argument("endDate", None)
        group_ids = self.get_query_argument("groupIDs", None)
        filter_ids = self.get_query_argument("filterIDs", None)
        annotation_exclude_origin = self.get_query_argument(
            'annotationExcludeOrigin', None)
        annotation_exclude_date = self.get_query_argument(
            'annotationExcludeOutdatedDate', None)
        sort_by_origin = self.get_query_argument("sortByAnnotationOrigin",
                                                 None)
        annotation_filter_list = self.get_query_argument(
            "annotationFilterList", None)
        classifications = self.get_query_argument("classifications", None)
        redshift_range_str = self.get_query_argument("redshiftRange", None)
        user_accessible_group_ids = [
            g.id for g in self.current_user.accessible_groups
        ]
        user_accessible_filter_ids = [
            filtr.id for g in self.current_user.accessible_groups
            for filtr in g.filters if g.filters is not None
        ]
        if group_ids is not None:
            if isinstance(group_ids, str) and "," in group_ids:
                group_ids = [int(g_id) for g_id in group_ids.split(",")]
            elif isinstance(group_ids, str) and group_ids.isdigit():
                group_ids = [int(group_ids)]
            else:
                return self.error(
                    "Invalid groupIDs value -- select at least one group")
            filter_ids = [
                f.id
                for f in Filter.query.filter(Filter.group_id.in_(group_ids))
            ]
        elif filter_ids is not None:
            if "," in filter_ids:
                filter_ids = [int(f_id) for f_id in filter_ids.split(",")]
            elif filter_ids.isdigit():
                filter_ids = [int(filter_ids)]
            else:
                return self.error("Invalid filterIDs paramter value.")
            group_ids = [
                f.group_id
                for f in Filter.query.filter(Filter.id.in_(filter_ids))
            ]
        else:
            # If 'groupIDs' & 'filterIDs' params not present in request, use all user groups
            group_ids = user_accessible_group_ids
            filter_ids = user_accessible_filter_ids

        # Ensure user has access to specified groups/filters
        if not (all([gid in user_accessible_group_ids
                     for gid in group_ids]) and
                all([fid in user_accessible_filter_ids
                     for fid in filter_ids])):
            return self.error(
                "Insufficient permissions - you must only specify "
                "groups/filters that you have access to.")
        try:
            page = int(page_number)
        except ValueError:
            return self.error("Invalid page number value.")
        try:
            n_per_page = int(n_per_page)
        except ValueError:
            return self.error("Invalid numPerPage value.")

        # We'll join in the nested data for Obj (like photometry) later
        q = (DBSession().query(Obj).join(Candidate).filter(
            Obj.id.in_(DBSession().query(Candidate.obj_id).filter(
                Candidate.filter_id.in_(filter_ids)))).outerjoin(Annotation)
             )  # Join in annotations info for sort/filter
        if classifications is not None:
            if isinstance(classifications, str) and "," in classifications:
                classifications = [
                    c.strip() for c in classifications.split(",")
                ]
            elif isinstance(classifications, str):
                classifications = [classifications]
            else:
                return self.error(
                    "Invalid classifications value -- must provide at least one string value"
                )
            q = q.join(Classification).filter(
                Classification.classification.in_(classifications))
        if sort_by_origin is None:
            # Don't apply the order by just yet. Save it so we can pass it to
            # the LIMIT/OFFSET helper function down the line once other query
            # params are set.
            order_by = [Candidate.passed_at.desc().nullslast(), Obj.id]

        if saved_status in [
                "savedToAllSelected",
                "savedToAnySelected",
                "savedToAnyAccessible",
                "notSavedToAnyAccessible",
                "notSavedToAnySelected",
                "notSavedToAllSelected",
        ]:
            notin = False
            active_sources = (DBSession().query(Source.obj_id).filter(
                Source.active.is_(True)))
            if saved_status == "savedToAllSelected":
                # Retrieve objects that have as many active saved groups that are
                # in 'group_ids' as there are items in 'group_ids'
                subquery = (active_sources.filter(
                    Source.group_id.in_(group_ids)).group_by(
                        Source.obj_id).having(
                            func.count(Source.group_id) == len(group_ids)))
            elif saved_status == "savedToAnySelected":
                subquery = active_sources.filter(
                    Source.group_id.in_(group_ids))
            elif saved_status == "savedToAnyAccessible":
                subquery = active_sources.filter(
                    Source.group_id.in_(user_accessible_group_ids))
            elif saved_status == "notSavedToAnyAccessible":
                subquery = active_sources.filter(
                    Source.group_id.in_(user_accessible_group_ids))
                notin = True
            elif saved_status == "notSavedToAnySelected":
                subquery = active_sources.filter(
                    Source.group_id.in_(group_ids))
                notin = True
            elif saved_status == "notSavedToAllSelected":
                # Retrieve objects that have as many active saved groups that are
                # in 'group_ids' as there are items in 'group_ids', and select
                # the objects not in that set
                subquery = (active_sources.filter(
                    Source.group_id.in_(group_ids)).group_by(
                        Source.obj_id).having(
                            func.count(Source.group_id) == len(group_ids)))
                notin = True
            q = (q.filter(Obj.id.notin_(subquery)) if notin else q.filter(
                Obj.id.in_(subquery)))
        elif saved_status != "all":
            return self.error(
                f"Invalid savedStatus: {saved_status}. Must be one of the enumerated options."
            )

        if start_date is not None and start_date.strip() not in [
                "",
                "null",
                "undefined",
        ]:
            start_date = arrow.get(start_date).datetime
            q = q.filter(Candidate.passed_at >= start_date)
        if end_date is not None and end_date.strip() not in [
                "", "null", "undefined"
        ]:
            end_date = arrow.get(end_date).datetime
            q = q.filter(Candidate.passed_at <= end_date)
        if redshift_range_str is not None:
            redshift_range = ast.literal_eval(redshift_range_str)
            if not (isinstance(redshift_range,
                               (list, tuple)) and len(redshift_range) == 2):
                return self.error('Invalid argument for `redshiftRange`')
            if not (isinstance(redshift_range[0], (float, int))
                    and isinstance(redshift_range[1], (float, int))):
                return self.error('Invalid arguments in `redshiftRange`')
            q = q.filter(Obj.redshift >= redshift_range[0],
                         Obj.redshift <= redshift_range[1])

        if annotation_exclude_origin is not None:

            if annotation_exclude_date is None:
                right = (DBSession().query(Obj.id).join(Annotation).filter(
                    Annotation.origin == annotation_exclude_origin).subquery())
            else:
                expire_date = arrow.get(annotation_exclude_date).datetime
                right = (DBSession().query(Obj.id).join(Annotation).filter(
                    Annotation.origin == annotation_exclude_origin,
                    Annotation.modified >= expire_date,
                ).subquery())

            q = q.outerjoin(right,
                            Obj.id == right.c.id).filter(right.c.id.is_(None))

        if annotation_filter_list is not None:
            # Parse annotation filter list objects from the query string
            # and apply the filters to the query

            for item in re.split(r",(?={)", annotation_filter_list):
                try:
                    new_filter = json.loads(item)
                except json.decoder.JSONDecodeError:
                    return self.error(
                        "Could not parse JSON objects for annotation filtering"
                    )

                if "origin" not in new_filter:
                    return self.error(
                        f"Invalid annotation filter list item {item}: \"origin\" is required."
                    )

                if "key" not in new_filter:
                    return self.error(
                        f"Invalid annotation filter list item {item}: \"key\" is required."
                    )

                if "value" in new_filter:
                    value = new_filter["value"]
                    if isinstance(value, bool):
                        q = q.filter(
                            Annotation.origin == new_filter["origin"],
                            Annotation.data[new_filter["key"]].astext.cast(
                                Boolean) == value,
                        )
                    else:
                        # Test if the value is a nested object
                        try:
                            value = json.loads(value)
                            # If a nested object, we put the value through the
                            # JSON loads/dumps pipeline to get a string formatted
                            # like Postgres will for its JSONB ->> text operation
                            # For some reason, for example, not doing this will
                            # have value = { "key": "value" } (with the extra
                            # spaces around the braces) and cause the filter to
                            # fail.
                            value = json.dumps(value)
                        except json.decoder.JSONDecodeError:
                            # If not, this is just a string field and we don't
                            # need the string formatting above
                            pass
                        q = q.filter(
                            Annotation.origin == new_filter["origin"],
                            Annotation.data[new_filter["key"]].astext == value,
                        )
                elif "min" in new_filter and "max" in new_filter:
                    try:
                        min_value = float(new_filter["min"])
                        max_value = float(new_filter["max"])
                        q = q.filter(
                            Annotation.origin == new_filter["origin"],
                            Annotation.data[new_filter["key"]].cast(Float) >=
                            min_value,
                            Annotation.data[new_filter["key"]].cast(Float) <=
                            max_value,
                        )
                    except ValueError:
                        return self.error(
                            f"Invalid annotation filter list item: {item}. The min/max provided is not a valid number."
                        )
                else:
                    return self.error(
                        f"Invalid annotation filter list item: {item}. Should have either \"value\" or \"min\" and \"max\""
                    )

        if sort_by_origin is not None:
            sort_by_key = self.get_query_argument("sortByAnnotationKey", None)
            sort_by_order = self.get_query_argument("sortByAnnotationOrder",
                                                    None)
            # Define a custom sort order to have annotations from the correct
            # origin first, all others afterwards
            origin_sort_order = case(
                value=Annotation.origin,
                whens={sort_by_origin: 1},
                else_=None,
            )
            annotation_sort_criterion = (
                Annotation.data[sort_by_key].desc().nullslast()
                if sort_by_order == "desc" else
                Annotation.data[sort_by_key].nullslast())
            # Don't apply the order by just yet. Save it so we can pass it to
            # the LIMIT/OFFSET helper function.
            order_by = [
                origin_sort_order.nullslast(),
                annotation_sort_criterion,
                Candidate.passed_at.desc().nullslast(),
                Obj.id,
            ]
        try:
            query_results = grab_query_results(
                q,
                total_matches,
                page,
                n_per_page,
                "candidates",
                order_by=order_by,
                include_photometry=include_photometry,
                include_spectra=include_spectra,
            )
        except ValueError as e:
            if "Page number out of range" in str(e):
                return self.error("Page number out of range.")
            raise
        matching_source_ids = (DBSession().query(Source.obj_id).filter(
            Source.group_id.in_(user_accessible_group_ids)).filter(
                Source.obj_id.in_(
                    [obj.id for obj in query_results["candidates"]])).all())
        candidate_list = []
        for obj in query_results["candidates"]:
            with DBSession().no_autoflush:
                obj.is_source = (obj.id, ) in matching_source_ids
                if obj.is_source:
                    obj.saved_groups = (DBSession().query(Group).join(
                        Source).filter(Source.obj_id == obj.id).filter(
                            Source.active.is_(True)).filter(
                                Group.id.in_(user_accessible_group_ids)).all())
                    obj.classifications = obj.get_classifications_readable_by(
                        self.current_user)
                obj.passing_group_ids = [
                    f.group_id for f in (DBSession().query(Filter).filter(
                        Filter.id.in_(user_accessible_filter_ids)).filter(
                            Filter.id.in_(DBSession().query(
                                Candidate.filter_id).filter(
                                    Candidate.obj_id == obj.id))).all())
                ]
                candidate_list.append(obj.to_dict())
                candidate_list[-1]["comments"] = sorted(
                    [
                        cmt.to_dict() for cmt in obj.get_comments_readable_by(
                            self.current_user)
                    ],
                    key=lambda x: x["created_at"],
                    reverse=True,
                )
                candidate_list[-1]["annotations"] = sorted(
                    obj.get_annotations_readable_by(self.current_user),
                    key=lambda x: x.origin,
                )
                candidate_list[-1]["last_detected_at"] = obj.last_detected_at
                candidate_list[-1]["gal_lat"] = obj.gal_lat_deg
                candidate_list[-1]["gal_lon"] = obj.gal_lon_deg
                candidate_list[-1][
                    "luminosity_distance"] = obj.luminosity_distance
                candidate_list[-1]["dm"] = obj.dm
                candidate_list[-1][
                    "angular_diameter_distance"] = obj.angular_diameter_distance

        query_results["candidates"] = candidate_list
        self.verify_permissions()
        return self.success(data=query_results)
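
The annotationFilterList parameter above is a comma-separated string of JSON objects, which the handler later splits on commas that precede an opening brace. A minimal client-side sketch of building such a request follows; the host, filter values and the /api/candidates path are illustrative assumptions, only the parameter names come from the docstring above.

import json
import urllib.parse

# Hypothetical filters: one exact-value filter and one numeric range filter,
# matching the { origin, key, value } / { origin, key, min, max } shapes above.
filters = [
    {"origin": "cross_match", "key": "catalog", "value": "PS1"},
    {"origin": "ml_scores", "key": "rb", "min": 0.5, "max": 1.0},
]

params = {
    "numPerPage": 10,
    "savedStatus": "notSavedToAnySelected",
    "startDate": "2021-01-01",
    # json.dumps() makes every object start with "{", so joining with "," keeps
    # the string splittable by the handler's re.split(r",(?={)", ...) call.
    "annotationFilterList": ",".join(json.dumps(f) for f in filters),
}

url = "https://skyportal.example.org/api/candidates?" + urllib.parse.urlencode(params)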
Ejemplo n.º 47
0
    def season_period_dev(self, basetime, season_number, around_text):
        """
        Function:
            process the season "季度", get the deviation of the season
        Parameters::
            1.basetime: str, year time or year-month-day time
            2.season_number: str, '[1-4一二三四|下|上]'; list, ['一', '二', '三'...]
            3.around_text: str, '前|后' or '去'; list, ['去|今', '前|后']
        Return: start_year, end_year, start_month, end_month
        """
        # process the words '下' and '上'
        shift_season = 0
        year = 0

        # basetime matches pattern_basetime, so the entity itself carries no year
        matcher = re.match(self.pattern_basetime, basetime)
        basemonth = ''
        if (matcher):
            basemonth = matcher.group(2)

            #have the word "去|今" and "前|后"
            if (type(around_text) == list):
                year = arrow.get(basetime).year
                start_year, end_year, start_month, end_month = self.get_dev_aroundlist(
                    year, season_number, around_text)
                return start_year, end_year, start_month, end_month

            #have the word "前|后"
            if (around_text == u'前' or around_text == u'后'
                    or around_text == u'上' or around_text == u'下'):
                year = arrow.get(basetime).year
                start_year, end_year, start_month, end_month = self.get_dev_around(
                    year, basemonth, season_number, around_text)
                return start_year, end_year, start_month, end_month

            #have the word "去"
            elif (around_text == u'去'):
                year = arrow.get(basetime).year
                start_year, end_year, start_month, end_month = self.get_dev_around(
                    year, basemonth, season_number, around_text)
                return start_year, end_year, start_month, end_month

            elif (type(season_number) == list):
                start_month, end_month = self.get_dev_numlist(season_number)
                year = arrow.get(basetime).year
                return year, year, start_month, end_month
            else:
                #have the "上|下"
                year, shift_season = self.get_dev_up2down(
                    basemonth, season_number)
                year = arrow.get(basetime).year + year

        # process '前|后 1-4一二三四'; the entity contains a year
        else:
            if (around_text == u'前' or around_text == u'后'):
                #the start_month = 1, and change the end_month
                if (around_text == u'前'):
                    around_text = u'后'
                    basemonth = '12'
                #the end_month = 12, and change the start_month
                else:
                    around_text = u'前'
                    basemonth = '1'
                _, _, start_month, end_month = self.get_dev_around(
                    year, basemonth, season_number, around_text)
                year = int(basetime)
                return year, year, start_month, end_month

            elif (type(season_number) == list):
                #have "一、二、三..." in entity
                start_month, end_month = self.get_dev_numlist(season_number)
                year = int(basetime)
                return year, year, start_month, end_month

            # the entity contains a year and "上|下", but not "前|后"
            else:
                year, shift_season = self.get_dev_up2down('1', season_number)
                year = int(basetime)

        start_month = self.season_dict[shift_season][0]
        end_month = self.season_dict[shift_season][1]
        return year, year, start_month, end_month
Ejemplo n.º 48
0
def scheduler(broker: Broker = None):
    """
    Creates a task from a schedule at the scheduled time and schedules next run
    """
    if not broker:
        broker = get_broker()
    close_old_django_connections()
    try:
        with db.transaction.atomic(using=Schedule.objects.db):
            for s in (Schedule.objects.select_for_update().exclude(
                    repeats=0).filter(next_run__lt=timezone.now())):
                args = ()
                kwargs = {}
                # get args, kwargs and hook
                if s.kwargs:
                    try:
                        # eval should be safe here because dict()
                        kwargs = eval(f"dict({s.kwargs})")
                    except SyntaxError:
                        kwargs = {}
                if s.args:
                    args = ast.literal_eval(s.args)
                    # single value won't eval to tuple, so:
                    if type(args) != tuple:
                        args = (args, )
                q_options = kwargs.get("q_options", {})
                if s.hook:
                    q_options["hook"] = s.hook
                # set up the next run time
                if s.schedule_type != s.ONCE:
                    next_run = arrow.get(s.next_run)
                    while True:
                        if s.schedule_type == s.MINUTES:
                            next_run = next_run.shift(
                                minutes=+(s.minutes or 1))
                        elif s.schedule_type == s.HOURLY:
                            next_run = next_run.shift(hours=+1)
                        elif s.schedule_type == s.DAILY:
                            next_run = next_run.shift(days=+1)
                        elif s.schedule_type == s.WEEKLY:
                            next_run = next_run.shift(weeks=+1)
                        elif s.schedule_type == s.MONTHLY:
                            next_run = next_run.shift(months=+1)
                        elif s.schedule_type == s.QUARTERLY:
                            next_run = next_run.shift(months=+3)
                        elif s.schedule_type == s.YEARLY:
                            next_run = next_run.shift(years=+1)
                        elif s.schedule_type == s.CRON:
                            if not croniter:
                                raise ImportError(
                                    _("Please install croniter to enable cron expressions"
                                      ))
                            next_run = arrow.get(
                                croniter(s.cron, timezone.now()).get_next())
                        if Conf.CATCH_UP or next_run > arrow.utcnow():
                            break
                    # arrow always returns a tz aware datetime, and we don't want
                    # this when we explicitly configured django with USE_TZ=False
                    s.next_run = (next_run.datetime if settings.USE_TZ else
                                  next_run.datetime.replace(tzinfo=None))
                    s.repeats += -1
                # send it to the cluster
                scheduled_broker = broker
                try:
                    scheduled_broker = get_broker(q_options["broker_name"])
                except:  # invalid broker_name or non existing broker with broker_name
                    pass
                q_options["broker"] = scheduled_broker
                q_options["group"] = q_options.get("group", s.name or s.id)
                kwargs["q_options"] = q_options
                s.task = django_q.tasks.async_task(s.func, *args, **kwargs)
                # log it
                if not s.task:
                    logger.error(
                        _(f"{current_process().name} failed to create a task from schedule [{s.name or s.id}]"
                          ))
                else:
                    logger.info(
                        _(f"{current_process().name} created a task from schedule [{s.name or s.id}]"
                          ))
                # default behavior is to delete a ONCE schedule
                if s.schedule_type == s.ONCE:
                    if s.repeats < 0:
                        s.delete()
                        continue
                    # but not if it has a positive repeats
                    s.repeats = 0
                # save the schedule
                s.save()
    except Exception as e:
        logger.error(e)
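
The loop above keeps shifting next_run by the schedule's interval until it lands in the future, unless Conf.CATCH_UP short-circuits it after a single shift. A standalone sketch of that behaviour using arrow alone (the interval and start value are illustrative):

import arrow

catch_up = False                                   # stand-in for Conf.CATCH_UP
next_run = arrow.get("2023-01-01T09:00:00+00:00")  # an overdue daily schedule

while True:
    next_run = next_run.shift(days=+1)
    if catch_up or next_run > arrow.utcnow():
        break

# With catch_up False this prints the first 09:00 slot in the future;
# with catch_up True the loop stops after a single shift, leaving missed runs
# to be picked up one at a time on later scheduler passes.
print(next_run)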
Ejemplo n.º 49
0
def next_day(isotext):
    """
    ISO date + 1 day (used in query to Google calendar)
    """
    as_arrow = arrow.get(isotext)
    # replace(days=+1) was removed from current arrow releases; shift() adds the day
    return as_arrow.shift(days=+1).isoformat()
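
A quick usage sketch; the time-of-day and offset of the input are preserved:

print(next_day("2020-11-09"))                  # 2020-11-10T00:00:00+00:00
print(next_day("2020-11-09T13:30:00-08:00"))   # 2020-11-10T13:30:00-08:00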
Ejemplo n.º 50
0
"""
Nose tests for acp_times.py

We cannot test for randomness here (no effective oracle),
but we can test that the elements in the returned string
are correct.
"""

import arrow
import acp_times

import nose  # Testing framework
import logging
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.WARNING)
log = logging.getLogger(__name__)

startTime = arrow.get("2020-11-09T00:00:00-08:00")


def same(s, t):
    """
    Same times
    in two strings s and t.
    """
    return s == t


def test_lt_60_close():
    assert same("2020-11-09T01:45:00-08:00",
                acp_times.close_time(15, 200, startTime))
    assert same("2020-11-09T03:15:00-08:00",
                acp_times.close_time(45, 100, startTime))
Ejemplo n.º 51
0
    def time_ago(self):
        time_ago = arrow.get(self.created_at)
        return time_ago.humanize()
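
humanize() turns the stored timestamp into a relative phrase; a self-contained sketch independent of the model above:

import arrow

created_at = arrow.utcnow().shift(hours=-2)
print(created_at.humanize())   # "2 hours ago"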
Ejemplo n.º 52
0
def format_arrow_time( time ):
    try:
        normal = arrow.get( time )
        return normal.format("HH:mm")
    except:
        return "(bad time)"
Ejemplo n.º 53
0
    def parse_nwfsc_txt_bcs_data(self, angles='x'):
        """
        Method to parse the contents of the NWFSC/FRAM BCS data stream.  The ascii data is passed in as the
        contents variable
        :param angles:  x / xy - whether to return only the X or X and Y angles
        :return: tilt_values - N x 3 dimensional array of date-time, X tilt, Y tilt
        """
        # byte_count_arr = [int(x.strip('\r\nH')) if int(x.strip('\r\nH')) else x.strip('\r\nH')
        # 					  for x in self.content_list[0].split(' L')]
        # byte_count = byte_count_arr[0]*65536 + byte_count_arr[1]
        byte_count = self.get_byte_count(byte_string=self.content_list[0])
        # print('Total byte size:', byte_count, 'bytes')

        beginning = re.search(r'FF\d+FF',
                              self.content_list[1]).group().strip('F')
        ending = re.search(r'EE\d+EEZ{0,2}',
                           self.content_list[1]).group().strip('EZ')
        count = ending[20:25]
        data = re.search(r'FF\d+EE', self.content_list[1]).group().strip('FE')

        # print('File Data:')
        # print('\tBeginning:', beginning)
        # print('\tEnding:', ending)
        # print('\tCount:', count)

        if time.daylight:
            offset_hour = -time.altzone / 3600
        else:
            offset_hour = -time.timezone / 3600

        # start_datetime = beginning[8:10] + '/' + beginning[10:12] + '/' + beginning[12:14] + \
        #             ' ' + beginning[14:16] + ':' + beginning[16:18] + ':' + beginning[18:20] + \
        #             ' ' + '%02d:00' % offset_hour
        # self.start_datetime = parser.parse(start_datetime)

        offset_hour = '%02d:00' % offset_hour

        offset_hour = "-07:00"
        self.start_datetime = arrow.get(beginning[8:20] + offset_hour,
                                        'MMDDYYHHmmssZZ')

        # print('\nParsed Results:  (Note: parsed times are in ISO format, UTC time zone (7 hours ahead)')
        # print('\tStart Date-Time:', self.start_datetime.isoformat())

        # TODO Convert 2 angle values into 1 angle between 0 and 90
        # Iterate through the data, turning it into an N x 3 list
        tilt_values = []
        current_datetime = self.start_datetime
        for values in re.findall(".{6}", data):

            if angles == 'x':

                value = int(values[0:3])

                if 0 <= value < 270:
                    value = 90 - value
                else:
                    value = 450 - value

                # If the value is not between 0 <= x <= 359, then it is a bad value, indicate so
                # TODO

                # else:
                # 	value = 999		# Bad Data

                # Values have been converted between -179 <= x <= 180
                # Set overall boundaries for what will be returned and plotted in Integrator
                if value < -10:
                    value = -10
                elif value > 100:
                    value = 100

                # tilt_values.append([current_datetime.astimezone().isoformat(), value])
                tilt_values.append([current_datetime.isoformat(), value])

            elif angles == 'xy':
                # tilt_values.append([current_datetime.astimezone().isoformat(), int(values[0:3])-180, int(values[3:6])-180])
                tilt_values.append([
                    current_datetime.isoformat(),
                    int(values[0:3]) - 180,
                    int(values[3:6]) - 180
                ])

            current_datetime = current_datetime.shift(seconds=1)  # shift() returns a new Arrow, so reassign
            # current_datetime += timedelta(seconds=1)

        # Format:  date-time, X, Y
        # print(tilt_values)
        # print('angle:', angles)
        return tilt_values
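
The X-angle branch above maps a raw 0-359 reading onto a signed tilt and clamps it to the range plotted downstream. Pulled out as a small helper (bounds copied from the method, sample inputs illustrative):

def convert_x_angle(raw):
    # 0 <= raw < 270 maps to 90 - raw, the rest wraps around via 450 - raw
    angle = 90 - raw if 0 <= raw < 270 else 450 - raw
    # clamp to the plotting bounds used above
    return max(-10, min(100, angle))

print(convert_x_angle(90))    # 0, sensor level
print(convert_x_angle(0))     # 90
print(convert_x_angle(200))   # -10 after clamping (the raw value maps to -110)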
Ejemplo n.º 54
0
def test_date_dif_in_days():
    date_1 = arrow.get('2015-12-23 18:40:48', 'YYYY-MM-DD HH:mm:ss')
    date_2 = arrow.get('2017-11-15 13:18:20', 'YYYY-MM-DD HH:mm:ss')
    diff = jh.date_diff_in_days(date_1, date_2)
    assert diff == 692
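
jh.date_diff_in_days itself is not shown in this snippet; a minimal re-implementation consistent with the assertion could look like this, since subtracting two arrow objects yields a plain timedelta:

def date_diff_in_days(date_1, date_2):
    # abs() on the timedelta makes the result independent of argument order
    return abs(date_2 - date_1).days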
Ejemplo n.º 55
0
    def get_end_datetime(self):
        """
        Method to return the ending datetime of the data file
        :return:
        """
        if self.content_list == []:
            return

        if self.sensor_type is None or self.sensor_type == '':
            return

        if self.sensor_type == 'afsc':

            header = self.content_list[1]
            data = self.content_list[2:len(self.content_list)]

            datetime_col = -1
            for i, item in enumerate(header):
                if 'date time' in item.lower():
                    datetime_col = i
                    timezone = item.split(',')[1].strip()
                    offset_hour = '00:00'
                    if 'gmt' in timezone.lower():
                        offset_hour = timezone.strip('GMT')

                    end_datetime = arrow.get(data[len(data) -
                                                  1][datetime_col] +
                                             offset_hour)
                    return end_datetime

                    # end_datetime = parser.parse(data[len(data)-1][datetime_col] + offset_hour)
                    # return self.tc.local_to_utc(local_time=end_datetime)
                else:
                    continue  # Keep looking for the date_time column
            logging.error(
                'BcsReader: Did not find end "date_time" column in BCS header.'
            )
            return None

        elif self.sensor_type == 'nwfsc_csv':

            return None

        elif self.sensor_type == 'nwfsc_txt':

            ending = re.search(r'EE\d+EEZ{0,2}',
                               self.content_list[1]).group().strip('EZ')
            if ending:
                if time.daylight:
                    offset_hour = -time.altzone / 3600
                else:
                    offset_hour = -time.timezone / 3600
                end_datetime = ending[8:10] + '/' + ending[10:12] + '/' + ending[12:14] + \
                            ' ' + ending[14:16] + ':' + ending[16:18] + ':' + ending[18:20] + \
                            ' ' + '%02d:00' % offset_hour
                end_datetime = arrow.get(end_datetime)
                return end_datetime

                # end_datetime = parser.parse(end_datetime)
                # return self.tc.local_to_utc(local_time=end_datetime)

        return None
Ejemplo n.º 56
0
def query_api_statuses(query: str,
                       elasticsearch_url: str,
                       elasticuser: str = None,
                       elasticpass: str = None,
                       elasticsearch_index: str = STATUSES_INDEX,
                       time_sleep: float = 1.1,
                       since: str = '0'):
    """Goes to twitter API an get status info and saves into a json file (in "json" dir) and if Elasticsearch is identified send it too
    
    Arguments:
        query {str} -- Proposed query to obtain statuses on Twitter
        elasticsearch_url {str} -- [description]
    
    Keyword Arguments:
        elasticuser {str} -- [description] (default: {None})
        elasticpass {str} -- [description] (default: {None})
        elasticsearch_index {str} -- [description] (default: {STATUSES_INDEX})
        since {str} -- Status ID to start twitter extraction (default: {'0'})        
    """
    # Create a connection with Elastic
    if elasticsearch_url is not None:
        es = Elasticsearch(elasticsearch_url)
        logger.info(es.info())
    else:
        es = None

    # Check that time_sleep is at least 1.1 secs
    try:
        assert time_sleep >= 1.1
    except AssertionError:
        logger.error("Time Sleep less than 1.1 secs (minimum)")
        raise

    api = twitter.Api(consumer_key=CONSUMER_KEY,
                      consumer_secret=CONSUMER_SECRET,
                      access_token_key=ACCESS_TOKEN_KEY,
                      access_token_secret=ACCESS_TOKEN_SECRET,
                      tweet_mode='extended')

    since_id = int(since)

    logger.info("Scrapping query on Twitter")

    df = scrape_twitter_by_date(query,
                                start_date=arrow.now().format('YYYY-MM-DD'),
                                end_date=arrow.now().format('YYYY-MM-DD'))

    if df is not None:
        lst_statuses_ids = df['STATUS_ID'].tolist()
        hydrataded_statuses = hydratate_status(api, lst_statuses_ids)
        ## Save all jsons to file and load into Elastic
        logger.info("Processing Statuses from Twitter API to save jsons")
        for c_status_data in tqdm(hydrataded_statuses):
            cur_dict = Cut(c_status_data.AsDict())
            cur_id_str = cur_dict['id_str']

            # Fix twitter dates to more 'standart' date format
            list_all_keys_w_dots = dotter(cur_dict.data, '', [])
            try:
                for created_at_keys in list_all_keys_w_dots:
                    if 'created_at' in created_at_keys:
                        cur_dt = arrow.get(cur_dict[created_at_keys],
                                           TWITTER_DATETIME_PATTERN)
                        cur_dict[created_at_keys] = cur_dt.format(
                            "YYYY-MM-DDTHH:mm:ss") + "Z"
            except:
                logger.error("Error parsing dates on %s" % cur_id_str)

            cur_json = json.dumps(cur_dict.data, indent=4)

            save_json(cur_json, "./json/" + cur_id_str + ".json")
            if es is not None:
                logger.debug("Indexing: %s " % cur_id_str)
                es.index(
                    index=elasticsearch_index,
                    #ignore=400,
                    doc_type='status',
                    id=cur_id_str,
                    body=cur_json)
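
The created_at normalisation above reduces to one arrow round-trip. TWITTER_DATETIME_PATTERN is not shown in this snippet; the classic Twitter format string is assumed below:

import arrow

TWITTER_DATETIME_PATTERN = "ddd MMM DD HH:mm:ss Z YYYY"   # assumed value

raw = "Mon Nov 09 18:30:05 +0000 2020"
iso = arrow.get(raw, TWITTER_DATETIME_PATTERN).format("YYYY-MM-DDTHH:mm:ss") + "Z"
print(iso)   # 2020-11-09T18:30:05Z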
Ejemplo n.º 57
0
    def __post_init__(self):
        self.time = arrow.get(self.time).datetime
Ejemplo n.º 58
0
    def parse_afsc_bcs_data(self):
        """
        Method to parse the contents of the AFSC-provided BCS data stream.  This data uses the Onset Hoboware
        U22-001 data logger capability (http://www.onsetcomp.com/).  The data is converted from a proprietary
        format to a csv file using the Hoboware software. The output CSV file has two columns of data
        consisting of date-time and temperature readings (that are in turn converted to tilt values).
        :param contents:
        :return: tilt_values: N x 2 array of date-time + tilt values
        """
        metadata = self.content_list[0]
        header = self.content_list[1]
        data = self.content_list[2:len(self.content_list)]
        offset_hour = '00:00'

        datetime_col = -1
        temp_col = -1
        voltage_col = -1
        bcs_offset_hack = 0  # hours
        for i, item in enumerate(header):
            if 'date time' in item.lower():
                datetime_col = i
                tzone = item.split(',')[1].strip()
                if 'gmt' in tzone.lower():
                    offset_hour = tzone.strip('GMT')

                    # FIELD-581 Confirm this fix is still required with newest version of Hoboware
                    if offset_hour == '-08:00':
                        offset_hour = '-07:00'
                        logging.info(
                            '\t\t\t\tBCS Parsing. Offset hour switched to -07:00 to overcome Hoboware time zone issue'
                        )
                        bcs_offset_hack = 1  # hour

            elif 'temp' in item.lower():
                temp_col = i
            elif 'voltage' in item.lower():
                voltage_col = i

        tilt_values = []
        if datetime_col >= 0:
            for row in data:
                # date_time = parser.parse(row[datetime_col] + offset_hour).isoformat()
                #date_time = self.tc.local_to_utc_as_iso(local_time=parser.parse(row[datetime_col] + offset_hour))
                # local_time = parser.parse(row[datetime_col] + offset_hour) + timedelta(hours=bcs_offset_hack)
                # date_time = self.tc.local_to_utc_as_iso(local_time=local_time)
                try:
                    date_time = arrow.get(
                        row[datetime_col] + offset_hour,
                        'MM/DD/YY hh:mm:ss AZZ').shift(
                            hours=bcs_offset_hack).isoformat()
                except:
                    logging.error(
                        f"BcsReader error parsing date_time: {row[datetime_col]}, {offset_hour}"
                    )
                    continue

                if voltage_col >= 0:
                    try:
                        voltage = float(row[voltage_col])
                        angle = self.convert_voltage_to_angle(voltage=voltage)
                    except:
                        continue

                elif temp_col >= 0:
                    try:
                        temp = float(row[temp_col])
                        angle = self.convert_temp_to_angle(temp=temp)
                    except:
                        continue

                else:
                    continue

                if angle is not None:
                    tilt_values.append([date_time, angle])

        return tilt_values
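
Isolated sketch of the timestamp handling above: the GMT offset stripped from the header column plus a row timestamp are parsed in one call (values illustrative):

import arrow

offset_hour = "-07:00"                  # stripped from a "GMT-07:00" header column
row_timestamp = "06/15/21 01:23:45 PM"
print(arrow.get(row_timestamp + offset_hour, "MM/DD/YY hh:mm:ss AZZ").isoformat())
# 2021-06-15T13:23:45-07:00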
Ejemplo n.º 59
0
def worker(job_json):
    """
    For every incoming message, this worker function is called. Be extremely
    careful not to do anything CPU-intensive here, or you will see blocking.
    Sockets are async under gevent, so those are fair game.
    """
    market_json = zlib.decompress(job_json)
    market_data = simplejson.loads(market_json)

    try:
        resultType = market_data['resultType']
    except KeyError as e:
        log("Error getting result type")
        return
    
    if market_data['generator']['name']=='EveData.Org':
        log("Ignoring evedata.org")
        return

    cursor, conn = dbcon.connect()
    if resultType == 'orders':
        insert_sql = "INSERT INTO orders\
                (typeid, regionid, price, volremaining, orderrange, orderid, volentered,\
                minvolume, bid, issuedate, duration, stationid, solarsystemid) VALUES"
        values = "('%d', '%d', '%f', '%d', '%d', '%d', '%d', '%d', '%d', '%s', '%d', '%d', '%d')," 
        delete_sql = "DELETE FROM orders WHERE regionid = %d AND typeid = %d"

        try:
            for rowset in market_data['rowsets']:
                if len(rowset['rows']) <= 0:
                    continue
                try:
                    cursor.execute(delete_sql % (rowset['regionID'], rowset['typeID']))
                    conn.commit()
                except Error as e:
                    log("orders : " + delete_sql % (rowset['regionID'], rowset['typeID']))
                    log("MySQL Error [%d]: %s" % (e.args[0], e.args[1]))
                    conn.rollback()
                for row in rowset['rows']:
                    insert_sql += values % (rowset['typeID'], rowset['regionID'], row[0], row[1], row[2], row[3], row[4], row[5], row[6], arrow.get(row[7]).datetime.strftime('%Y-%m-%d %H:%M:%S'), row[8], row[9], row[10])
                try:
                    cursor.execute(insert_sql[:-1])
                    conn.commit()
                except:
                    log("Error inserting orders, rolling back")
                    log(simplejson.dumps(market_data, indent=4*' '))
                    conn.rollback()
        except KeyError as e:
            log("Error parsing order rowsets")
            conn.close()
            return
    elif resultType == 'history': #disable quickly. this is stupid
        insert_sql = "INSERT INTO history (typeid, regionid, issuedate, orders, low, high, average, quantity) VALUES"
        values = "('%d', '%d', '%s', '%d', '%d', '%d', '%d', '%d'),"
        delete_sql = "DELETE FROM history WHERE regionid = %d AND typeid = %d" 

        try:
            for rowset in market_data['rowsets']:
                try:
                    cursor.execute(delete_sql % (rowset['regionID'], rowset['typeID']))
                    conn.commit()
                except Error as e:
                    log("history : " + delete_sql % (rowset['regionID'], rowset['typeID']))
                    log("MySQL Error [%d]: %s" % (e.args[0], e.args[1]))
                    conn.rollback()
                for row in rowset['rows']:
                    print(row)
                    insert_sql += values % (rowset['typeID'], rowset['regionID'], arrow.get(row[0]).datetime.strftime('%Y-%m-%d %H:%M:%S'), row[1], row[2], row[3], row[4], row[5])
                try:
                    cursor.execute(insert_sql[:-1])
                    conn.commit()
                except:
                    log("Error inserting history, rolling back")
                    log(simplejson.dumps(market_data, indent=4*' '))
                    conn.rollback()
        except KeyError as e:
            log("Error parsing history rowsets")
            conn.close()
            return
    conn.close()
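
Both branches normalise the feed's issue dates the same way before building the INSERT; in isolation:

import arrow

print(arrow.get("2013-07-09T18:30:05+00:00").datetime.strftime('%Y-%m-%d %H:%M:%S'))
# 2013-07-09 18:30:05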
Ejemplo n.º 60
0
    def get_start_datetime(self):
        """
        Method to return the start datetime of the given file/contents
        :return:
        """
        if self.content_list == []:
            return

        if self.sensor_type is None or self.sensor_type == '':
            return

        if self.sensor_type == 'afsc':

            header = self.content_list[1]
            data = self.content_list[2:len(self.content_list)]

            datetime_col = -1
            for i, item in enumerate(header):
                if 'date time' in item.lower():
                    datetime_col = i
                    tzone = item.split(',')[1].strip()
                    offset_hour = '00:00'
                    if 'gmt' in tzone.lower():
                        offset_hour = tzone.strip('GMT')

                    # TODO Todd Hay - Confirm that I'm getting the correct date in local time zone
                    start_datetime = arrow.get(data[0][datetime_col] +
                                               offset_hour)
                    return start_datetime

                    # start_datetime = parser.parse(data[0][datetime_col] + offset_hour)
                    # return self.tc.local_to_utc(local_time=start_datetime)
                    # return self.local_to_utc(local_time=start_datetime)
                else:
                    continue  # Keep looking for the date_time column
            logging.error(
                'BcsReader: Did not find start "date_time" column in BCS header.'
            )
            return None

        elif self.sensor_type == 'nwfsc_csv':

            return None

        elif self.sensor_type == 'nwfsc_txt':

            beginning = re.search(r'FF\d+FF',
                                  self.content_list[1]).group().strip('F')
            if beginning:
                if time.daylight:
                    offset_hour = -time.altzone / 3600
                else:
                    offset_hour = -time.timezone / 3600

                start_datetime = beginning[8:10] + '/' + beginning[10:12] + '/' + beginning[12:14] + \
                            ' ' + beginning[14:16] + ':' + beginning[16:18] + ':' + beginning[18:20] + \
                            ' ' + '%02d:00' % offset_hour
                start_datetime = arrow.get(start_datetime)
                return start_datetime

                # start_datetime = parser.parse(start_datetime)
                # return self.tc.local_to_utc(local_time=start_datetime)

        return None