Example #1
def main():
    argv = docopt(USAGE)
    if argv["--start"]:
        start = pacific_to_utc(argv["--start"])
    elif argv["--last"]:
        start = datetime.strftime(
                datetime.utcnow() - timedelta(minutes=int(argv["--last"])), "%Y-%m-%dT%H:%M:%SZ")
    else:
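        # Default window: report on the last 7 days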
        start = datetime.strftime(datetime.utcnow() - timedelta(days=7), "%Y-%m-%dT%H:%M:%SZ")

    if argv["--end"]:
        end = pacific_to_utc(argv["--end"])
    else:
        end = datetime.strftime(datetime.utcnow(),"%Y-%m-%dT%H:%M:%SZ")

    pager = PagerDuty(argv['<subdomain>'], argv['<api-token>'], argv['<policy>'])
    for command in ['all','wakeups','flakes']:
        if argv[command]:
            incidents = pager.do_list(command, argv['--no-thurs'], since=start, until=end)
            incident_list = list(incidents)
            incident_list_prod, incident_list_staging = segregation(incident_list)
            if incident_list:
                if argv['--email']:
                    email_output(incident_list_prod, incident_list_staging, argv['--top'])
                elif argv['--top']:
                    pprint_rankings(top(incident_list_prod, argv['--top']))
                    pprint_rankings(top(incident_list_staging, argv['--top']))
                else:
                    pprint_incidents(incident_list_prod)
                    pprint_incidents(incident_list_staging)


    if argv['mtr']:
        print pager.get_mtr(since=start, until=end)
Example #2
    def call(self, function, params=None):
        self.requestPerMinute += 1
        now = datetime.utcnow()

        if self.requestPerMinute >= self.requestLimit:
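            # Throttle: sleep until the start of the next wall-clock minute before continuing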
            waittime = 60 - now.second
            logging.warning("Limit for request per minute exceeded. Waiting for: {0} sec.".format(waittime))
            time.sleep(waittime)
            now = datetime.utcnow()

        if self.checkMinute != now.minute:
            self.requestPerMinute = 0
            self.checkMinute = now.minute

        payload = ''
        try:
            p = "" if not params else '?' + "&".join(
                ["{key}={value}".format(key=k, value=v) for (k, v) in params.iteritems()])
            url = "{base}.{func}{params}".format(base=self.baseConfig["url"], func=function, params=p)
            logging.debug("{0} {1} API call:{2}".format(self.checkMinute, self.requestPerMinute, url))
            request = urllib2.Request(url, None, self.baseConfig["headers"])
            stream = urllib2.urlopen(request)
            payload = stream.read()
            data = json.loads(payload)
            if isinstance(data, dict) and 'ruid' in data:
                logging.error('Api call failed with error: {0} Code: {1}'.format(data['message'], data['code']))
                return None
            return data

        except Exception as e:
            logging.error('Error: {0} Context: {1}'.format(e, payload))
            return None
Example #3
 def _update_session_last_used(self):
     if self.key in ApiSession.sessionDict:
         ApiSession.sessionDict[self.key]["last_used"] = \
             datetime.utcnow()
     else:
         ApiSession.sessionDict[self.key] = \
             {'api': self, 'last_used': datetime.utcnow()}
Example #4
    def run(self, **kwargs):
        logger.info("TASK :: alarm_dispatcher")

        # Select Alarm where date_start_notice >= now() - 60 minutes and <= now() + 5 minutes
        start_time = datetime.utcnow().replace(tzinfo=utc) + relativedelta(minutes=-60)
        end_time = datetime.utcnow().replace(tzinfo=utc) + relativedelta(minutes=+5)
        alarm_list = Alarm.objects.filter(date_start_notice__range=(start_time, end_time),
                                          status=ALARM_STATUS.PENDING).order_by('date_start_notice')
        # Browse all the Alarm found
        for obj_alarm in alarm_list:
            # Check if there is an existing Event
            if obj_alarm.event:
                obj_alarm.status = ALARM_STATUS.IN_PROCESS
                obj_alarm.save()

                second_towait = obj_alarm.get_time_diff()
                # If second_towait negative then set to 0 to be run directly
                if second_towait <= 0:
                    perform_alarm.delay(obj_alarm.event, obj_alarm)
                else:
                    # Call the Alarm in the future
                    perform_alarm.apply_async(
                        args=[obj_alarm.event, obj_alarm], countdown=second_towait)
            else:
                logger.error("There is no Event attached to this Alarm: %d" % obj_alarm.id)
                ## Mark the Alarm as ERROR
                obj_alarm.status = ALARM_STATUS.FAILURE
                obj_alarm.save()
Example #5
 def latest(self):
     """ Return newest record (limited to last week). """
     return self.query_legacy(
         datetime.utcnow() - timedelta(7),
         datetime.utcnow(),
         time_near=datetime.utcnow()
     )
Example #6
def valid_admin_cookie(cookie):
    if g.read_only_mode:
        return (False, None)

    # parse the cookie
    try:
        first_login, last_request, hash = cookie.split(',')
    except ValueError:
        return (False, None)

    # make sure it's a recent cookie
    try:
        first_login_time = datetime.strptime(first_login, COOKIE_TIMESTAMP_FORMAT)
        last_request_time = datetime.strptime(last_request, COOKIE_TIMESTAMP_FORMAT)
    except ValueError:
        return (False, None)

    cookie_age = datetime.utcnow() - first_login_time
    if cookie_age.total_seconds() > g.ADMIN_COOKIE_TTL:
        return (False, None)

    idle_time = datetime.utcnow() - last_request_time
    if idle_time.total_seconds() > g.ADMIN_COOKIE_MAX_IDLE:
        return (False, None)

    # validate
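    # constant_time_compare avoids leaking information about the expected value through comparison timing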
    expected_cookie = c.user.make_admin_cookie(first_login, last_request)
    return (constant_time_compare(cookie, expected_cookie),
            first_login)
Example #7
 def _get_group(self, cr, uid, orderpoint, context=None):
     """
         Will return the groups and the end dates of the intervals of the purchase calendar
         that need to be executed now.
         If a purchase calendar is defined, it should give the
         :return [(date, group)]
     """
     # Check whether the orderpoint has a last execution date and whether it needs to be computed again already
     calendar_obj = self.pool.get("resource.calendar")
     att_obj = self.pool.get("resource.calendar.attendance")
     group = False
     context = context or {}
     context['no_round_hours'] = True
     date = False
     now_date = self._convert_to_tz(cr, uid, datetime.utcnow(), context=context)
     res_intervals = []
     if orderpoint.purchase_calendar_id and orderpoint.purchase_calendar_id.attendance_ids:
         if orderpoint.last_execution_date:
             new_date = datetime.strptime(orderpoint.last_execution_date, DEFAULT_SERVER_DATETIME_FORMAT)
         else:
             new_date = datetime.utcnow()
         # Convert to timezone of user
         new_date = self._convert_to_tz(cr, uid, new_date, context=context)
         intervals = calendar_obj._schedule_days(cr, uid, orderpoint.purchase_calendar_id.id, 1, new_date, compute_leaves=True, context=context)
         for interval in intervals:
             # If last execution date, interval should start after it in order not to execute the same orderpoint twice
             # TODO: Make the interval a little bigger
             if (orderpoint.last_execution_date and (interval[0] > new_date and interval[0] < now_date)) or (not orderpoint.last_execution_date and interval[0] < now_date and interval[1] > now_date):
                 group = att_obj.browse(cr, uid, interval[2], context=context).group_id.id
                 date = interval[1]
                 res_intervals += [(date, group), ]
     else:
         return [(now_date, None)]
     return res_intervals
Example #8
    def __init__(self, title=None, authors=None,
                 publisher=None, publication_date=None,
                 types=None, content_url=None, valid=True,
                 updated_by=None):
        """
        Constructs a new Publication instance.

        Arguments:
            title (string): The publication title.
            authors (Contact[]): The list of authors.
            publisher (string): The publisher of the publication.
            publication_date (DateTime): The date of publication.
            types (OrgTypeEnum[]): The types associated with the publication.
            content_url (string): The URL for the publication.
            valid (boolean): The validity of the publication.
            updated_by (ObjectId): The object id of the publication's last updater.
        """
        self.title = title
        self.authors = authors
        self.publisher = publisher
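        # Fill in defaults here rather than in the signature so the timestamp is
        # evaluated per call and the list is not shared between instances.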
        self.publication_date = publication_date if publication_date is not None else datetime.utcnow()
        self.types = types if types is not None else []
        self.content_url = content_url
        self.valid = valid
        self.last_updated = datetime.utcnow()
        self.updated_by = updated_by
Example #9
def home(req):

    form = GetCustomerForm(req.GET)

    # The data provided on input
    cust_id = req.GET.get('cust_id')
    client_type = req.GET.get('client_type')

    if cust_id:

        # What to invoke the service with
        request = {'cust_id':cust_id}

        # When was the service invoked
        before = datetime.utcnow()

        # Invoke the service with the user-provided input using a selected client
        if client_type == 'AnyServiceInvoker':
            response = req.client_any.invoke('customer.get1', request)
        else:
            response = req.client_json.invoke(request)

        # How long we waited for the response, in milliseconds
        time = (datetime.utcnow() - before).total_seconds() * 1000

    else:
        time, response = None, None

    return TemplateResponse(req, 'customer.html',
        {'time':time, 'form':form, 'response':response})
Example #10
 def run_task(self, **kwargs):
   Session.merge(self.task)
   self.task.start_time = datetime.utcnow()
   self.task.ident = threading.get_ident()
   self.task.status = TaskStatus.running.value
   Session.merge(self.task)
   Session.commit()
   try:
     self.run_function(**kwargs)
     self.task.log = self.log.messages
     self.task.end_time = datetime.utcnow()
     self.task.status = TaskStatus.finished.value
     self.task.result = TaskResult.success.value
     self.task = Session.merge(self.task)
     Session.commit()
   except Exception as e:
     self.task.log = self.log.messages
     self.task.tb = traceback.format_exc()
     self.task.end_time = datetime.utcnow()
     self.task.status = TaskStatus.finished.value
     self.task.result = TaskResult.fail.value
     self.task = Session.merge(self.task)
     Session.commit()
     defect = jira.defect_for_exception(
       "Background Task Error: {}".format(
         self.task.name),
       e, tb=traceback.format_exc(),
       username=self.task.username)
     self.task.defect_ticket = defect.key
     self.task = Session.merge(self.task)
     Session.commit()
   finally:
     Session.remove()
Example #11
    def page(self):
        changemsg = []
        if cherrypy.session.id != cherrypy.session.originalid:
            if cherrypy.session.originalid is None:
                changemsg.append(
                    'Created new session because no session id was given.')
            if cherrypy.session.missing:
                changemsg.append(
                    'Created new session due to missing '
                    '(expired or malicious) session.')
            if cherrypy.session.regenerated:
                changemsg.append('Application generated a new session.')

        try:
            expires = cherrypy.response.cookie['session_id']['expires']
        except KeyError:
            expires = ''

        return page % {
            'sessionid': cherrypy.session.id,
            'changemsg': '<br>'.join(changemsg),
            'respcookie': cherrypy.response.cookie.output(),
            'reqcookie': cherrypy.request.cookie.output(),
            'sessiondata': list(cherrypy.session.items()),
            'servertime': (
                datetime.utcnow().strftime('%Y/%m/%d %H:%M') + ' UTC'
            ),
            'serverunixtime': calendar.timegm(datetime.utcnow().timetuple()),
            'cpversion': cherrypy.__version__,
            'pyversion': sys.version,
            'expires': expires,
        }
Example #12
    def test_delete_index(self):
        mother_case_id = uuid.uuid4().hex
        _submit_case_block(
            True, mother_case_id, user_id='user1', owner_id='owner1', case_type='mother',
            case_name='mother', date_modified=datetime.utcnow()
        )

        child_case_id = uuid.uuid4().hex
        _submit_case_block(
            True, child_case_id, user_id='user1', owner_id='owner1', case_type='child',
            case_name='child', date_modified=datetime.utcnow(), index={
                'mom': ('mother', mother_case_id)
            }
        )

        case = self.casedb.get_case(child_case_id)
        self.assertEqual(len(case.indices), 1)

        _submit_case_block(
            False, child_case_id, user_id='user1', date_modified=datetime.utcnow(), index={
                'mom': ('mother', '')
            }
        )
        case = self.casedb.get_case(child_case_id)
        self.assertEqual(len(case.indices), 0)
Example #13
    def test_update_case(self):
        case_id = uuid.uuid4().hex
        opened_on = datetime.utcnow()
        _submit_case_block(
            True, case_id, user_id='user1', owner_id='owner1', case_type='demo',
            case_name='create_case', date_modified=opened_on, update={
                'dynamic': '123'
            }
        )

        modified_on = datetime.utcnow()
        _submit_case_block(
            False, case_id, user_id='user2', owner_id='owner2',
            case_name='update_case', date_modified=modified_on, date_opened=opened_on, update={
                'dynamic': '1234'
            }
        )

        case = self.casedb.get_case(case_id)
        self.assertEqual(case.owner_id, 'owner2')
        self.assertEqual(case.name, 'update_case')
        self.assertEqual(coerce_to_datetime(case.opened_on), coerce_to_datetime(opened_on))
        self.assertEqual(case.opened_by, 'user1')
        self.assertEqual(case.modified_on, modified_on)
        self.assertEqual(case.modified_by, 'user2')
        self.assertTrue(case.server_modified_on > modified_on)
        self.assertFalse(case.closed)
        self.assertIsNone(case.closed_on)
        self.assertEqual(case.dynamic_case_properties()['dynamic'], '1234')
Example #14
    def download_and_commit_uids(self, crispin_client, uids):
        start = datetime.utcnow()
        raw_messages = crispin_client.uids(uids)
        if not raw_messages:
            return
        new_uids = set()
        with self.syncmanager_lock:
            with session_scope() as db_session:
                account = Account.get(self.account_id, db_session)
                folder = Folder.get(self.folder_id, db_session)
                raw_messages = self.__deduplicate_message_object_creation(
                    db_session, raw_messages, account)
                if not raw_messages:
                    return 0

                for msg in raw_messages:
                    uid = self.create_message(db_session, account, folder,
                                              msg)
                    if uid is not None:
                        db_session.add(uid)
                        db_session.commit()
                        new_uids.add(uid)

        log.info('Committed new UIDs',
                 new_committed_message_count=len(new_uids))
        # If we downloaded uids, record message velocity (#uid / latency)
        if self.state == "initial" and len(new_uids):
            self._report_message_velocity(datetime.utcnow() - start,
                                          len(new_uids))

        if self.is_first_message:
            self._report_first_message()
            self.is_first_message = False

        self.saved_uids.update(new_uids)
Example #15
    def test_update_index(self):
        mother_case_id = uuid.uuid4().hex
        _submit_case_block(
            True, mother_case_id, user_id='user1', owner_id='owner1', case_type='mother',
            case_name='mother', date_modified=datetime.utcnow()
        )

        child_case_id = uuid.uuid4().hex
        _submit_case_block(
            True, child_case_id, user_id='user1', owner_id='owner1', case_type='child',
            case_name='child', date_modified=datetime.utcnow(), index={
                'mom': ('mother', mother_case_id)
            }
        )

        case = self.casedb.get_case(child_case_id)
        self.assertEqual(case.indices[0].identifier, 'mom')

        _submit_case_block(
            False, child_case_id, user_id='user1', date_modified=datetime.utcnow(), index={
                'mom': ('other_mother', mother_case_id)
            }
        )
        case = self.casedb.get_case(child_case_id)
        self.assertEqual(case.indices[0].referenced_type, 'other_mother')
Example #16
    def attach_data(self, data, original_filename=None, username=None, attachment_id=None,
                    media_meta=None):
        """
        This creates the auxmedia attachment with the downloaded data.
        """
        self.last_modified = datetime.utcnow()

        if not attachment_id:
            attachment_id = self.file_hash

        if not self._attachments or attachment_id not in self._attachments:
            if not getattr(self, '_id'):
                # put_attachment blows away existing data, so make sure an id has been
                # assigned to this guy before we do it. this is the expected path
                self.save()
            else:
                # this should only be files that had attachments deleted while the bug
                # was in effect, so hopefully we will stop seeing it after a few days
                logging.error('someone is uploading a file that should have existed for multimedia %s' % self._id)
            self.put_attachment(data, attachment_id, content_type=self.get_mime_type(data, filename=original_filename))
        new_media = AuxMedia()
        new_media.uploaded_date = datetime.utcnow()
        new_media.attachment_id = attachment_id
        new_media.uploaded_filename = original_filename
        new_media.uploaded_by = username
        new_media.checksum = self.file_hash
        if media_meta:
            new_media.media_meta = media_meta
        self.aux_media.append(new_media)
        self.save()
        return True
Example #17
def weblate_context(request):
    '''
    Context processor to inject various useful variables into context.
    '''
    return {
        'version': weblate.VERSION,

        'weblate_url': URL_BASE % weblate.VERSION,
        'donate_url': URL_DONATE % weblate.VERSION,

        'site_title': appsettings.SITE_TITLE,
        'site_url': get_site_url(),

        'offer_hosting': appsettings.OFFER_HOSTING,
        'demo_server': appsettings.DEMO_SERVER,
        'enable_avatars': appsettings.ENABLE_AVATARS,

        'current_date': datetime.utcnow().strftime('%Y-%m-%d'),
        'current_year': datetime.utcnow().strftime('%Y'),
        'current_month': datetime.utcnow().strftime('%m'),

        'current_url': request.get_full_path(),

        'mt_enabled': appsettings.MACHINE_TRANSLATION_ENABLED,
        'hooks_enabled': appsettings.ENABLE_HOOKS,

        'registration_open': appsettings.REGISTRATION_OPEN,
    }
Example #18
    def test_serialize(self):
        """Objects are serialized to JSON-compatible objects"""

        def epoch(obj):
            """Convert to JS Epoch time"""
            return int(time.mktime(obj.timetuple())) * 1000

        types = [('test', str, 'test'),
                 (pd.Timestamp('2013-06-08'), int,
                  epoch(pd.Timestamp('2013-06-08'))),
                 (datetime.utcnow(), int, epoch(datetime.utcnow())),
                 (1, int, 1),
                 (1.0, float, 1.0),
                 (np.float32(1), float, 1.0),
                 (np.int32(1), int, 1),
                 (np.float64(1), float, 1.0),
                 (np.int64(1), int, 1)]

        for puts, pytype, gets in types:
            nt.assert_equal(Data.serialize(puts), gets)

        class BadType(object):
            """Bad object for type warning"""

        test_obj = BadType()
        with nt.assert_raises(LoadError) as err:
            Data.serialize(test_obj)
        nt.assert_equals(err.exception.message,
                         'cannot serialize index of type BadType')
Example #19
def get_user_rank(user, subreddit):
    """Returns the user's rank in the subreddit."""
    sr_name = subreddit.display_name.lower()

    # fetch mod/contrib lists if necessary
    cached = False
    if sr_name in get_user_rank.moderator_cache:
        cache_age = datetime.utcnow() - get_user_rank.cache_time[sr_name] 
        if cache_age < timedelta(hours=1):
            cached = True

    if not cached:
        get_user_rank.cache_time[sr_name] = datetime.utcnow()

        mod_list = set()
        for mod in subreddit.get_moderators():
            mod_list.add(mod.name)
        get_user_rank.moderator_cache[sr_name] = mod_list

        contrib_list = set()
        try:
            for contrib in subreddit.get_contributors():
                contrib_list.add(contrib.name)
        except HTTPError as e:
            if e.response.status_code != 404:
                raise
        get_user_rank.contributor_cache[sr_name] = contrib_list

    if user.name in get_user_rank.moderator_cache[sr_name]:
        return 'moderator'
    elif user.name in get_user_rank.contributor_cache[sr_name]:
        return 'contributor'
    else:
        return 'user'
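
The lookup above relies on caches stored as attributes on the function object itself; a minimal sketch of how they would need to be initialized before the first call (attribute names taken from the code, everything else an assumption):

get_user_rank.moderator_cache = {}
get_user_rank.contributor_cache = {}
get_user_rank.cache_time = {}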
Example #20
 def getAccessToken(self):
     poyntTokenUrl = self.apiHost + "/token"
     currentDatetime = datetime.utcnow()
     expiryDatetime = datetime.utcnow() + timedelta(seconds=300)
     payload = {
         'exp': expiryDatetime,
         'iat': currentDatetime,
         'iss': self.applicationId,
         'sub': self.applicationId,
         'aud': 'https://services.poynt.net',
         'jti': str(uuid.uuid4())
     }
     encodedJWT = jwt.encode(payload, self.rsaPrivateKey, algorithm='RS256')
     #print encodedJWT
     payload = {'grantType':'urn:ietf:params:oauth:grant-type:jwt-bearer', 'assertion':encodedJWT}
     print "Obtaining AccessToken using self-signed JWT:"
     code, jsonObj = self._sendFormPostRequest(poyntTokenUrl, payload, {})
     #r = requests.post(poyntTokenUrl, data=payload, headers=headers)
     #prettyPrint(r.json())
     if code == requests.codes.ok:
         self.accessToken = jsonObj['accessToken']
         self.tokenType = jsonObj['tokenType']
         self.refreshToken = jsonObj['refreshToken']
         return True
     else:
         print "*** FAILED TO OBTAIN ACCESS TOKEN ***"
         return False
Example #21
    def enqueue_job(self, job):
        """
        Move a scheduled job to a queue. In addition, it puts the job
        back into the scheduler if needed.
        """
        self.log.debug('Pushing {0} to {1}'.format(job.id, job.origin))

        interval = job.meta.get('interval', None)
        repeat = job.meta.get('repeat', None)

        # If job is a repeated job, decrement counter
        if repeat:
            job.meta['repeat'] = int(repeat) - 1
        job.enqueued_at = datetime.utcnow()
        job.save()

        queue = self.get_queue_for_job(job)
        queue.push_job_id(job.id)
        self.connection.zrem(self.scheduled_jobs_key, job.id)

        if interval:
            # If this is a repeat job and counter has reached 0, don't repeat
            if repeat is not None:
                if job.meta['repeat'] == 0:
                    return
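            # Re-add the job to the scheduled-jobs sorted set, scored by its next run time as a unix timestamp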
            self.connection._zadd(self.scheduled_jobs_key,
                                  to_unix(datetime.utcnow()) + int(interval),
                                  job.id)
Example #22
    def add_history(self, source, description=None, first_seen=None, last_seen=None, active=False):
        last_seen = last_seen or datetime.utcnow()
        first_seen = first_seen or datetime.utcnow()

        # Do we have to extend current active record ?
        if active:
            active_history = self.get_active(description)
            if active_history and last_seen > active_history.last_seen:
                active_history.last_seen = last_seen
                self.save(validate=False)
                return self
        # Do we have to extend an inactive record ?
        else:
            index, overlapping_history = self._get_overlapping(description, first_seen, last_seen)
            if overlapping_history:
                if source not in overlapping_history.sources:
                    overlapping_history.sources.append(source)

                overlapping_history.first_seen = min(overlapping_history.first_seen, first_seen)
                overlapping_history.last_seen = max(overlapping_history.last_seen, last_seen)
                self.save(validate=False)
                return self

        # Otherwise, just create a new record
        return self.modify(
            push__history=LinkHistory(
                description=description,
                first_seen=first_seen or datetime.utcnow(),
                last_seen=last_seen or datetime.utcnow(),
                active=active,
                sources=[source]))
Example #23
    def testSimpleIntervals(self):
        td_days = timedelta(days=7)
        time_start = datetime.utcnow() - timedelta(days=365)
        time_end = datetime.utcnow()

        tree = IntervalNode(time_start.toordinal(), time_end.toordinal())

        int_end = time_start + td_days
        int_start = time_start
        counter = 0
        while int_end < time_end:
            tree.insert(int_start.toordinal(), int_end.toordinal(), other="week %d" % counter)

            int_start = int_end
            int_end = int_end+td_days
#            print "inserting week: %d" % counter
            counter = counter + 1

        td_hours = timedelta(hours=4)

        start_check = time_start
        check_time = time_start
        day_count = 0
        hour_count = 0
        self.node_hits = 0
        while check_time < time_end:
            def report_schedule(node):
                if node.other is not None:
                    self.node_hits += 1
            tree.intersect(check_time.toordinal(), check_time.toordinal(), report_schedule)
            check_time = check_time + td_hours
            if check_time > start_check+td_days:
                #print "node hits per interval: %d" % self.node_hits
                self.node_hits = 0
                start_check = check_time
Example #24
    def get_next_job(self, suites, components, arches, checks):
        NAMESPACE.machine.last_ping = datetime.utcnow()

        if self.__class__.shutdown_request:
            return None

        arches = [x for x in arches if x not in ["source", "all"]]
        job = NAMESPACE.session.query(Job).join(Job.source).join(Source.group_suite).filter(
            ~Job.depedencies.any(),
            Job.dose_report == None,
            Job.assigned_at == None,
            Job.finished_at == None,
            Job.failed.is_(None),
            GroupSuite.suite.has(Suite.name.in_(suites)),
            Source.component.has(Component.name.in_(components)),
            (Job.arch.has(Arch.name.in_(arches)) |
             (Job.arch.has(Arch.name.in_(["source", "all"])) &
              Source.affinity.has(Arch.name.in_(arches)))),
            Job.check.has(Check.name.in_(checks)),
        ).order_by(
            Job.assigned_count.asc(),
            Source.uploaded_at.asc(),
        ).first()

        if job is None:
            return None

        job.assigned_count += 1
        job.assigned_at = datetime.utcnow()
        job.builder = NAMESPACE.machine

        emit('start', 'job', job.debilize())

        return job.debilize()
Example #25
    def start(self):
        """ Begins the job by kicking off all tasks with no dependencies. """

        if not self.state.allow_start:
            raise DagobahError('job cannot be started in its current state; ' +
                               'it is probably already running')

        self.snapshot = deepcopy(self.graph)

        is_valid, reason = self.validate(self.snapshot)
        if not is_valid:
            raise DagobahError(reason)

        # don't increment if the job was run manually
        if self.cron_iter and datetime.utcnow() > self.next_run:
            self.next_run = self.cron_iter.get_next(datetime)

        self.run_log = {'job_id': self.job_id,
                        'name': self.name,
                        'parent_id': self.parent.dagobah_id,
                        'log_id': self.backend.get_new_log_id(),
                        'start_time': datetime.utcnow(),
                        'tasks': {}}
        self._set_status('running')

        for task in self.tasks.itervalues():
            task.reset()

        for task_name in self.ind_nodes(self.snapshot):
            self._put_task_in_run_log(task_name)
            self.tasks[task_name].start()

        self._commit_run_log()
Example #26
def bulk_comment_insert(Session, commentdata, thread_id):
    """
    Bulk insert of multiple comments into the database for a given thread id (e.g. 117 not ew38xn)
    :param Session: sql alchemy session
    :param commentdata: list of comment dictionaries
    :param thread_id: thread id (e.g. 117 not ew38xn)
    :return:
    """

    # init db session
    session = Session()

    # iterate through all comments and add them to the session
    for comment in commentdata:
        newcomment = Comment(
                link_id=thread_id,
                name=comment['name'],
                parent_id=comment['parent_id'],
                score=comment['score'],
                created_utc=comment['created_utc'],
                author=comment['author'],
                body=comment['body'],
                body_html=comment['body_html'],
                lastchecked=datetime.utcnow(),
                lastmodified=datetime.utcnow()
        )

        # add to the session (the commit happens once, after the loop)
        session.add(newcomment)

    # commit all changes
    session.commit()
Example #27
def get(name, filename):
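	# Honour robots.txt: only fetch when allowed, and respect the crawl delay (0.1 s by default)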
	url = BASE_URL + filename
	allowed = robot.is_allowed(USER_AGENT, url)
	delay = robot.get_crawl_delay(USER_AGENT)
	if delay is None:
		delay = 0.1
	if allowed:
		sleep(delay)

	print("Downloading " + name + "...", flush=True, end="")
	try:
		if not allowed:
			raise Exception("Not allowed to download " + url)

		start = datetime.utcnow()
		r = session.get(url)
		duration = (datetime.utcnow() - start).total_seconds()
		r.raise_for_status()
		print(" " + size_fmt(len(r.text)) + " in " + time_fmt(duration) + " (" + size_fmt(len(r.text) / duration) + "/s)")
	except Exception as e:
		print(" " + str(e))
		raise

	if r.headers["Content-Type"] == "text/plain":
		r.encoding = "UTF-8"
	return r.text
Example #28
def create_or_login(resp):
	session['openid'] = resp.identity_url

	user_model = User()
	users = list(user_model.find({'email' : resp.email}))
	user = users[0] if users else None
	if user is None:
		user_data = {
			"nick" : resp.nickname or resp.fullname,
			"fullname" : resp.fullname,
			"email" : resp.email,
			"favourites" : [],
			"roles" : [],
			"reputation" : 1,
			"logins" : [{
				'ip' : request.remote_addr,
				'date_loggedin' : datetime.utcnow().isoformat(),
			}],
			"about" : "",
			"language" : resp.language,
			"timezone" : resp.timezone
		}
		user_data["_id"] = user_model.insert(user_data)
		session["userid"] = str(user_data["_id"])
		app.logger.log(json.dumps(session))
	else:
		user_model.update({"_id" : user["_id"]}, {"$push" : {"logins" : {
			"ip" : request.remote_addr,
			"date_loggedin" : datetime.utcnow().isoformat(),
		}}})
		session["userid"] = str(user["_id"]) 
	return redirect(url_for("bp_word.home")) 
Example #29
    def run(self, **kwargs):
        logger.info("TASK :: event_dispatcher")

        # List all the events where event.start > NOW() - 12 hours and status = EVENT_STATUS.PENDING
        start_from = datetime.utcnow().replace(tzinfo=utc) - timedelta(hours=12)
        start_to = datetime.utcnow().replace(tzinfo=utc)
        event_list = Event.objects.filter(start__gte=start_from, start__lte=start_to, status=EVENT_STATUS.PENDING)
        for obj_event in event_list:
            try:
                # Get and perform alarm
                obj_alarm = Alarm.objects.get(event=obj_event)
                perform_alarm.delay(obj_event, obj_alarm)
            except ObjectDoesNotExist:
                pass

            # Check if need to create a sub event in the future
            next_occurrence = obj_event.get_next_occurrence()
            print "next_occurrence"
            print next_occurrence

            if next_occurrence:
                # The result of get_next_occurrences help to create the next event
                new_event = obj_event.copy_event(next_occurrence)

                # Copy the alarm link to the event
                alarm_list = Alarm.objects.filter(event=obj_event)
                for obj_alarm in alarm_list:
                    obj_alarm.copy_alarm(new_event)

            # Mark the event as COMPLETED
            obj_event.status = EVENT_STATUS.COMPLETED
            obj_event.save()
Example #30
def home(request):
    """This is home page of the site."""

    username = None
    language_code = request.LANGUAGE_CODE

    # we need to get testimonials in the users language
    all_testimonials = Testimonial.objects.filter(language=language_code)

    context = {
        'testimonial': choice(all_testimonials),
    }

    # we also need to get problems counter
    counter = ProblemCounter.objects.get(pk=1)
    # lets see if we need to increase counter
    if counter.next_update > datetime.utcnow().replace(tzinfo=utc):
        # not yet
        context.update({
            'counter': counter.count,
        })
    else:
        # time to increase counter and set new time
        new_counter = counter.count + 1
        new_update_time = datetime.utcnow().replace(tzinfo=utc) + timedelta(hours=choice(range(4, 16)))
        counter.count = new_counter
        counter.next_update = new_update_time
        counter.save()
        context.update({
            'counter': new_counter,
        })

    return render(request, 'mainsite/index.html', context)
Example #31
      start = start_ts.strftime('%Y-%m-%dT%H:%M:%SZ')
      gap = _format_interval(interval)
      if start_was_none:
        start_ts = _clamp_date(interval, start_ts)
        start = start_ts.strftime('%Y-%m-%dT%H:%M:%SZ')
        stats_max = end
        stats_min = start
      else:
        start = start_ts.strftime('%Y-%m-%dT%H:%M:%SZ')
    elif stat_facet['max'] == 'NOW':
      is_date = True
      domain_ms = _get_interval_duration(stat_facet['min'])
      interval = _get_interval(domain_ms, SLOTS)
      nb_slot = domain_ms / interval['ms']
      gap = _format_interval(interval)
      end_ts = datetime.utcnow()
      end_ts_clamped = _clamp_date(interval, end_ts)
      end_ts = _get_next_interval(interval, end_ts_clamped, end_ts_clamped != end_ts)
      start_ts = _remove_duration(interval, nb_slot, end_ts)
      stats_max = end = end_ts.strftime('%Y-%m-%dT%H:%M:%SZ')
      stats_min = start = start_ts.strftime('%Y-%m-%dT%H:%M:%SZ')

    properties.update({
      'min': stats_min,
      'max': stats_max,
      'start': start,
      'end': end,
      'gap': gap,
      'slot': SLOTS,
      'canRange': True,
      'isDate': is_date,
Example #32
    def send(self, request, **kwargs):
        """
        Send a given PreparedRequest.

        :rtype: requests.Response
        """
        # Set defaults that the hooks can utilize to ensure they always have
        # the correct parameters to reproduce the previous request.
        kwargs.setdefault('stream', self.stream)
        kwargs.setdefault('verify', self.verify)
        kwargs.setdefault('cert', self.cert)
        kwargs.setdefault('proxies', self.proxies)

        # It's possible that users might accidentally send a Request object.
        # Guard against that specific failure case.
        if isinstance(request, Request):
            raise ValueError('You can only send PreparedRequests.')

        # Set up variables needed for resolve_redirects and dispatching of hooks
        allow_redirects = kwargs.pop('allow_redirects', True)
        stream = kwargs.get('stream')
        hooks = request.hooks

        # Resolve URL in redirect cache, if available.
        if allow_redirects:
            checked_urls = set()
            while request.url in self.redirect_cache:
                checked_urls.add(request.url)
                new_url = self.redirect_cache.get(request.url)
                if new_url in checked_urls:
                    break
                request.url = new_url

        # Get the appropriate adapter to use
        adapter = self.get_adapter(url=request.url)

        # Start time (approximately) of the request
        start = datetime.utcnow()

        # Send the request
        r = adapter.send(request, **kwargs)

        # Total elapsed time of the request (approximately)
        r.elapsed = datetime.utcnow() - start

        # Response manipulation hooks
        r = dispatch_hook('response', hooks, r, **kwargs)

        # Persist cookies
        if r.history:

            # If the hooks create history then we want those cookies too
            for resp in r.history:
                extract_cookies_to_jar(self.cookies, resp.request, resp.raw)

        extract_cookies_to_jar(self.cookies, request, r.raw)

        # Redirect resolving generator.
        gen = self.resolve_redirects(r, request, **kwargs)

        # Resolve redirects if allowed.
        history = [resp for resp in gen] if allow_redirects else []

        # Shuffle things around if there's history.
        if history:
            # Insert the first (original) request at the start
            history.insert(0, r)
            # Get the last request made
            r = history.pop()
            r.history = history

        if not stream:
            r.content

        return r
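
A typical way this method gets exercised from user code (a usage sketch against the public requests API, not taken from the code above):

import requests

session = requests.Session()
prepared = requests.Request('GET', 'https://example.com').prepare()
response = session.send(prepared)
print(response.elapsed)  # the timedelta computed from the utcnow() calls above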
Example #33
from falcon import testing
import pytest
import json
import jwt
from datetime import datetime, timedelta
import sys, os
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, myPath + '/../')
from app import api

JWT_SECRET = 'secret'
JWT_ALGORITHM = 'HS256'
JWT_EXP_DELTA_SECONDS = 86400

token = jwt.encode(dict(
    user_id = int(1),
    exp = datetime.utcnow() + timedelta(seconds=int(JWT_EXP_DELTA_SECONDS))
), JWT_SECRET, algorithm = JWT_ALGORITHM).decode('UTF-8')

customer1 = {
    'id': 1,
    'name': 'Ronaldo',
    'dob': '1991-01-08'
}

@pytest.fixture()
def client():
    # Assume the hypothetical `myapp` package has a function called
    # `create()` to initialize and return a `falcon.API` instance.
    return testing.TestClient(api)
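
A usage sketch of how the fixture and token would typically be combined in a test (the route and Authorization header scheme are assumptions, not taken from the code above):

def test_get_customer(client):
    result = client.simulate_get('/customers/1',
                                 headers={'Authorization': 'Bearer ' + token})
    assert result.status_code == 200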

Example #34
    def _update_subscription(self, subscription):
        """ Update subscription settings """

        db = current.db
        s3db = current.s3db

        pe_id = subscription["pe_id"]

        # Save filters
        filter_id = subscription["filter_id"]
        filters = subscription.get("filters")
        if filters:
            ftable = s3db.pr_filter

            if not filter_id:
                success = ftable.insert(pe_id=pe_id, query=filters)
                filter_id = success
            else:
                success = db(ftable.id == filter_id).update(query=filters)
            if not success:
                return None

        # Save subscription settings
        stable = s3db.pr_subscription
        subscription_id = subscription["id"]
        frequency = subscription["frequency"]
        if not subscription_id:
            success = stable.insert(pe_id=pe_id,
                                    filter_id=filter_id,
                                    notify_on=subscription["notify_on"],
                                    frequency=frequency,
                                    method=subscription["method"])
            subscription_id = success
        else:
            success = db(stable.id == subscription_id).update(
                pe_id=pe_id,
                filter_id=filter_id,
                notify_on=subscription["notify_on"],
                frequency=frequency,
                method=subscription["method"])
        if not success:
            return None

        # Save subscriptions
        rtable = s3db.pr_subscription_resource
        subscribe = subscription.get("subscribe")
        if subscribe:
            from datetime import datetime, timedelta
            now = datetime.utcnow()
            resources = subscription["resources"]

            subscribed = {}
            timestamps = {}
            if resources:
                for r in resources:
                    subscribed[(r.resource, r.url)] = r.id
                    timestamps[r.id] = (r.last_check_time, r.next_check_time)

            intervals = s3db.pr_subscription_check_intervals
            interval = timedelta(minutes=intervals.get(frequency, 0))

            keep = set()
            fk = '''{"subscription_id": %s}''' % subscription_id
            for new in subscribe:
                resource, url = new["resource"], new["url"]
                if (resource, url) not in subscribed:
                    # Restore subscription if previously unsubscribed, else
                    # insert new record
                    unsubscribed = {
                        "deleted": True,
                        "deleted_fk": fk,
                        "resource": resource,
                        "url": url
                    }
                    rtable.update_or_insert(_key=unsubscribed,
                                            deleted=False,
                                            deleted_fk=None,
                                            subscription_id=subscription_id,
                                            resource=resource,
                                            url=url,
                                            last_check_time=now,
                                            next_check_time=None)
                else:
                    # Keep it
                    record_id = subscribed[(resource, url)]
                    last_check_time, next_check_time = timestamps[record_id]
                    data = {}
                    if not last_check_time:
                        # Someone has tampered with the timestamps, so
                        # we need to reset them and start over
                        last_check_time = now
                        data["last_check_time"] = last_check_time
                    due = last_check_time + interval
                    if next_check_time != due:
                        # Time interval has changed
                        data["next_check_time"] = due
                    if data:
                        db(rtable.id == record_id).update(**data)
                    keep.add(record_id)

            # Unsubscribe all others
            unsubscribe = set(subscribed.values()) - keep
            db(rtable.id.belongs(unsubscribe)).update(deleted=True,
                                                      deleted_fk=fk,
                                                      subscription_id=None)

        # Update subscription
        subscription["id"] = subscription_id
        subscription["filter_id"] = filter_id
        return subscription
Example #35
def get_utcnow():
    """
    Used to make it easier to mock utcnow() in the tests.
    """
    return datetime.utcnow()
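
Because callers go through this helper instead of calling datetime.utcnow() directly, tests can replace the time source by patching a single name. A minimal sketch, assuming the helper lives in a hypothetical module myapp.clock:

from datetime import datetime
from unittest import mock

import myapp.clock  # hypothetical location of get_utcnow()

with mock.patch('myapp.clock.get_utcnow', return_value=datetime(2021, 1, 1)):
    assert myapp.clock.get_utcnow() == datetime(2021, 1, 1)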
Example #36
def antennaS11(argv):
  """
  Acquire OSL and antenna traces through a receiver to produce 
  a calibrated antenna S11 measurement.
  """
  
  # Set default parameter values
  result = True
  parser = argparse.ArgumentParser(description= 'Acquire OSL and antenna traces through the receiver to produce a calibrated antenna S11 measurement.')
  parser.add_argument('folder', help='Output folder to save traces.')
  parser.add_argument('-m', '--message', nargs=1, help='Store a comment with the measurement')
  parser.add_argument('-e', '--external', help='Apply correction from lab measurement of external OSL.  Specify calibration folder')
  parser.add_argument('-f', '--freq', nargs='?', default=75e6, type=float, help='Frequency (Hz) for normalizing the external cal fits')
  parser.add_argument('-n', '--npoly', nargs='?', default=10, type=int, help='Number of polynomial terms in external cal fits')
  parser.add_argument('-p', '--plot', action='store_true', help='Plot the antenna S11')
  parser.add_argument('-r', '--revisit', action='store_true', help='Use existing data in specified folder rather than acquire new traces')
  args = parser.parse_args()

  print args


  # ---------------------------------------------------------------------------
  # Get the EDGES configuration
  # ---------------------------------------------------------------------------

  # Get an instance of the EDGES class
  d = edges.edges()

  # Read the environment sensors
  values = d.getEnvironmentSensors()

  # Read the sensor labels from the EDGES settings
  labels = (  d.settings.get('Environment', 'sensor1_label'),
              d.settings.get('Environment', 'sensor2_label'),
              d.settings.get('Environment', 'sensor3_label'),
              'Rack Temperature' )

  # Read the sensor units from the EDGES settings
  units = ( d.settings.get('Environment', 'sensor1_unit'),
            d.settings.get('Environment', 'sensor2_unit'),
            d.settings.get('Environment', 'sensor3_unit'),
            'K' )
    
  # Read the installation settings
  site = d.settings.get('Installation', 'site')
  instrument = d.settings.get('Installation', 'instrument')

  # Set the subdir from the specified folder
  subdir = '{}/{}/s11/{}'.format(site, instrument, args.folder)

  gammas = None

  print('')

  if args.revisit:

    # -------------------------------------------------------------------------
    # Load existing traces for additional work
    # -------------------------------------------------------------------------

    # Look for trace files in the folder
    files = sorted(glob.glob('{}/{}/*_input1.s1p'.format(d.getDataDir(), subdir)))

    if len(files) > 1:
      print('Found multiple trace sets, using first set\n')

    # Get the timestamp
    timestamp = d.parseFileDateString(os.path.split(files[0])[1][:-11])

    print('Using existing traces from: {}\n'.format(timestamp))

    # Get a set of trace files
    inputfiles = []
    inputfiles.append(files[0])
    inputfiles.append('{}_input2.s1p'.format(files[0][:-11]))
    inputfiles.append('{}_input3.s1p'.format(files[0][:-11]))
    inputfiles.append('{}_input4.s1p'.format(files[0][:-11]))

    # Load the trace files
    for index, name in enumerate(inputfiles):
      r, f = vna.s1p_read(name)
      if gammas is None:
        gammas = np.zeros((len(f),4), dtype=np.complex128)
      gammas[:,index] = r

    print('Successfully loaded traces.\n')

  else:

    # -------------------------------------------------------------------------
    # Connect to VNA and acquire new traces
    # -------------------------------------------------------------------------

    # Get the current time
    timestamp = datetime.utcnow()

    print '\nTimestamp: {}'.format(timestamp)

    print('\nReading and writing environmental sensor data...\n')

    # Print the labels and values to stdout
    for l, v, u in zip(labels, values, units):
      print("{}: {:.2f} {}".format(l.rjust(25), v, u))

    # Concatenate label text and units
    fullLabels = ["{} [{}]".format(l, u) for l, u in zip(labels, units)]
	
    # Write to EDGES file record
    result = d.writeRecord(subdir, 'sensors', timestamp, values, fullLabels, breaklevel=5)

    print("")

    # Start the VNA measurements
    session = d.startVNASession()

    # Loop over each OSL+Antenna input
    inputs = {1, 2, 3, 4}
    for i in inputs:

      print('\nAcquiring input {}...'.format(i))

      # Acquire the trace and save it to the subdir
      data = d.getVNATrace(i, session)

      # Write to file
      output_path = d.getFullPath(subdir, 'input{}.s1p'.format(i), timestamp, level=5)
      with open (output_path, "w") as outfile:
        outfile.write(data)
      
      # Add to our array of traces
      r, f = vna.s1p_read_from_text(data)
      if gammas is None:
        gammas = np.zeros((len(f),4), dtype=np.complex128)

      gammas[:,i-1] = r
      
    # Close the VNA session  
    d.stopVNASession(session)

  ## end if args.revisit

  # write comment if there is one
  if args.message:
    result = d.writeText(subdir, 'message', timestamp, args.message[0], 'Message', breaklevel=5)
    print('\nMessage: ' + args.message[0] + '\n') 

  if gammas is None:
    return False

  # ---------------------------------------------------------------------------
  # Apply internal OSL calibration
  # ---------------------------------------------------------------------------
  print('Applying internal OSL correction...\n')

  # Correct the antenna S11 using the OSL inputs
  gammas_internal_corrected = vna.internal_s11_correction(gammas[:,0], gammas[:,1], gammas[:,2], gammas[:,3])

  # Save internally calibrated antenna S11
  output_path = d.getFullPath(subdir, 'antenna_s11.s1p', timestamp, level=5)
  vna.s1p_write(output_path, f, gammas_internal_corrected, 0)

  # ---------------------------------------------------------------------------
  # Apply external OSL calibration (optional)
  # ---------------------------------------------------------------------------
  if args.external != None:
    
    print('Applying (optional) external OSL correction...\n')

    bExternalCorrection = True

    folder_calib = args.external
    subdir_calib = '{}/{}/s11_calibration/{}'.format(site, instrument, folder_calib)

    # The usual internal OSL standards measured at time of external calibration
    int_open, f_int = vna.s1p_read('{}/{}/internal_open.s1p'.format(d.getDataDir(), subdir_calib))
    int_short, f_int = vna.s1p_read('{}/{}/internal_short.s1p'.format(d.getDataDir(), subdir_calib))
    int_load, f_int = vna.s1p_read('{}/{}/internal_load.s1p'.format(d.getDataDir(), subdir_calib))

    # External sources measured through input 4
    ext_open, f_ext = vna.s1p_read('{}/{}/external_open.s1p'.format(d.getDataDir(), subdir_calib))
    ext_short, f_ext = vna.s1p_read('{}/{}/external_short.s1p'.format(d.getDataDir(), subdir_calib))
    ext_load, f_ext = vna.s1p_read('{}/{}/external_load.s1p'.format(d.getDataDir(), subdir_calib))
    resistance_of_match = np.genfromtxt('{}/{}/resistance_of_match.txt'.format(d.getDataDir(), subdir_calib), comments='#')

    # Apply the correction
    gammas_external_corrected = vna.external_s11_correction(
        f, gammas_internal_corrected, 
        f_ext, int_open, int_short, int_load, 
        ext_open, ext_short, ext_load, 
        resistance_of_match, 
        f_norm=args.freq, N_poly_terms=args.npoly)[0]

    # Save externally calibrated antenna S11
    output_path = d.getFullPath(subdir, 'calibrated_{}_antenna_s11.s1p'.format(folder_calib), timestamp, level=5)
    vna.s1p_write(output_path, f, gammas_external_corrected, 0)

  # ---------------------------------------------------------------------------
  # Make output plot and save to PNG
  # ---------------------------------------------------------------------------  
  if args.plot:

    # Plot
    f1 = plt.figure(num=1, figsize=(14, 6))

    plt.subplot(1,2,1)
    plt.plot(f/1e6, 20 * np.log10(np.abs(gammas_internal_corrected)))
    if args.external != None:
      plt.plot(f/1e6, 20 * np.log10(np.abs(gammas_external_corrected)))
    ax = plt.gca()
    ax.set_yticks(np.arange(-20,0,1))
    plt.xlim([40, 200])
    plt.ylim([-20, 0])
    plt.grid()
    plt.xlabel('frequency [MHz]')
    plt.ylabel('magnitude [dB]')

    plt.subplot(1,2,2)
    plt.plot(f/1e6, 180/np.pi * np.unwrap(np.angle(gammas_internal_corrected)))
    if args.external != None:
      plt.plot(f/1e6, 180/np.pi * np.unwrap(np.angle(gammas_external_corrected)))
    plt.xlim([40, 200])
    plt.ylim([-800, 20])
    plt.grid()
    plt.xlabel('frequency [MHz]')
    plt.ylabel('phase [deg]')

    # Save plot
    output_plot = d.getFullPath(subdir, 'antenna_s11.png', timestamp, level=5)
    plt.savefig(output_plot, bbox_inches='tight')
    plt.close(f1)

    # Display plot
    eog_command = 'eog {} &'.format(output_plot)
    subprocess.call(eog_command, shell = True)

 
  if result:
    print('Success.\n')
  else:
    print('Failed.\n')

  return result
Example #37
def ingest(objectid,
           dsets_file,
           grq_update_url,
           dataset_processed_queue,
           prod_path,
           job_path,
           dry_run=False,
           force=False):
    """Run dataset ingest."""
    logger.info("#" * 80)
    logger.info("datasets: %s" % dsets_file)
    logger.info("grq_update_url: %s" % grq_update_url)
    logger.info("dataset_processed_queue: %s" % dataset_processed_queue)
    logger.info("prod_path: %s" % prod_path)
    logger.info("job_path: %s" % job_path)
    logger.info("dry_run: %s" % dry_run)
    logger.info("force: %s" % force)

    # get dataset
    if os.path.isdir(prod_path):
        local_prod_path = prod_path
    else:
        local_prod_path = get_remote_dav(prod_path)
    if not os.path.isdir(local_prod_path):
        raise RuntimeError("Failed to find local dataset directory: %s" %
                           local_prod_path)

    # dataset name
    pname = os.path.basename(local_prod_path)

    # dataset file
    dataset_file = os.path.join(local_prod_path, '%s.dataset.json' % pname)

    # get dataset json
    with open(dataset_file) as f:
        dataset = json.load(f)
    logger.info("Loaded dataset JSON from file: %s" % dataset_file)

    # check minimum requirements for dataset JSON
    logger.info("Verifying dataset JSON...")
    verify_dataset(dataset)
    logger.info("Dataset JSON verfication succeeded.")

    # get version
    version = dataset['version']

    # recognize
    r = Recognizer(dsets_file, local_prod_path, objectid, version)

    # get ipath
    ipath = r.currentIpath

    # get extractor
    extractor = r.getMetadataExtractor()
    if extractor is not None:
        match = SCRIPT_RE.search(extractor)
        if match: extractor = match.group(1)
    logger.info("Configured metadata extractor: %s" % extractor)

    # metadata file
    metadata_file = os.path.join(local_prod_path, '%s.met.json' % pname)

    # metadata seed file
    seed_file = os.path.join(local_prod_path, 'met.json')

    # metadata file already here
    if os.path.exists(metadata_file):
        with open(metadata_file) as f:
            metadata = json.load(f)
        logger.info("Loaded metadata from existing file: %s" % metadata_file)
    else:
        if extractor is None:
            logger.info(
                "No metadata extraction configured. Setting empty metadata.")
            metadata = {}
        else:
            logger.info("Running metadata extractor %s on %s" %
                        (extractor, local_prod_path))
            m = check_output([extractor, local_prod_path])
            logger.info("Output: %s" % m)

            # generate json to update metadata and urls
            metadata = json.loads(m)

            # set data_product_name
            metadata['data_product_name'] = objectid

            # merge with seed metadata
            if os.path.exists(seed_file):
                with open(seed_file) as f:
                    seed = json.load(f)
                metadata.update(seed)
                logger.info("Loaded seed metadata from file: %s" % seed_file)

            # write it out to file
            with open(metadata_file, 'w') as f:
                json.dump(metadata, f, indent=2)
            logger.info("Wrote metadata to %s" % metadata_file)

            # delete seed file
            if os.path.exists(seed_file):
                os.unlink(seed_file)
                logger.info("Deleted seed file %s." % seed_file)

    # add context
    context_file = os.path.join(local_prod_path, '%s.context.json' % pname)
    if os.path.exists(context_file):
        with open(context_file) as f:
            context = json.load(f)
        logger.info("Loaded context from existing file: %s" % context_file)
    else:
        context = {}
    metadata['context'] = context

    # set metadata and dataset groups in recognizer
    r.setDataset(dataset)
    r.setMetadata(metadata)

    # get level
    level = r.getLevel()

    # get type
    dtype = r.getType()

    # get publish path
    pub_path_url = r.getPublishPath()

    # get publish urls
    pub_urls = [i for i in r.getPublishUrls()]

    # get S3 profile name and api keys for dataset publishing
    s3_secret_key, s3_access_key = r.getS3Keys()
    s3_profile = r.getS3Profile()

    # set osaka params
    osaka_params = {}

    # S3 profile takes precedence over explicit api keys
    if s3_profile is not None:
        osaka_params['profile_name'] = s3_profile
    else:
        if s3_secret_key is not None and s3_access_key is not None:
            osaka_params['aws_access_key_id'] = s3_access_key
            osaka_params['aws_secret_access_key'] = s3_secret_key

    # get browse path and urls
    browse_path = r.getBrowsePath()
    browse_urls = r.getBrowseUrls()

    # get S3 profile name and api keys for browse image publishing
    s3_secret_key_browse, s3_access_key_browse = r.getS3Keys("browse")
    s3_profile_browse = r.getS3Profile("browse")

    # set osaka params for browse
    osaka_params_browse = {}

    # S3 profile takes precedence over explicit api keys
    if s3_profile_browse is not None:
        osaka_params_browse['profile_name'] = s3_profile_browse
    else:
        if s3_secret_key_browse is not None and s3_access_key_browse is not None:
            osaka_params_browse['aws_access_key_id'] = s3_access_key_browse
            osaka_params_browse['aws_secret_access_key'] = s3_secret_key_browse

    # get pub host and path
    logger.info("Configured pub host & path: %s" % (pub_path_url))

    # check scheme
    if not osaka.main.supported(pub_path_url):
        raise RuntimeError("Scheme %s is currently not supported." %
                           urlparse(pub_path_url).scheme)

    # upload dataset to repo; track disk usage and start/end times of transfer
    prod_dir_usage = get_disk_usage(local_prod_path)
    tx_t1 = datetime.utcnow()
    if dry_run:
        logger.info("Would've published %s to %s" %
                    (local_prod_path, pub_path_url))
    else:
        publish_dataset(local_prod_path,
                        pub_path_url,
                        params=osaka_params,
                        force=force)
    tx_t2 = datetime.utcnow()

    # add metadata for all browse images and upload to browse location
    imgs_metadata = []
    imgs = glob('%s/*browse.png' % local_prod_path)
    for img in imgs:
        img_metadata = {'img': os.path.basename(img)}
        small_img = img.replace('browse.png', 'browse_small.png')
        if os.path.exists(small_img):
            small_img_basename = os.path.basename(small_img)
            if browse_path is not None:
                this_browse_path = os.path.join(browse_path,
                                                small_img_basename)
                if dry_run:
                    logger.info("Would've uploaded %s to %s" %
                                (small_img, browse_path))
                else:
                    logger.info("Uploading %s to %s" %
                                (small_img, browse_path))
                    osaka.main.put(small_img,
                                   this_browse_path,
                                   params=osaka_params_browse,
                                   noclobber=False)
        else:
            small_img_basename = None
        img_metadata['small_img'] = small_img_basename
        tooltip_match = BROWSE_RE.search(img_metadata['img'])
        if tooltip_match: img_metadata['tooltip'] = tooltip_match.group(1)
        else: img_metadata['tooltip'] = ""
        imgs_metadata.append(img_metadata)

    # sort browse images
    browse_sort_order = r.getBrowseSortOrder()
    if isinstance(browse_sort_order,
                  types.ListType) and len(browse_sort_order) > 0:
        bso_regexes = [re.compile(i) for i in browse_sort_order]
        sorter = {}
        unrecognized = []
        for img in imgs_metadata:
            matched = None
            for i, bso_re in enumerate(bso_regexes):
                if bso_re.search(img['img']):
                    matched = img
                    sorter[i] = matched
                    break
            if matched is None: unrecognized.append(img)
        imgs_metadata = [sorter[i] for i in sorted(sorter)]
        imgs_metadata.extend(unrecognized)

    # save dataset metrics on size and transfer
    tx_dur = (tx_t2 - tx_t1).total_seconds()
    prod_metrics = {
        'ipath': ipath,
        'url': urlparse(pub_path_url).path,
        'path': local_prod_path,
        'disk_usage': prod_dir_usage,
        'time_start': tx_t1.isoformat() + 'Z',
        'time_end': tx_t2.isoformat() + 'Z',
        'duration': tx_dur,
        'transfer_rate': prod_dir_usage / tx_dur
    }

    # set update json
    ipath = r.currentIpath
    update_json = {
        'id': objectid,
        'objectid': objectid,
        'metadata': metadata,
        'urls': pub_urls,
        'browse_urls': browse_urls,
        'images': imgs_metadata,
        'dataset': ipath.split('/')[1],
        'ipath': ipath,
        'system_version': version,
        'dataset_level': level,
        'dataset_type': dtype,
    }
    update_json.update(dataset)
    #logger.info("update_json: %s" % pformat(update_json))

    # custom index specified?
    index = r.getIndex()
    if index is not None: update_json['index'] = index

    # update GRQ
    if isinstance(update_json['metadata'],
                  types.DictType) and len(update_json['metadata']) > 0:
        #logger.info("update_json: %s" % pformat(update_json))
        if dry_run:
            logger.info("Would've indexed doc at %s: %s" %
                        (grq_update_url,
                         json.dumps(update_json, indent=2, sort_keys=True)))
        else:
            res = index_dataset(grq_update_url, update_json)
            logger.info("res: %s" % res)
            update_json['grq_index_result'] = res

    # finish if dry run
    if dry_run: return (prod_metrics, update_json)

    # create PROV-ES JSON file for publish processStep
    prod_prov_es_file = os.path.join(
        local_prod_path, '%s.prov_es.json' % os.path.basename(local_prod_path))
    pub_prov_es_bn = "publish.prov_es.json"
    if os.path.exists(prod_prov_es_file):
        pub_prov_es_file = os.path.join(local_prod_path, pub_prov_es_bn)
        prov_es_info = {}
        with open(prod_prov_es_file) as f:
            try:
                prov_es_info = json.load(f)
            except Exception, e:
                tb = traceback.format_exc()
                raise (RuntimeError("Failed to load PROV-ES from %s: %s\n%s" %
                                    (prod_prov_es_file, str(e), tb)))
        log_publish_prov_es(prov_es_info, pub_prov_es_file, local_prod_path,
                            pub_urls, prod_metrics, objectid)
        # upload publish PROV-ES file
        osaka.main.put(pub_prov_es_file,
                       os.path.join(pub_path_url, pub_prov_es_bn),
                       params=osaka_params,
                       noclobber=False)
Пример #38
0
    def _fetch(self, url, payload=None, method='GET'):
        """
        Wrapper to request object

        """

        # Prepare our headers:
        headers = {
            'User-Agent': self.app_id,
            'Accept': 'application/json',
            'Authorization': 'Bearer ' + self.token,
        }
        if payload:
            # Only set our header payload if it's defined
            headers['Content-Type'] = 'application/json'

        # Default content response object
        content = {}

        # Update our URL
        url = '{}/{}'.format(GITTER_API_URL, url)

        # Some Debug Logging
        self.logger.debug('Gitter {} URL: {} (cert_verify={})'.format(
            method,
            url, self.verify_certificate))
        if payload:
            self.logger.debug('Gitter Payload: {}' .format(payload))

        # By default set wait to None
        wait = None

        if self.ratelimit_remaining == 0:
            # Determine how long we should wait for or if we should wait at
            # all. This isn't fool-proof because we can't be sure the client
            # time (calling this script) is completely synced up with the
            # Gitter server.  One would hope we're on NTP and our clocks are
            # the same allowing this to roll smoothly:

            now = datetime.utcnow()
            if now < self.ratelimit_reset:
                # We need to throttle for the difference in seconds
                # We add 0.5 seconds to the end just to allow a grace
                # period.
                wait = (self.ratelimit_reset - now).total_seconds() + 0.5

        # Always call throttle before any remote server i/o is made
        self.throttle(wait=wait)

        # fetch function
        fn = requests.post if method == 'POST' else requests.get
        try:
            r = fn(
                url,
                data=payload,
                headers=headers,
                verify=self.verify_certificate,
            )

            if r.status_code != requests.codes.ok:
                # We had a problem
                status_str = \
                    NotifyGitter.http_response_code_lookup(r.status_code)

                self.logger.warning(
                    'Failed to send Gitter {} to {}: '
                    '{}error={}.'.format(
                        method,
                        url,
                        ', ' if status_str else '',
                        r.status_code))

                self.logger.debug(
                    'Response Details:\r\n{}'.format(r.content))

                # Mark our failure
                return (False, content)

            try:
                content = loads(r.content)

            except (TypeError, ValueError):
                # ValueError = r.content is Unparsable
                # TypeError = r.content is None
                content = {}

            try:
                self.ratelimit_remaining = \
                    int(r.headers.get('X-RateLimit-Remaining'))
                self.ratelimit_reset = datetime.utcfromtimestamp(
                    int(r.headers.get('X-RateLimit-Reset')))

            except (TypeError, ValueError):
                # This is returned if we could not retrieve this information
                # gracefully accept this state and move on
                pass

        except requests.RequestException as e:
            self.logger.warning(
                'Exception received when sending Gitter {} to {}: '.
                format(method, url))
            self.logger.debug('Socket Exception: %s' % str(e))

            # Mark our failure
            return (False, content)

        return (True, content)
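The comment block inside `_fetch` above describes how the X-RateLimit-Remaining and X-RateLimit-Reset headers drive the client-side wait. A minimal standalone sketch of that same computation, assuming a plain dict of response headers (`resp_headers` is a hypothetical stand-in for `r.headers`):

from datetime import datetime

def compute_rate_limit_wait(resp_headers, now=None):
    """Return seconds to sleep before the next call, or None if no wait is needed."""
    now = now or datetime.utcnow()
    try:
        remaining = int(resp_headers.get('X-RateLimit-Remaining'))
        reset_at = datetime.utcfromtimestamp(
            int(resp_headers.get('X-RateLimit-Reset')))
    except (TypeError, ValueError):
        # headers missing or malformed; gracefully skip throttling
        return None
    if remaining == 0 and now < reset_at:
        # wait out the difference plus a small grace period,
        # mirroring the 0.5 second buffer used in _fetch above
        return (reset_at - now).total_seconds() + 0.5
    return None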
Пример #39
0
def execute(chain_config, notification_enabled, relay_from_block_num,
            apply_relay_from_block_num, relay_ignore_sec_threshold):
    """ 未完了の入出金処理の検出
    """
    logger.info(
        'Relay from block: {relay_from_block_num}, '
        'Apply Relay from block: {apply_relay_from_block_num}'
        .format(relay_from_block_num=relay_from_block_num,
                apply_relay_from_block_num=apply_relay_from_block_num))

    # Create the Web3 providers
    provider_from = Web3.HTTPProvider(chain_config['chainRpcUrlFrom'])
    provider_to = Web3.HTTPProvider(chain_config['chainRpcUrlTo'])

    # Fetch Relay event logs
    relay_events = contract.get_relay_event_logs(
        provider_from,
        chain_config['bridgeContractAddressFrom'],
        relay_from_block_num)

    # Fetch ApplyRelay event logs
    apply_relay_events = contract.get_apply_relay_event_logs(
        provider_to,
        chain_config['bridgeContractAddressTo'],
        apply_relay_from_block_num)

    # Collect Relay events whose applyRelay has already been executed
    completed_relay_tx_hashes = set()
    for apply_relay_event in apply_relay_events:
        parsed_apply_relay_event = contract.parse_apply_relay_event_log(
            apply_relay_event)
        completed_relay_tx_hashes.add(parsed_apply_relay_event['relayTxHash'])

    # Collect Relay events whose applyRelay has not been executed yet
    pending_relays = []
    for relay_event in relay_events:
        parsed_relay_event = contract.parse_relay_event_log(relay_event)

        # NOTICE: ignore the most recent Relay events, since there is a time lag before the batch processes them
        if (datetime.utcnow().timestamp() - parsed_relay_event['timestamp']) \
                < relay_ignore_sec_threshold:
            continue

        if parsed_relay_event['txHash'] not in completed_relay_tx_hashes:
            pending_relays.append(parsed_relay_event)

    # Are there any pending (incomplete) relays?
    if len(pending_relays) > 0:
        logger.info(
            '{pending_relay_count} pending relays were detected.'
            .format(pending_relay_count=len(pending_relays)))

        for pending_relay in pending_relays:
            logger.info(
                '[RelayEvent] timestamp={timestamp}, blockNum={blockNumber}, '
                'txHash={txHash}, sender={sender}, recipient={recipient}, '
                'amount={amount}, fee={fee}'.format(**pending_relay))

        # Notify Slack
        if notification_enabled:
            notification.notify_pending_relays(
                chain_config['isDeposit'],
                pending_relays,
                relay_from_block_num,
                apply_relay_from_block_num)
    else:
        logger.info("No pending relay was detected.")
Пример #40
0
class NotifyGitter(NotifyBase):
    """
    A wrapper for Gitter Notifications
    """

    # The default descriptive name associated with the Notification
    service_name = 'Gitter'

    # The services URL
    service_url = 'https://gitter.im/'

    # All Gitter requests are secure
    secure_protocol = 'gitter'

    # A URL that takes you to the setup/help of the specific protocol
    setup_url = 'https://github.com/caronc/apprise/wiki/Notify_gitter'

    # Allows the user to specify the NotifyImageSize object
    image_size = NotifyImageSize.XY_32

    # Gitter does not support a title
    title_maxlen = 0

    # Gitter is kind enough to return how many more requests we're allowed to
    # continue to make within its header response as:
    # X-RateLimit-Reset: The epoch time (in seconds) we can expect our
    #                    rate-limit to be reset.
    # X-RateLimit-Remaining: an integer identifying how many requests we're
    #                        still allowed to make.
    request_rate_per_sec = 0

    # For Tracking Purposes
    ratelimit_reset = datetime.utcnow()

    # Default to 1
    ratelimit_remaining = 1

    # Default Notification Format
    notify_format = NotifyFormat.MARKDOWN

    # Define object templates
    templates = (
        '{schema}://{token}:{targets}/',
    )

    # Define our template tokens
    template_tokens = dict(NotifyBase.template_tokens, **{
        'token': {
            'name': _('Token'),
            'type': 'string',
            'regex': (r'[a-z0-9]{40}', 'i'),
            'private': True,
            'required': True,
        },
        'targets': {
            'name': _('Rooms'),
            'type': 'list:string',
        },
    })

    # Define our template arguments
    template_args = dict(NotifyBase.template_args, **{
        'image': {
            'name': _('Include Image'),
            'type': 'bool',
            'default': False,
            'map_to': 'include_image',
        },
        'to': {
            'alias_of': 'targets',
        },
    })

    def __init__(self, token, targets, include_image=False, **kwargs):
        """
        Initialize Gitter Object
        """
        super(NotifyGitter, self).__init__(**kwargs)

        try:
            # The personal access token associated with the account
            self.token = token.strip()

        except AttributeError:
            # Token was None
            msg = 'No API Token was specified.'
            self.logger.warning(msg)
            raise TypeError(msg)

        if not VALIDATE_TOKEN.match(self.token):
            msg = 'The Personal Access Token specified ({}) is invalid.' \
                  .format(token)
            self.logger.warning(msg)
            raise TypeError(msg)

        # Parse our targets
        self.targets = parse_list(targets)

        # Used to track mapping of rooms to their numeric id lookup for
        # messaging
        self._room_mapping = None

        # Track whether we want to send an image with our notification.
        self.include_image = include_image

    def send(self, body, title='', notify_type=NotifyType.INFO, **kwargs):
        """
        Perform Gitter Notification
        """

        # error tracking (used for function return)
        has_error = False

        # Set up our image for display if configured to do so
        image_url = None if not self.include_image \
            else self.image_url(notify_type)

        if image_url:
            body = '![alt]({})\n{}'.format(image_url, body)

        # Create a copy of the targets list
        targets = list(self.targets)
        if self._room_mapping is None:
            # Populate our room mapping
            self._room_mapping = {}
            postokay, response = self._fetch(url='rooms')
            if not postokay:
                return False

            # Response generally looks like this:
            # [
            #   {
            #     noindex: False,
            #     oneToOne: False,
            #     avatarUrl: 'https://path/to/avatar/url',
            #     url: '/apprise-notifications/community',
            #     public: True,
            #     tags: [],
            #     lurk: False,
            #     uri: 'apprise-notifications/community',
            #     lastAccessTime: '2019-03-25T00:12:28.144Z',
            #     topic: '',
            #     roomMember: True,
            #     groupId: '5c981cecd73408ce4fbbad2f',
            #     githubType: 'REPO_CHANNEL',
            #     unreadItems: 0,
            #     mentions: 0,
            #     security: 'PUBLIC',
            #     userCount: 1,
            #     id: '5c981cecd73408ce4fbbad31',
            #     name: 'apprise/community'
            #   }
            # ]
            for entry in response:
                self._room_mapping[entry['name'].lower().split('/')[0]] = {
                    # The ID of the room
                    'id': entry['id'],

                    # A descriptive name (useful for logging)
                    'uri': entry['uri'],
                }

        if len(targets) == 0:
            # No targets specified
            return False

        while len(targets):
            target = targets.pop(0).lower()

            if target not in self._room_mapping:
                self.logger.warning(
                    'Failed to locate Gitter room {}'.format(target))

                # Flag our error
                has_error = True
                continue

            # prepare our payload
            payload = {
                'text': body,
            }

            # Our Notification URL
            notify_url = 'rooms/{}/chatMessages'.format(
                self._room_mapping[target]['id'])

            # Perform our query
            postokay, response = self._fetch(
                notify_url, payload=dumps(payload), method='POST')

            if not postokay:
                # Flag our error
                has_error = True

        return not has_error

    def _fetch(self, url, payload=None, method='GET'):
        """
        Wrapper to request object

        """

        # Prepare our headers:
        headers = {
            'User-Agent': self.app_id,
            'Accept': 'application/json',
            'Authorization': 'Bearer ' + self.token,
        }
        if payload:
            # Only set our header payload if it's defined
            headers['Content-Type'] = 'application/json'

        # Default content response object
        content = {}

        # Update our URL
        url = '{}/{}'.format(GITTER_API_URL, url)

        # Some Debug Logging
        self.logger.debug('Gitter {} URL: {} (cert_verify={})'.format(
            method,
            url, self.verify_certificate))
        if payload:
            self.logger.debug('Gitter Payload: {}' .format(payload))

        # By default set wait to None
        wait = None

        if self.ratelimit_remaining == 0:
            # Determine how long we should wait for or if we should wait at
            # all. This isn't fool-proof because we can't be sure the client
            # time (calling this script) is completely synced up with the
            # Gitter server.  One would hope we're on NTP and our clocks are
            # the same allowing this to roll smoothly:

            now = datetime.utcnow()
            if now < self.ratelimit_reset:
                # We need to throttle for the difference in seconds
                # We add 0.5 seconds to the end just to allow a grace
                # period.
                wait = (self.ratelimit_reset - now).total_seconds() + 0.5

        # Always call throttle before any remote server i/o is made
        self.throttle(wait=wait)

        # fetch function
        fn = requests.post if method == 'POST' else requests.get
        try:
            r = fn(
                url,
                data=payload,
                headers=headers,
                verify=self.verify_certificate,
            )

            if r.status_code != requests.codes.ok:
                # We had a problem
                status_str = \
                    NotifyGitter.http_response_code_lookup(r.status_code)

                self.logger.warning(
                    'Failed to send Gitter {} to {}: '
                    '{}error={}.'.format(
                        method,
                        url,
                        ', ' if status_str else '',
                        r.status_code))

                self.logger.debug(
                    'Response Details:\r\n{}'.format(r.content))

                # Mark our failure
                return (False, content)

            try:
                content = loads(r.content)

            except (TypeError, ValueError):
                # ValueError = r.content is Unparsable
                # TypeError = r.content is None
                content = {}

            try:
                self.ratelimit_remaining = \
                    int(r.headers.get('X-RateLimit-Remaining'))
                self.ratelimit_reset = datetime.utcfromtimestamp(
                    int(r.headers.get('X-RateLimit-Reset')))

            except (TypeError, ValueError):
                # This is returned if we could not retrieve this information
                # gracefully accept this state and move on
                pass

        except requests.RequestException as e:
            self.logger.warning(
                'Exception received when sending Gitter {} to {}: '.
                format(method, url))
            self.logger.debug('Socket Exception: %s' % str(e))

            # Mark our failure
            return (False, content)

        return (True, content)

    def url(self):
        """
        Returns the URL built dynamically based on specified arguments.
        """

        # Define any arguments set
        args = {
            'format': self.notify_format,
            'overflow': self.overflow_mode,
            'image': 'yes' if self.include_image else 'no',
            'verify': 'yes' if self.verify_certificate else 'no',
        }

        return '{schema}://{token}/{targets}/?{args}'.format(
            schema=self.secure_protocol,
            token=NotifyGitter.quote(self.token, safe=''),
            targets='/'.join(
                [NotifyGitter.quote(x, safe='') for x in self.targets]),
            args=NotifyGitter.urlencode(args))

    @staticmethod
    def parse_url(url):
        """
        Parses the URL and returns enough arguments that can allow
        us to substantiate this object.

        """
        results = NotifyBase.parse_url(url)

        if not results:
            # We're done early as we couldn't load the results
            return results

        results['token'] = NotifyGitter.unquote(results['host'])

        # Get our entries; split_path() looks after unquoting content for us
        # by default
        results['targets'] = NotifyGitter.split_path(results['fullpath'])

        # Support the 'to' variable so that we can support targets this way too
        # The 'to' makes it easier to use yaml configuration
        if 'to' in results['qsd'] and len(results['qsd']['to']):
            results['targets'] += NotifyGitter.parse_list(results['qsd']['to'])

        # Include images with our message
        results['include_image'] = \
            parse_bool(results['qsd'].get('image', False))

        return results
Пример #41
0
 def ping(self):
     self.last_seen = datetime.utcnow()
     db.session.add(self)
Пример #42
0
def now_as_float():
    return datetime_to_epoch(datetime.utcnow())
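The `datetime_to_epoch` helper is not shown in this snippet; a plausible minimal implementation (an assumption, mirroring the `calendar.timegm` approach used by `unix_utc_now` later in this listing) would be:

import calendar
from datetime import datetime

def datetime_to_epoch(dt):
    # Assumed helper: convert a naive UTC datetime to epoch seconds as a float,
    # keeping the microsecond fraction since the caller is named now_as_float().
    return calendar.timegm(dt.utctimetuple()) + dt.microsecond / 1e6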
Пример #43
0
 def _set_time_start(cls, deployment_history):
     if not deployment_history.time_start:
         deployment_history.time_start = datetime.utcnow()
Пример #44
0
def update_user(self, sailthru_vars, email, site=None, new_user=False, send_welcome_email=False):
    """
    Adds/updates Sailthru profile information for a user.
     Args:
        sailthru_vars(dict): User profile information to pass as 'vars' to Sailthru
        email(str): User email address
        new_user(boolean): True if new registration
        send_welcome_email(boolean): True if a welcome email should be sent
    Returns:
        None
    """
    email_config = EmailMarketingConfiguration.current()
    if not email_config.enabled:
        return

    sailthru_client = SailthruClient(email_config.sailthru_key, email_config.sailthru_secret)
    try:
        sailthru_response = sailthru_client.api_post("user",
                                                     _create_email_user_param(sailthru_vars, sailthru_client,
                                                                              email, new_user, email_config,
                                                                              site=site))

    except SailthruClientError as exc:
        log.error("Exception attempting to add/update user %s in Sailthru - %s", email, unicode(exc))
        raise self.retry(exc=exc,
                         countdown=email_config.sailthru_retry_interval,
                         max_retries=email_config.sailthru_max_retries)

    if not sailthru_response.is_ok():
        error = sailthru_response.get_error()
        log.error("Error attempting to add/update user in Sailthru: %s", error.get_message())
        if _retryable_sailthru_error(error):
            raise self.retry(countdown=email_config.sailthru_retry_interval,
                             max_retries=email_config.sailthru_max_retries)
        return

    if send_welcome_email and email_config.sailthru_welcome_template and is_default_site(site) and not \
            sailthru_vars.get('is_enterprise_learner'):

        scheduled_datetime = datetime.utcnow() + timedelta(seconds=email_config.welcome_email_send_delay)
        try:
            sailthru_response = sailthru_client.api_post(
                "send",
                {
                    "email": email,
                    "template": email_config.sailthru_welcome_template,
                    "schedule_time": scheduled_datetime.strftime('%Y-%m-%dT%H:%M:%SZ')
                }
            )
        except SailthruClientError as exc:
            log.error("Exception attempting to send welcome email to user %s in Sailthru - %s", email, unicode(exc))
            raise self.retry(exc=exc,
                             countdown=email_config.sailthru_retry_interval,
                             max_retries=email_config.sailthru_max_retries)

        if not sailthru_response.is_ok():
            error = sailthru_response.get_error()
            log.error("Error attempting to send welcome email to user in Sailthru: %s", error.get_message())
            if _retryable_sailthru_error(error):
                raise self.retry(countdown=email_config.sailthru_retry_interval,
                                 max_retries=email_config.sailthru_max_retries)
Пример #45
0
def patch_utcnow(test):
    utcnow = test.patch(publication, "datetime").utcnow
    ref = utcnow.return_value = datetime.utcnow()
    return ref
Пример #46
0
def before_request():
    if current_user.is_authenticated:
        current_user.last_seen = datetime.utcnow()
        db.session.commit()
Пример #47
0
def create_message(type):
	message = {}
	message['type'] = type
	message['hostname'] = socket.gethostname()
	message['time'] = datetime.utcnow()
	return message
Пример #48
0
 def _set_time_end(cls, deployment_history):
     if not deployment_history.time_end:
         deployment_history.time_end = datetime.utcnow()
Пример #49
0
def unix_utc_now():
    d = datetime.utcnow()
    _unix = calendar.timegm(d.utctimetuple())

    return _unix
Пример #50
0
def index():
    return render_template('index.html',
                           current_time=datetime.utcnow(),
                           name=session.get("name"))
Пример #51
0
def get_file_meta(worker_name,
                  path,
                  cliargs,
                  reindex_dict,
                  statsembeded=False):
    """This is the get file meta data function.
    It scrapes file meta and ignores files smaller
    than minsize Bytes, newer than mtime
    and in excluded_files. Returns file meta dict.
    """

    try:
        # check if stats embedded in path
        if statsembeded:
            metadata = path[1]
            fullpath = path[0]
        else:
            fullpath = path

        filename = os.path.basename(fullpath)

        # check if file is in excluded_files list
        if file_excluded(filename):
            return None
        extension = os.path.splitext(filename)[1][1:].strip().lower()

        if statsembeded:
            # get embedded stats from path
            mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime, blocks = metadata
        else:
            # use lstat to get meta and not follow sym links
            s = os.lstat(fullpath)
            mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime = s
            blocks = s.st_blocks

        # Are we storing file size or on disk size
        if cliargs['sizeondisk']:
            size = blocks * cliargs['blocksize']

        # Skip files smaller than minsize cli flag
        if size < cliargs['minsize']:
            return None

        # Convert time in days (mtime cli arg) to seconds
        time_sec = cliargs['mtime'] * 86400
        file_mtime_sec = time.time() - mtime

        if time_sec < 0:
            # Only process files modified less than x days ago
            if file_mtime_sec > (time_sec * -1):
                return None
        else:
            # Only process files modified at least x days ago
            if file_mtime_sec < time_sec:
                return None

        # convert times to utc for es
        mtime_utc = datetime.utcfromtimestamp(mtime).isoformat()
        atime_utc = datetime.utcfromtimestamp(atime).isoformat()
        ctime_utc = datetime.utcfromtimestamp(ctime).isoformat()

        # get owner and group names
        owner, group = get_owner_group_names(uid, gid, cliargs)

        # create md5 hash of file using metadata filesize and mtime
        filestring = str(size) + str(mtime)
        filehash = hashlib.md5(filestring.encode('utf-8')).hexdigest()

        # get time
        indextime_utc = datetime.utcnow().isoformat()

        # get absolute path of parent directory
        parentdir = os.path.abspath(os.path.join(fullpath, os.pardir))

        # create file metadata dictionary
        filemeta_dict = {
            "filename": filename,
            "extension": extension,
            "path_parent": parentdir,
            "filesize": size,
            "owner": owner,
            "group": group,
            "last_modified": mtime_utc,
            "last_access": atime_utc,
            "last_change": ctime_utc,
            "hardlinks": nlink,
            "inode": str(ino),
            "filehash": filehash,
            "tag": "",
            "tag_custom": "",
            "dupe_md5": "",
            "worker_name": worker_name,
            "indexing_date": indextime_utc,
            "_type": "file"
        }

        # check plugins for adding extra meta data to filemeta_dict
        for plugin in plugins:
            try:
                # check if plugin is for file doc
                mappings = {'mappings': {'file': {'properties': {}}}}
                plugin.add_mappings(mappings)
                filemeta_dict.update(plugin.add_meta(fullpath))
            except KeyError:
                pass

        # add any autotags to filemeta_dict
        if cliargs['autotag'] and len(config['autotag_files']) > 0:
            filemeta_dict = auto_tag(filemeta_dict, 'file', mtime, atime,
                                     ctime)

        # add cost per gb to filemeta_dict
        if cliargs['costpergb']:
            filemeta_dict = cost_per_gb(filemeta_dict, fullpath, mtime, atime,
                                        ctime, 'file')

        # search for and copy over any existing tags from reindex_dict
        for sublist in reindex_dict['file']:
            if sublist[0] == fullpath:
                filemeta_dict['tag'] = sublist[1]
                filemeta_dict['tag_custom'] = sublist[2]
                break

    except (OSError, IOError) as e:
        warnings.warn("OS/IO Exception caused by: %s" % e)
        return False
    except Exception as e:
        warnings.warn("Exception caused by: %s" % e)
        return False

    return filemeta_dict
Пример #52
0
    def get(self):
        never = datetime(1900, 1, 1)

        user = self.current_user

        def sort_key(token):
            return (token.last_activity or never, token.created or never)

        now = datetime.utcnow()
        api_tokens = []
        for token in sorted(user.api_tokens, key=sort_key, reverse=True):
            if token.expires_at and token.expires_at < now:
                self.db.delete(token)
                self.db.commit()
                continue
            api_tokens.append(token)

        # group oauth client tokens by client id
        # AccessTokens have expires_at as an integer timestamp
        now_timestamp = now.timestamp()
        oauth_tokens = defaultdict(list)
        for token in user.oauth_tokens:
            if token.expires_at and token.expires_at < now_timestamp:
                self.log.warning("Deleting expired token")
                self.db.delete(token)
                self.db.commit()
                continue
            if not token.client_id:
                # token should have been deleted when client was deleted
                self.log.warning("Deleting stale oauth token for %s",
                                 user.name)
                self.db.delete(token)
                self.db.commit()
                continue
            oauth_tokens[token.client_id].append(token)

        # get the earliest created and latest last_activity
        # timestamp for a given oauth client
        oauth_clients = []
        for client_id, tokens in oauth_tokens.items():
            created = tokens[0].created
            last_activity = tokens[0].last_activity
            for token in tokens[1:]:
                if token.created < created:
                    created = token.created
                if last_activity is None or (
                        token.last_activity
                        and token.last_activity > last_activity):
                    last_activity = token.last_activity
            token = tokens[0]
            oauth_clients.append({
                'client':
                token.client,
                'description':
                token.client.description or token.client.identifier,
                'created':
                created,
                'last_activity':
                last_activity,
                'tokens':
                tokens,
                # only need one token id because
                # revoking one oauth token revokes all oauth tokens for that client
                'token_id':
                tokens[0].api_id,
                'token_count':
                len(tokens),
            })

        # sort oauth clients by last activity, created
        def sort_key(client):
            return (client['last_activity'] or never, client['created']
                    or never)

        oauth_clients = sorted(oauth_clients, key=sort_key, reverse=True)

        html = self.render_template('token.html',
                                    api_tokens=api_tokens,
                                    oauth_clients=oauth_clients)
        self.finish(html)
Пример #53
0
def register(show_spinner=False) -> str:
  params = Params()
  params.put("SubscriberInfo", HARDWARE.get_subscriber_info())

  IMEI = params.get("IMEI", encoding='utf8')
  HardwareSerial = params.get("HardwareSerial", encoding='utf8')
  dongle_id = params.get("DongleId", encoding='utf8')
  needs_registration = None in (IMEI, HardwareSerial, dongle_id)

  # create a key for auth
  # your private key is kept on your device persist partition and never sent to our servers
  # do not erase your persist partition
  if not os.path.isfile(PERSIST+"/comma/id_rsa.pub"):
    needs_registration = True
    cloudlog.warning("generating your personal RSA key")
    mkdirs_exists_ok(PERSIST+"/comma")
    assert os.system("openssl genrsa -out "+PERSIST+"/comma/id_rsa.tmp 2048") == 0
    assert os.system("openssl rsa -in "+PERSIST+"/comma/id_rsa.tmp -pubout -out "+PERSIST+"/comma/id_rsa.tmp.pub") == 0
    os.rename(PERSIST+"/comma/id_rsa.tmp", PERSIST+"/comma/id_rsa")
    os.rename(PERSIST+"/comma/id_rsa.tmp.pub", PERSIST+"/comma/id_rsa.pub")

  if needs_registration:
    if show_spinner:
      spinner = Spinner()
      spinner.update("registering device")

    # Create registration token, in the future, this key will make JWTs directly
    with open(PERSIST+"/comma/id_rsa.pub") as f1, open(PERSIST+"/comma/id_rsa") as f2:
      public_key = f1.read()
      private_key = f2.read()

    # Block until we get the imei
    imei1, imei2 = None, None
    while imei1 is None and imei2 is None:
      try:
        imei1, imei2 = HARDWARE.get_imei(0), HARDWARE.get_imei(1)
      except Exception:
        cloudlog.exception("Error getting imei, trying again...")
        time.sleep(1)

    serial = HARDWARE.get_serial()
    params.put("IMEI", imei1)
    params.put("HardwareSerial", serial)

    backoff = 0
    while True:
      try:
        register_token = jwt.encode({'register': True, 'exp': datetime.utcnow() + timedelta(hours=1)}, private_key, algorithm='RS256')
        cloudlog.info("getting pilotauth")
        resp = api_get("v2/pilotauth/", method='POST', timeout=15,
                       imei=imei1, imei2=imei2, serial=serial, public_key=public_key, register_token=register_token)

        if resp.status_code in (402, 403):
          cloudlog.info(f"Unable to register device, got {resp.status_code}")
          dongle_id = UNREGISTERED_DONGLE_ID
        else:
          dongleauth = json.loads(resp.text)
          dongle_id = dongleauth["dongle_id"]
        break
      except Exception:
        cloudlog.exception("failed to authenticate")
        backoff = min(backoff + 1, 15)
        time.sleep(backoff)

    if show_spinner:
      spinner.close()

  if dongle_id:
    params.put("DongleId", dongle_id)
    set_offroad_alert("Offroad_UnofficialHardware", dongle_id == UNREGISTERED_DONGLE_ID)
  return dongle_id
Пример #54
0
def scrape_tree_meta(paths, cliargs, reindex_dict):
    global worker
    tree_dirs = []
    tree_files = []
    totalcrawltime = 0
    statsembeded = False

    path_count = 0
    for path in paths:
        path_count += 1
        starttime = time.time()
        if not cliargs['dirsonly']:
            root, dirs, files = path
        else:
            root, dirs = path
            files = []
        if path_count == 1:
            if type(root) is tuple:
                statsembeded = True
        # check if stats embedded in data from diskover tree walk client or crawlapi
        if statsembeded:
            root_path = root[0]
            dmeta = get_dir_meta(worker,
                                 root,
                                 cliargs,
                                 reindex_dict,
                                 statsembeded=True)
        else:
            root_path = root
            dmeta = get_dir_meta(worker,
                                 root_path,
                                 cliargs,
                                 reindex_dict,
                                 statsembeded=False)

        if dmeta == "sametimes":
            # fetch meta data for directory and all its files (doc sources) from index2 since
            # directory times haven't changed
            dir_source, files_source = get_metadata(root_path, cliargs)
            datenow = datetime.utcnow().isoformat()
            for file_source in files_source:
                # update indexed at time
                file_source['indexing_date'] = datenow
                # update worker name
                file_source['worker_name'] = worker
                tree_files.append(('file', file_source))
            if dir_source:
                # update indexed at time
                dir_source['indexing_date'] = datenow
                # update worker name
                dir_source['worker_name'] = worker
                # update crawl time
                elapsed = time.time() - starttime
                dir_source['crawl_time'] = round(elapsed, 6)
                tree_dirs.append(dir_source)
                totalcrawltime += elapsed
        # get meta off disk since times in Redis differ from those on disk
        elif dmeta:
            # no files in batch, get them with scandir
            if cliargs['dirsonly']:
                for entry in scandir(root):
                    if entry.is_file(
                            follow_symlinks=False) and not file_excluded(
                                entry.name):
                        files.append(entry.name)
            filecount = 0
            for file in files:
                if statsembeded:
                    fmeta = get_file_meta(worker,
                                          file,
                                          cliargs,
                                          reindex_dict,
                                          statsembeded=True)
                else:
                    fmeta = get_file_meta(worker,
                                          os.path.join(root_path, file),
                                          cliargs,
                                          reindex_dict,
                                          statsembeded=False)
                if fmeta:
                    tree_files.append(fmeta)
                    filecount += 1

            # update crawl time
            elapsed = time.time() - starttime
            dmeta['crawl_time'] = round(elapsed, 6)
            # check for empty dirs and dirsonly cli arg
            if cliargs['indexemptydirs']:
                tree_dirs.append(dmeta)
            elif not cliargs['indexemptydirs'] and (len(dirs) > 0
                                                    or filecount > 0):
                tree_dirs.append(dmeta)
            totalcrawltime += elapsed

        # check if doc count is more than es chunksize and bulk add to es
        if len(tree_dirs) + len(tree_files) >= config['es_chunksize']:
            es_bulk_add(worker, tree_dirs, tree_files, cliargs, totalcrawltime)
            del tree_dirs[:]
            del tree_files[:]
            totalcrawltime = 0

    # bulk add to es
    if len(tree_dirs) > 0 or len(tree_files) > 0:
        es_bulk_add(worker, tree_dirs, tree_files, cliargs, totalcrawltime)
Пример #55
0
 def _genThreadId(self):
     time_stamp = datetime.utcnow().isoformat()
     return hashlib.md5(time_stamp).hexdigest()
Пример #56
0
def get_dir_meta(worker_name, path, cliargs, reindex_dict, statsembeded=False):
    """This is the get directory meta data function.
    It gets directory metadata and returns dir meta dict.
    It checks whether the metadata is cached in Redis, compares
    the on-disk mtime and ctime with the cached values, and
    returns the string "sametimes" if they match.
    """

    try:
        if statsembeded:
            metadata = path[1]
            dirpath = path[0]
            # get directory meta embedded in path
            mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime = metadata
        else:
            # get directory meta using lstat
            dirpath = path
            mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime = os.lstat(
                dirpath)

        # convert times to utc for es
        mtime_utc = datetime.utcfromtimestamp(mtime).isoformat()
        atime_utc = datetime.utcfromtimestamp(atime).isoformat()
        ctime_utc = datetime.utcfromtimestamp(ctime).isoformat()

        if cliargs['index2']:
            # check if directory times cached in Redis
            redis_dirtime = redis_conn.get(
                base64.encodestring(dirpath.encode('utf-8', errors='ignore')))
            if redis_dirtime:
                cached_times = float(redis_dirtime.decode('utf-8'))
                # check if cached times are the same as on disk
                current_times = float(mtime + ctime)
                if cached_times == current_times:
                    return "sametimes"

        # get time now in utc
        indextime_utc = datetime.utcnow().isoformat()

        # get owner and group names
        owner, group = get_owner_group_names(uid, gid, cliargs)

        filename = os.path.basename(dirpath)
        parentdir = os.path.abspath(os.path.join(dirpath, os.pardir))

        dirmeta_dict = {
            "filename": filename,
            "path_parent": parentdir,
            "filesize": 0,
            "items": 1,  # 1 for itself
            "items_files": 0,
            "items_subdirs": 0,
            "last_modified": mtime_utc,
            "last_access": atime_utc,
            "last_change": ctime_utc,
            "hardlinks": nlink,
            "inode": str(ino),
            "owner": owner,
            "group": group,
            "tag": "",
            "tag_custom": "",
            "crawl_time": 0,
            "change_percent_filesize": "",
            "change_percent_items": "",
            "change_percent_items_files": "",
            "change_percent_items_subdirs": "",
            "costpergb": "",
            "worker_name": worker_name,
            "indexing_date": indextime_utc,
            "_type": "directory"
        }

        # check plugins for adding extra meta data to dirmeta_dict
        for plugin in plugins:
            try:
                # check if plugin is for directory doc
                mappings = {'mappings': {'directory': {'properties': {}}}}
                plugin.add_mappings(mappings)
                dirmeta_dict.update(plugin.add_meta(dirpath))
            except KeyError:
                pass

        # add any autotags to dirmeta_dict
        if cliargs['autotag'] and len(config['autotag_dirs']) > 0:
            dirmeta_dict = auto_tag(dirmeta_dict, 'directory', mtime, atime,
                                    ctime)

        # search for and copy over any existing tags from reindex_dict
        for sublist in reindex_dict['directory']:
            if sublist[0] == dirpath:
                dirmeta_dict['tag'] = sublist[1]
                dirmeta_dict['tag_custom'] = sublist[2]
                break

    except (OSError, IOError) as e:
        warnings.warn("OS/IO Exception caused by: %s" % e)
        return False
    except Exception as e:
        warnings.warn("Exception caused by: %s" % e)
        raise

    # cache directory times in Redis, encode path (key) using base64
    if config['redis_cachedirtimes'] == 'true':
        redis_conn.set(base64.encodestring(
            dirpath.encode('utf-8', errors='ignore')),
                       mtime + ctime,
                       ex=config['redis_dirtimesttl'])

    return dirmeta_dict
Пример #57
0
def _get_models_dict():
    """Downloads the ditionary of models definitions.

    File will be re-downloaded every 30 minutes (upon request) to update
    the models definitions from repository.

    Returns
    -------
    models_dict : dict
        Dictionary with models definitions. Each key is an available model.
        Each model entry is defined by:
         - "model", path to the script with model definition
         - "state", path to the archive containing the pre-saved model state
         - "model_md5", md5 checksum of model definition
         - "state_md5", md5 checksum of pre-saved model state

    """
    # The `.last_update` file contains the last time MODELS_DICT_FILE
    # has been downloaded. Read the last update time if this file is
    # available; otherwise the file will be created later.
    last_update_path = fm.join(SECML_MODELS_DIR, '.last_update')
    last_update_format = "%d %m %Y %H:%M"  # Specific format to avoid locale
    current_datetime = datetime.utcnow()  # UTC datetime to avoid locale

    update_models_dict = None  # Trigger flag for model definitions update
    if fm.file_exist(MODELS_DICT_PATH):
        update_models_dict = True  # By default, trigger update
        if fm.file_exist(last_update_path):
            try:
                with open(last_update_path) as fp:
                    last_update = \
                        datetime.strptime(fp.read(), last_update_format)
                    # Compute the threshold for triggering an update
                    last_update_th = last_update + timedelta(minutes=30)
            except ValueError as e:
                # Error occurred while parsing the last update date from file
                # Clean it and re-create later. Definitions update stays True
                _logger.debug(e)  # Log the error for debug purposes
                _logger.debug("Removing `{:}`".format(last_update_path))
                fm.remove_file(last_update_path)
            else:
                # Do not trigger update if last update threshold is not passed
                if current_datetime < last_update_th:
                    update_models_dict = False

    if update_models_dict is not False:
        # if update_models_dict is None, the models dict is not available;
        # if it is True, an update has been triggered.
        # In either case, we need to download the data and extract it

        try:  # Catch download errors

            # Download definitions from current version's branch first,
            # then from master branch
            _dl_data_versioned(MODELS_DICT_FILE, SECML_MODELS_DIR)

        except Exception as e:
            if update_models_dict is None:
                # If update_models_dict is still None, means that models dict
                # is not available, so we propagate the error. Otherwise pass
                raise e
            _logger.debug(e)  # Log the error for debug purposes
            _logger.debug("Error when updating the models definitions. "
                          "Using the last available ones...")

        else:  # No error raised during download process

            # Check if file has been correctly downloaded
            if not fm.file_exist(MODELS_DICT_PATH):
                raise RuntimeError(
                    'Something wrong happened while downloading the '
                    'models definitions. Please try again.')

            # Update or create the "last update" file
            with open(last_update_path, "w") as fp:
                fp.write(current_datetime.strftime(last_update_format))

    with open(MODELS_DICT_PATH) as fp:
        return json.loads(fp.read())
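The docstring of `_get_models_dict` above spells out the per-model entry layout; a purely illustrative entry (the model name, paths, and checksums below are hypothetical placeholders, not real values) could look like:

# Illustrative only; keys follow the docstring above, values are placeholders.
models_dict = {
    "example-model": {
        "model": "models/example_model.py",        # script with the model definition
        "state": "models/example_model_state.gz",  # archive with the pre-saved model state
        "model_md5": "0123456789abcdef0123456789abcdef",
        "state_md5": "fedcba9876543210fedcba9876543210",
    },
}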
Пример #58
0
from airflow import DAG
from datetime import datetime, timedelta
from airflow.contrib.operators.kubernetes_pod_operator import KubernetesPodOperator
from airflow.operators.dummy_operator import DummyOperator

default_args = {
    'owner': 'airflow',
    'depends_on_past': False,
    'start_date': datetime.utcnow(),
    'email': ['*****@*****.**'],
    'email_on_failure': False,
    'email_on_retry': False,
    'retries': 1,
    'retry_delay': timedelta(minutes=5)
}

dag = DAG('kubernetes_sample',
          default_args=default_args,
          schedule_interval=timedelta(minutes=1))

start = DummyOperator(task_id='run_this_first', dag=dag)

passing = KubernetesPodOperator(namespace='default',
                                image="Python:3.6",
                                cmds=["Python", "-c"],
                                arguments=["print('hello world')"],
                                labels={"foo": "bar"},
                                name="passing-test",
                                task_id="passing-task",
                                get_logs=True,
                                dag=dag)
Пример #59
0
 def do(self, password):
     self._time = datetime.utcnow()
     self.page.generar_extraccion(self._transferencia.montos,
                                  str(self._transferencia.observaciones),
                                  password)
     return self
Пример #60
0
def upgrade():
    # Rename score table to answer_criterion_score
    try:
        # expected foreign key to follow naming conventions
        with op.batch_alter_table('score',
                                  naming_convention=convention) as batch_op:
            # drop the fk before altering the column
            batch_op.drop_constraint('fk_score_assignment_id_assignment',
                                     'foreignkey')
            batch_op.drop_constraint('fk_score_answer_id_answer', 'foreignkey')
            batch_op.drop_constraint('fk_score_criterion_id_criterion',
                                     'foreignkey')
    except sa.exc.InternalError:
        # if not, it is likely this name
        with op.batch_alter_table('score') as batch_op:
            # drop the fk before altering the column
            batch_op.drop_constraint('score_ibfk_1', 'foreignkey')
            batch_op.drop_constraint('score_ibfk_2', 'foreignkey')
            batch_op.drop_constraint('score_ibfk_3', 'foreignkey')

    op.rename_table('score', 'answer_criterion_score')

    with op.batch_alter_table('answer_criterion_score',
                              naming_convention=convention) as batch_op:
        # create the fk
        batch_op.create_foreign_key(
            'fk_answer_criterion_score_assignment_id_assignment',
            'assignment', ['assignment_id'], ['id'],
            ondelete="CASCADE")
        batch_op.create_foreign_key(
            'fk_answer_criterion_score_answer_id_answer',
            'answer', ['answer_id'], ['id'],
            ondelete="CASCADE")
        batch_op.create_foreign_key(
            'fk_answer_criterion_score_criterion_id_criterion',
            'criterion', ['criterion_id'], ['id'],
            ondelete="CASCADE")

    # create new answer_score table
    answer_score_table = op.create_table(
        'answer_score',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('assignment_id', sa.Integer(), nullable=False),
        sa.Column('answer_id', sa.Integer(), nullable=False),
        sa.Column('scoring_algorithm',
                  EnumType(ScoringAlgorithm),
                  nullable=True),
        sa.Column('score', sa.Float(), nullable=False),
        sa.Column('variable1', sa.Float(), nullable=True),
        sa.Column('variable2', sa.Float(), nullable=True),
        sa.Column('rounds', sa.Integer(), nullable=False),
        sa.Column('wins', sa.Integer(), nullable=False),
        sa.Column('loses', sa.Integer(), nullable=False),
        sa.Column('opponents', sa.Integer(), nullable=False),
        sa.Column('modified_user_id', sa.Integer(), nullable=True),
        sa.Column('modified', sa.DateTime(), nullable=False),
        sa.Column('created_user_id', sa.Integer(), nullable=True),
        sa.Column('created', sa.DateTime(), nullable=False),
        sa.ForeignKeyConstraint(['answer_id'], ['answer.id'],
                                ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['assignment_id'], ['assignment.id'],
                                ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['created_user_id'], ['user.id'],
                                ondelete='SET NULL'),
        sa.ForeignKeyConstraint(['modified_user_id'], ['user.id'],
                                ondelete='SET NULL'),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('answer_id'),
        mysql_charset='utf8',
        mysql_collate='utf8_unicode_ci',
        mysql_engine='InnoDB')
    op.create_index(op.f('ix_answer_score_score'),
                    'answer_score', ['score'],
                    unique=False)

    # migrate aggregate data from answer_criteria_score to answer_score
    answer_criterion_score_table = sa.table(
        'answer_criterion_score', sa.column('id', sa.Integer),
        sa.Column('assignment_id', sa.Integer),
        sa.Column('answer_id', sa.Integer),
        sa.Column('criterion_id', sa.Integer),
        sa.Column('scoring_algorithm', EnumType(ScoringAlgorithm)),
        sa.Column('score', sa.Float), sa.Column('variable1', sa.Float),
        sa.Column('variable2', sa.Float), sa.Column('rounds', sa.Integer),
        sa.Column('wins', sa.Integer), sa.Column('loses', sa.Integer),
        sa.Column('opponents', sa.Integer))

    connection = op.get_bind()

    answer_scores = {}
    for answer_criterion_score in connection.execute(
            answer_criterion_score_table.select()):
        answer_scores.setdefault(
            answer_criterion_score.answer_id,
            {
                'assignment_id': answer_criterion_score.assignment_id,
                'answer_id': answer_criterion_score.answer_id,
                'scoring_algorithm': answer_criterion_score.scoring_algorithm,
                'rounds': answer_criterion_score.rounds,
                'wins': answer_criterion_score.wins,
                'loses': answer_criterion_score.loses,
                'opponents': answer_criterion_score.opponents,
                # to be averaged
                'score': [],
                'variable1': [],
                'variable2': []
            })

        answer_scores[answer_criterion_score.answer_id]['score'].append(
            answer_criterion_score.score)
        if answer_criterion_score.variable1:
            answer_scores[
                answer_criterion_score.answer_id]['variable1'].append(
                    answer_criterion_score.variable1)
        if answer_criterion_score.variable2:
            answer_scores[
                answer_criterion_score.answer_id]['variable2'].append(
                    answer_criterion_score.variable2)

    for answer_id, score in answer_scores.items():
        average_score = sum(score['score']) / float(len(
            score['score'])) if len(score['score']) else 0
        average_variable1 = sum(score['variable1']) / float(
            len(score['variable1'])) if len(score['variable1']) else None
        average_variable2 = sum(score['variable2']) / float(
            len(score['variable2'])) if len(score['variable2']) else None

        connection.execute(answer_score_table.insert().values(
            assignment_id=score['assignment_id'],
            answer_id=score['answer_id'],
            scoring_algorithm=score['scoring_algorithm'],
            rounds=score['rounds'],
            wins=score['wins'],
            loses=score['loses'],
            opponents=score['opponents'],
            score=average_score,
            variable1=average_variable1,
            variable2=average_variable2,
            modified=datetime.utcnow(),
            created=datetime.utcnow()))