Example #1
def _print_item_status(item, now, longest):
    paddedname = (' ' * (longest - len(item['path']))) + item['path']
    if 'failure_at' in item:
        ts = datetime.fromtimestamp(item['started_at'])
        prog = 'Failed %s (%s)' % (abbreviate_time(now - ts), ts)
    elif item['percent_done'] < 100.0:
        if 'started_at' not in item:
            prog = 'not yet started'
        else:
            so_far = now - datetime.fromtimestamp(item['started_at'])
            if so_far.seconds > 0.0:
                rate = item['percent_done'] / so_far.seconds
                if rate != 0:
                    time_left = (100.0 - item['percent_done']) / rate
                    prog = '%2.1f%% done, around %s left' % (
                        item['percent_done'],
                        abbreviate_time(time_left),
                    )
                else:
                    time_left = None
                    prog = '%2.1f%% done' % (item['percent_done'],)
            else:
                prog = 'just started'
    else:
        prog = ''
        for verb in ['finished', 'started', 'queued']:
            keyname = verb + '_at'
            if keyname in item:
                when = datetime.fromtimestamp(item[keyname])
                prog = '%s %s' % (verb, abbreviate_time(now - when))
                break

    print "  %s: %s" % (paddedname, prog)
Example #2
    def execute(self):
        if len(self.queries) == 0:
            # Empty batch is a no-op
            return

        opener = 'BEGIN ' + (self.batch_type + ' ' if self.batch_type else '') + ' BATCH'
        if self.timestamp:

            if isinstance(self.timestamp, (int, long)):
                ts = self.timestamp
            elif isinstance(self.timestamp, timedelta):
                ts = long((datetime.now() + self.timestamp - datetime.fromtimestamp(0)).total_seconds() * 1000000)
            elif isinstance(self.timestamp, datetime):
                ts = long((self.timestamp - datetime.fromtimestamp(0)).total_seconds() * 1000000)
            else:
                raise ValueError("Batch expects a long, a timedelta, or a datetime")

            opener += ' USING TIMESTAMP {}'.format(ts)

        query_list = [opener]
        parameters = {}
        ctx_counter = 0
        for query in self.queries:
            query.update_context_id(ctx_counter)
            ctx = query.get_context()
            ctx_counter += len(ctx)
            query_list.append('  ' + str(query))
            parameters.update(ctx)

        query_list.append('APPLY BATCH;')

        execute('\n'.join(query_list), parameters, self._consistency)

        self.queries = []
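The timestamp branch above converts the user-supplied value into microseconds since the Unix epoch, which is what CQL's USING TIMESTAMP clause expects. A minimal standalone sketch of that conversion (Python 3; the function name is illustrative):

from datetime import datetime, timedelta

def to_usecs_since_epoch(ts):
    if isinstance(ts, int):
        return ts                                    # already microseconds
    if isinstance(ts, timedelta):
        ts = datetime.now() + ts                     # offset from now
    if isinstance(ts, datetime):
        return int((ts - datetime.fromtimestamp(0)).total_seconds() * 1000000)
    raise ValueError("expected an int, a timedelta, or a datetime")

print(to_usecs_since_epoch(timedelta(seconds=10)))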
Example #3
def get_feed_entries(feeds):
    entries = []

    for feed in feeds:
        d = feedparser.parse(feed.get('feed_url'))

        for entry in d.entries:
            entry.publisher = feed['title']
            # entry.publisher_icon = feed['icon']

            if 'media_content' in entry:
                if entry.media_content[0]['medium'] == 'image':
                    entry.image = entry.media_content[0]['url']
            elif 'content' in entry:
                soup = BeautifulSoup(entry.content[0]['value'], 'html.parser')
                image = soup.find_all('img')[0]
                entry.image = image.get('src')

            published = datetime.fromtimestamp(mktime(entry.published_parsed))
            updated = datetime.fromtimestamp(mktime(entry.updated_parsed))

            entry.published = published
            entry.updated = updated

            entries.append(entry)

    return sorted(entries, key=attrgetter('published'), reverse=True)
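feedparser exposes published_parsed and updated_parsed as time.struct_time values; the snippet turns them into naive datetime objects by round-tripping through time.mktime, which interprets the struct_time as local time. A minimal sketch of that conversion (for UTC struct_times, calendar.timegm plus datetime.utcfromtimestamp avoids the local-timezone shift):

import time
from datetime import datetime

parsed = time.localtime()                               # stand-in for entry.published_parsed
published = datetime.fromtimestamp(time.mktime(parsed))
print(published)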
Example #4
    def expiration_timedelta(self):
        """Return provider session live seconds. Returns a timedelta ready to
        use with session.set_expiry().

        If provider returns a timestamp instead of session seconds to live, the
        timedelta is inferred from current time (using UTC timezone). None is
        returned if there's no value stored or it's invalid.
        """
        if self.extra_data and 'expires' in self.extra_data:
            try:
                expires = int(self.extra_data.get('expires'))
            except (ValueError, TypeError):
                return None

            now = datetime.utcnow()

            # Detect if expires is a timestamp
            if expires > time.mktime(now.timetuple()):
                # expires is a datetime, return the remaining difference
                return datetime.fromtimestamp(expires) - now
            else:
                # expires is the time to live seconds since creation,
                # check against auth_time if present, otherwise return
                # the value
                auth_time = self.extra_data.get('auth_time')
                if auth_time:
                    reference = datetime.fromtimestamp(auth_time)
                    return (reference + timedelta(seconds=expires)) - now
                else:
                    return timedelta(seconds=expires)
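The heuristic above treats expires as an absolute Unix timestamp when it is larger than the current epoch time, and as a seconds-to-live value otherwise (optionally anchored at auth_time). A simplified standalone sketch of that branch, using local time throughout rather than the UTC/local mix above:

import time
from datetime import datetime, timedelta

def expiration_timedelta(expires, auth_time=None):
    now = datetime.now()
    if expires > time.time():
        # expires looks like an absolute timestamp: return the remaining time
        return datetime.fromtimestamp(expires) - now
    # otherwise it is a lifetime in seconds
    if auth_time:
        return (datetime.fromtimestamp(auth_time) + timedelta(seconds=expires)) - now
    return timedelta(seconds=expires)

print(expiration_timedelta(3600))                       # one hour to live
print(expiration_timedelta(time.time() + 3600))         # absolute expiry one hour away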
Example #5
File: build.py Project: CDees/dxr
def build_folder(tree, conn, folder, indexed_files, indexed_folders):
    """Build an HTML index file for a single folder."""
    # Create the subfolder if it doesn't exist:
    ensure_folder(os.path.join(tree.target_folder, folder))

    # Build the folder listing:
    # Name is either basename (or if that is "" name of tree)
    name = os.path.basename(folder) or tree.name

    # Generate list of folders and their mod dates:
    folders = [('folder',
                f,
                datetime.fromtimestamp(stat(os.path.join(tree.source_folder,
                                                         folder,
                                                         f)).st_mtime),
                # TODO: DRY with Flask route. Use url_for:
                _join_url(tree.name, 'source', folder, f))
               for f in indexed_folders]

    # Generate list of files:
    files = []
    for f in indexed_files:
        # Get file path on disk
        path = os.path.join(tree.source_folder, folder, f)
        file_info = stat(path)
        files.append((dxr.mime.icon(path),
                      f,
                      datetime.fromtimestamp(file_info.st_mtime),
                      file_info.st_size,
                      _join_url(tree.name, 'source', folder, f)))

    # Lay down the HTML:
    jinja_env = load_template_env(tree.config.temp_folder,
                                  tree.config.dxrroot)
    dst_path = os.path.join(tree.target_folder,
                            folder,
                            tree.config.directory_index)

    _fill_and_write_template(
        jinja_env,
        'folder.html',
        dst_path,
        {# Common template variables:
         'wwwroot': tree.config.wwwroot,
         'tree': tree.name,
         'tree_tuples': [(t.name,
                          browse_url(t.name, tree.config.wwwroot, folder),
                          t.description)
                         for t in tree.config.sorted_tree_order],
         'generated_date': tree.config.generated_date,
         'paths_and_names': linked_pathname(folder, tree.name),
         'filters': filter_menu_items(tree.config.filter_language),
         # Autofocus only at the root of each tree:
         'should_autofocus_query': folder == '',

         # Folder template variables:
         'name': name,
         'path': folder,
         'folders': folders,
         'files': files})
Example #6
 def __str__(self):
     return ', '.join('{0}={1}'.format(k, v) for k, v in [
         ('cxn', '{0}:{1}/{2}'.format(self.cxn.params.host, self.cxn.params.port, self.cxn.params.virtual_host)),
         ('channel', '{0}'.format(int(self.channel) if self.channel is not None else self.channel)),
         ('created_at', '{0}'.format(datetime.fromtimestamp(self.created_at).isoformat())),
         ('released_at', '{0}'.format(datetime.fromtimestamp(self.released_at).isoformat())),
     ])
Example #7
    def forwards(self, orm):
		for w in orm.Walkby.objects.all():
			w.datetime = datetime2.fromtimestamp(w.time)
			w.save()
		for v in orm.Visit.objects.all():
			v.datetime = datetime2.fromtimestamp(v.time)
			v.save()
def generate_CSV_traffic_file():
    tcpstat_file     = open(IN_DIR + "/" + IN_2, "r")
    header_list      = ["Second_ID", "Bandwidth_[bps]", "Timestamp", "Tcpstat_Timestamp", "Received_Packets",
                        "Packet_Average_Size", "Packet_Size_Standard_Deviation"]
    created_csv_file = OUT_DIR + "/" + str(TRAFFIC_GRAPH_ID) + "-downloads_" + \
                       datetime.fromtimestamp(time()).strftime('%Y-%m-%d_%H-%M-%S') + ".csv"

    UID = 0
    with open(created_csv_file, "wb") as csv_file:
        csv_writer = csv.writer(csv_file)
        csv_writer.writerow(header_list)
        for tcpstat_line in tcpstat_file.readlines():
            formatted_row      = list()
            tcpstat_line_split = tcpstat_line.split("\t")

            bps                = ((tcpstat_line_split[BANDWITDH_BPS_FIELD].split("="))[1]).rstrip()
            tcpstat_timestamp  = (tcpstat_line_split[TCPSTAT_TIMESTAMP_FIELD].split(":"))[1]
            timestamp          = datetime.fromtimestamp(int(tcpstat_timestamp)).strftime('%Y-%m-%d %H:%M:%S')
            rcv_packets        = (tcpstat_line_split[RECEIVED_PACKETS_FIELD].split("="))[1]
            packet_avg_size    = (tcpstat_line_split[PACKET_AVG_SIZE_FIELD].split("="))[1]
            packet_std_dev     = (tcpstat_line_split[PACKET_SIZE_DEV_FIELD].split("="))[1]

            formatted_row.append(UID); formatted_row.append(bps); formatted_row.append(timestamp);
            formatted_row.append(tcpstat_timestamp); formatted_row.append(rcv_packets);
            formatted_row.append(packet_avg_size); formatted_row.append(packet_std_dev);

            csv_writer.writerow(formatted_row)
            UID += 1
Example #9
    def __init__(self, fullpath, owner=None):
        for check in self.checks:
            if not check( fullpath ):
                raise http.Http404('Path not found `%s` or IADMIN_FM_ROOT not configured in settings' % fullpath)

        self.name = self.basename = os.path.basename(fullpath) or ROOT_NAME
        self.parent = os.path.dirname(fullpath)

        self.absolute_path = fullpath # file system absolute path
        self.relative_path = utils.relpath(fullpath, utils.get_document_root())

        self.path = self.relative_path.split('/')
        self.mime = mimetypes.guess_type(self.absolute_path, False)[0] or ''
        self.can_read = os.access(self.absolute_path, os.R_OK)
        self.can_write = os.access(self.absolute_path, os.W_OK)
        self.is_link = os.path.islink(self.absolute_path)
        try:
            itemstat = os.stat(self.absolute_path)
            self.user = getpwuid(itemstat.st_uid)[0]
            self.group = getgrgid(itemstat.st_gid)[0]
            self.size = itemstat.st_size
            self.ctime = datetime.fromtimestamp(itemstat.st_ctime)
            self.mtime = datetime.fromtimestamp(itemstat.st_mtime)
            octs = "%04d" % int(oct(itemstat.st_mode & 0777))
            self.perms_numeric = octs
            self.perms = "%s%s%s" % (perms[int(octs[1])],
                                     perms[int(octs[2])],
                                     perms[int(octs[3])])
        except:
            self.user = self.group = self.perms_numeric = self.perms = ''
            self.size = self.ctime = self.mtime = None
 def prepare_value(self, qtuple):
     if self._is_range(qtuple):
         qtuple[1][0] = qtuple[1][0] or self.min_ts
         qtuple[1][1] = qtuple[1][1] or self.max_ts
         qtuple[1] = (datetime.fromtimestamp(qtuple[1][0]),
                      datetime.fromtimestamp(qtuple[1][1]))
     return tuple(qtuple)
Example #11
def parse_commits(head, name):
    """
    Go through the git repository log and generate a document per commit
    containing all the metadata.
    """
    for commit in head.traverse():
        yield {
            '_id': commit.hexsha,
            '_parent': name,
            'committed_date': datetime.fromtimestamp(commit.committed_date),
            'committer': {
                'name': commit.committer.name,
                'email': commit.committer.email,
            },
            'authored_date': datetime.fromtimestamp(commit.authored_date),
            'author': {
                'name': commit.author.name,
                'email': commit.author.email,
            },
            'description': commit.message,
            'parent_shas': [p.hexsha for p in commit.parents],
            # we only care about the filenames, not the per-file stats
            'files': list(commit.stats.files),
            'stats': commit.stats.total,
        }
Example #12
def validate_json_build(candidate_path):
    """ validate that all nodes are built """

    logger.info("validating node build status...")

    json_filepath = os.path.join(candidate_path, 'build_status.json')

    start_build_timestamp = os.path.getctime(json_filepath)
    start_build_time = datetime.fromtimestamp(start_build_timestamp)

    with open(json_filepath) as file_handle:
        json_bd = json.loads(file_handle.read())

        failed_to_build = []

        #check status
        for node in json_bd:
            status = json_bd[node]['status']
            timestamp = datetime.fromtimestamp(json_bd[node]['epochsecs'])

            if status != 'OK':
                failed_to_build.append(node)

            build_delta = timestamp - start_build_time

            if  build_delta > timedelta(minutes=60):
                raise NodeTimestampOutOfRange(
                    "Node %s build looks stale (%s, and build started @ %s)" %
                    (node, timestamp, start_build_time))

        if failed_to_build:
            failed_str = ', '.join(failed_to_build)
            err_msg = 'Nodes {} failed to build'.format(failed_str)
            raise NodesFailedToBuild(err_msg)
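The staleness check above compares each node's build timestamp against the creation time of build_status.json and rejects anything more than 60 minutes apart. A minimal sketch of that comparison with hypothetical epoch values:

import time
from datetime import datetime, timedelta

start_build_time = datetime.fromtimestamp(time.time() - 300)   # hypothetical: build started 5 minutes ago
node_timestamp = datetime.fromtimestamp(time.time())           # hypothetical node 'epochsecs' value

if node_timestamp - start_build_time > timedelta(minutes=60):
    raise RuntimeError("node build looks stale")
print("node timestamp is within the 60 minute window")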
Example #13
def get_datetime(obj):
    if isinstance(obj, datetime):
        return obj
    elif (isinstance(obj, float) or
          isinstance(obj, int)) and not math.isnan(obj):
        return datetime.fromtimestamp(obj)
    elif isinstance(obj, long):
        return datetime.fromtimestamp(obj / 1000)
    elif isinstance(obj, basestring):
        patterns = ['%Y-%m-%d %X.%f', '%Y-%m-%d %X']
        ok = False
        for p in patterns:
            try:
                dt = datetime.strptime(
                    obj, p
                )
                ok = True
            except ValueError:
                continue
        if ok:
            return dt
        else:
            raise ValueError('Bad datetime format for {}'.format(patterns))
    else:
        raise TypeError('Unacceptable type {}, {}'.format(type(obj), obj))
Example #14
def poll():
	db = dbal.DBAL()
	print "getting sensors"

	protocollength = 20
	modellength = 20
	valuelength = 20

	protocol = create_string_buffer(protocollength)
	model = create_string_buffer(modellength)
	idvalue = c_int()
	dataTypes = c_int()
	while(lib.tdSensor(protocol, protocollength, model, modellength, byref(idvalue), byref(dataTypes)) == 0):
		print "Sensor: ", protocol.value, model.value, "id:", idvalue.value
		value = create_string_buffer(valuelength)
		timestampvalue = c_int()

		if((dataTypes.value & TELLSTICK_TEMPERATURE) != 0):
			success = lib.tdSensorValue(protocol.value, model.value, idvalue.value, TELLSTICK_TEMPERATURE, value, valuelength, byref(timestampvalue))
			print "Temperature: ", value.value, "C,", datetime.fromtimestamp(timestampvalue.value)
			if db.get_device(int(idvalue.value)) is not None:
				db.insert_sensor_data(int(idvalue.value), value.value)

		if((dataTypes.value & TELLSTICK_HUMIDITY) != 0):
			success = lib.tdSensorValue(protocol.value, model.value, idvalue.value, TELLSTICK_HUMIDITY, value, valuelength, byref(timestampvalue))
			print "Humidity: ", value.value, "%,", datetime.fromtimestamp(timestampvalue.value)

	print " "
Example #15
def random_nodes(n = 50000):
    conn = pymongo.Connection()
    DB = conn.sina
    NODES = DB.nodes
    count = {}
    '''
    random attributes
    '''
    rand = [x+1 for x in xrange(1185071)]
    shuffle(rand)
    spammers = []
 
    spammers = {}.fromkeys(spammers)
    for attr in ['gender','created_at','friends_count','followers_count','statuses_count']:
        f = io.open("/Volumes/Data/asxzy/datasets/spammer/weibo/cluster/"+attr+"/random.tab","wb")
        for node in rand[:n]:
            node = NODES.find_one({"random":node})
            if node == None:
                continue
            if attr == 'created_at':
                t = datetime.fromtimestamp(node[attr])
                s = time.strptime('200908','%Y%m')
                s = time.mktime(s)
                s = datetime.fromtimestamp(s)
                node['created_at'] = rrule.rrule(rrule.WEEKLY, dtstart=s, until=t).count()
            elif attr == 'location':
                if len(node[attr]) == 0:
                    node[attr] = 'Unknown'
            try:
                f.write(str(node[attr])+'\n')
            except:
                f.write(str(node[attr].encode('utf-8'))+'\n')
        f.close()
        print "Done",attr
Example #16
    def set_json_aux_matches(self):
        match = None
        count = 1
        if not self.html_aux_matches:
            return None

        for td in self.html_aux_matches.find_all('td'):
            match = td.find("div", {'data-type': 'matches'})
            if count == 1:
                self.last_match_json = {
                    'match_id': match.find("div", {'data-type': 'matches'}).attrs.get('data-id'),
                    'date': datetime.fromtimestamp(time.mktime(time.strptime(match.find("div", {'class': 'm-date'}).attrs.get('data-matchdate'), '%Y%m%d'))),
                    'date_string': match.find("div", {'class': 'm-date'}).attrs.get('data-matchdate'),
                    'home': match.find("div", {'class': 'home'}).find('span', {'class': 't-nText'}).get_text(),
                    'home_logo': match.find('div', {'class': 'home'}).find('img', {'class': 't-i-3-logo'}).attrs.get('src'),
                    'away': match.find('div', {'class': 'away'}).find('span', {'class': 't-nText'}).get_text(),
                    'away_logo': match.find('div', {'class': 'away'}).find('img', {'class': 't-i-3-logo'}).attrs.get('src'),
                    'res': match.find('span', {'class': 's-resText'}).get_text(),
                    'is_res': True
                }
            else:
                self.next_match_json = {
                    'match_id': match.find("div", {'data-type': 'matches'}).attrs.get('data-id'),
                    'date': datetime.fromtimestamp(time.mktime(time.strptime(match.find("div", {'class': 'm-date'}).attrs.get('data-matchdate'), '%Y%m%d'))),
                    'date_string': match.find("div", {'class': 'm-date'}).attrs.get('data-matchdate'),
                    'home': match.find("div", {'class': 'home'}).find('span', {'class': 't-nText'}).get_text(),
                    'home_logo': match.find('div', {'class': 'home'}).find('img', {'class': 't-i-3-logo'}).attrs.get('src'),
                    'away': match.find('div', {'class': 'away'}).find('span', {'class': 't-nText'}).get_text(),
                    'away_logo': match.find('div', {'class': 'away'}).find('img', {'class': 't-i-3-logo'}).attrs.get('src'),
                    'is_res': True
                }
            count += 1
Example #17
def find_last_commit(repo, name):
    last_commit = None
    last_oid = None
    i = 0

    try:
        for commit in repo.repo.walk(repo.repo.head.target, pygit2.GIT_SORT_TIME):
            if 'iceberg' in commit.tree:
                iceoid = commit.tree['iceberg'].hex
                icetree = repo[iceoid]

                if name in icetree:
                    i += 1
                    oid = icetree[name].oid

                    has_changed = (oid != last_oid and last_oid)

                    if has_changed:
                        break

                    last_oid = oid

                elif i > 1:
                    break

                last_commit = commit
                print datetime.fromtimestamp(last_commit.commit_time)
    except:
        return last_commit or 'Corrupted repository'

    return last_commit or 'No commit'
Example #18
def to_datetime(t, tzinfo=None):
    """Convert `t` into a `datetime` object, using the following rules:
    
     - If `t` is already a `datetime` object, it is simply returned.
     - If `t` is None, the current time will be used.
     - If `t` is a number, it is interpreted as a timestamp.
     
    If no `tzinfo` is given, the local timezone will be used.

    Any other input will trigger a `TypeError`.
    """
    if t is None:
        return datetime.now(tzinfo or localtz)
    elif isinstance(t, datetime):
        return t
    elif isinstance(t, date):
        return (tzinfo or localtz).localize(datetime(t.year, t.month, t.day))
    elif isinstance(t, (int, long, float)):
        if not (_min_ts <= t <= _max_ts):
            # Handle microsecond timestamps for 0.11 compatibility
            t *= 0.000001
        if t < 0 and isinstance(t, float):
            # Work around negative fractional times bug in Python 2.4
            # http://bugs.python.org/issue1646728
            frac, integer = math.modf(t)
            return datetime.fromtimestamp(integer - 1, tzinfo or localtz) \
                   + timedelta(seconds=frac + 1)
        return datetime.fromtimestamp(t, tzinfo or localtz)
    raise TypeError('expecting datetime, int, long, float, or None; got %s' %
                    type(t))
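The magnitude check (_min_ts <= t <= _max_ts) is how the function distinguishes second-resolution timestamps from legacy microsecond ones: anything outside the plausible range for seconds is rescaled by 0.000001. A standalone sketch of that idea, with assumed bounds for the seconds range:

from datetime import datetime

_min_ts, _max_ts = 0, 10000000000        # assumed bounds for seconds-since-epoch values

def to_datetime_sketch(t):
    if not (_min_ts <= t <= _max_ts):
        t *= 0.000001                    # treat out-of-range values as microseconds
    return datetime.fromtimestamp(t)

print(to_datetime_sketch(1500000000))          # seconds
print(to_datetime_sketch(1500000000000000))    # microseconds, same instant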
Example #19
    def summary(self):
        output = {}

        output['zones'] = []
        for i,zone in enumerate(self.hardwareZones):
            i += 1
            output['status%i' % i] = 'on' if zone.isActive() else 'off'
            output['name%i' % i] = self.config.get('Zone%i' % i, 'name')
            output['zones'].append(i)
        for entry in self.history.getData():
            try:
                output['start%i' % entry['zone']]
                output['run%i' % entry['zone']]
                output['adjust%i' % entry['zone']]
            except KeyError:
                lStart = datetime.fromtimestamp(entry['dateTimeStart'])
                if entry['dateTimeStop'] > 0:
                    lStop = datetime.fromtimestamp(entry['dateTimeStop'])
                else:
                    lStop = datetime.now()
                output['start%i' % entry['zone']] = self.serialize(lStart)
                output['run%i' % entry['zone']] = self.serialize(lStop)-self.serialize(lStart)
                output['adjust%i' % entry['zone']] = entry['wxAdjust']
                
        return output
Example #20
def calendar_events_list(start, end):
	# Get all events between the two dates:
	print 'Retrieve events between {} and {}'.format(start, end)
	import time
	#print time.gmtime(int(start))
	#print int(start)
	#print datetime.fromtimestamp(int(start))
	calendar_begins = datetime.fromtimestamp(int(start))
	calendar_ends = datetime.fromtimestamp(int(end))
	import calendar
	print 'The calendar runs from {} to {}'.format(calendar_begins, calendar_ends)#time.strftime("%Y", time.gmtime(calendar.timegm(calendar_begins))), time.strftime("%Y", time.gmtime(calendar.timegm(calendar_ends))))
	events = Event.objects.filter(date_time_end__gt=calendar_begins).filter(date_time_begin__lt=calendar_ends)
	events_list = []
	for event in events:
		events_list.append({
			'title': event.title,
			'start': calendar.timegm(event.date_time_begin.utctimetuple()),
			'end': calendar.timegm(event.date_time_end.utctimetuple()),
			'id': event.id,
			'url': '/vidburdur/'+str(event.id),
		})
	import pprint
	pprint.pprint(events_list)
	return HttpResponse(json.dumps(events_list), mimetype='application/javascript')
	print 'FINISHED'
Example #21
def to_dict(streams):
    return dict( [ (i[0],
        {   'id'        : i[0],
            'start_time': datetime.fromtimestamp(i[1]),
            'end_time'  : datetime.fromtimestamp(i[2]),
            'rate'      : i[3]
        }) for i in streams] )
Example #22
def extract_and_attach_metadata(mediaitem, filepath):
    if mediaitem.media_type_cd == 100:
        try:
            media_file = open(filepath, 'rb')
            tags = exifread.process_file(media_file, details=False)
            org_date_tag = tags.get('EXIF DateTimeOriginal')
            org_date = datetime.now()
            if org_date_tag:
                org_date = datetime.strptime(str(org_date_tag), '%Y:%m:%d %H:%M:%S')
            else:
                org_date_tag = tags.get('EXIF DateTimeDigitized')
                if org_date_tag:
                    org_date = datetime.strptime(str(org_date_tag), '%Y:%m:%d %H:%M:%S')
                else:
                    org_date_tag = os.stat(filepath).st_birthtime
                    if org_date_tag:
                        org_date = datetime.fromtimestamp(org_date_tag)
                    else:
                        org_date_tag = os.stat(filepath).st_ctime
                        if org_date_tag:
                            org_date = datetime.fromtimestamp(org_date_tag)
            mediaitem.origin_date = org_date
        except:
            logging.error('failed to extract metadata for: ' + str(mediaitem))
    file_size = os.stat(filepath).st_size

    mediaitem.file_size = file_size
    logging.log(logging.DEBUG, str(mediaitem) + ' - set file size = ' + str(file_size))
Example #23
def profile():
    args = g.args
    filter(args, ('aid', 'start_t', 'end_t', 'limits', 'longitude', 'latitude', 'loc'))
    intro = args.get('intro', '', type=str)
    image = args.get('image', '', type=str)
    time_validate(args['start_t'], args['end_t'])
    start_t = datetime.fromtimestamp(float(args['start_t']))
    end_t = datetime.fromtimestamp(float(args['end_t']))
    a = Activity.query.filter(Activity.aid == args['aid']).first()
    if a is None:
        raise ThrownError('No such activity.')
    if a.host != g.user:
        raise ThrownError('You do not have the privilege.')
    for k in args.iterkeys():
        setattr(a, k, args[k])
    a.start_t = start_t
    a.end_t = end_t
    a.intro = intro
    a.image = image
    db.session.add(a)
    db.session.commit()
    r = {
            'status': True, 
            'message': 'OK', 
            'result': ''
        }
    return json.dumps(r)
Example #24
def print_summary():
    print('[%s - %s]' % (
        datetime.fromtimestamp(start_time).strftime('%Y-%m-%d %H:%M:%S'),
        datetime.fromtimestamp(end_time).strftime('%Y-%m-%d %H:%M:%S')))

    for k1 in sorted(summary.keys()):
        print_line(0, MSG_T[k1], total(summary[k1]))

        if k1 == MSG_T['TABLE_DUMP']:
            for k2 in sorted(summary[k1].keys()):
                print_line(1, TD_ST[k2], total(summary[k1][k2]))

        elif k1 == MSG_T['TABLE_DUMP_V2']:
            for k2 in sorted(summary[k1].keys()):
                print_line(1, TD_V2_ST[k2], total(summary[k1][k2]))

        elif ( k1 == MSG_T['BGP4MP']
            or k1 == MSG_T['BGP4MP_ET']):

            for k2 in sorted(summary[k1].keys()):
                print_line(1, BGP4MP_ST[k2], total(summary[k1][k2]))

                if (   k2 == BGP4MP_ST['BGP4MP_MESSAGE']
                    or k2 == BGP4MP_ST['BGP4MP_MESSAGE_AS4']
                    or k2 == BGP4MP_ST['BGP4MP_MESSAGE_LOCAL']
                    or k2 == BGP4MP_ST['BGP4MP_MESSAGE_AS4_LOCAL']):
                    for k3 in sorted(summary[k1][k2].keys()):
                        print_line(2, BGP_MSG_T[k3], total(summary[k1][k2][k3]))

                elif ( k2 == BGP4MP_ST['BGP4MP_STATE_CHANGE']
                    or k2 == BGP4MP_ST['BGP4MP_STATE_CHANGE_AS4']):
                    for k3 in sorted(summary[k1][k2].keys()):
                        print_line(2, BGP_FSM[k3], total(summary[k1][k2][k3]))
Example #25
    def domian(self, irc, msg, args):
        """

        Returns information about the next episode of Domian
        """
        now = datetime.now()
        feed = feedparser.parse('http://nachtlager.de/go/de/feed/week')
        nextshow = None
        
        for show in feed.entries:
            showStart = datetime.fromtimestamp(mktime(show.published_parsed)).replace(hour=1)
            showEnd = datetime.fromtimestamp(mktime(show.published_parsed)).replace(hour=2)
            show['showstart'] = showStart
            
            if showStart < now and showEnd > now:
                nextshow = show
                nextshow['onair'] = True
            else:
                if showStart > now:
                    if nextshow is None:
                        nextshow = show
                        nextshow['onair'] = False
                    else:
                        if showStart < nextshow['showstart']:
                            nextshow = show
                            nextshow['onair'] = False
        try:
            if nextshow['onair']:
                reply = u'Domian läuft gerade. (%s) - http://www.wdr.de/wdrlive/media/einslive.m3u' % nextshow.description
            else:
                starts_in = formatTimespan(int(mktime(nextshow['showstart'].timetuple()) - time()))
                reply = u'Nächste Sendung am %s (%s) - in %s' % (nextshow['showstart'].strftime('%d.%m.%Y um %H:%M'), nextshow.description, starts_in)
        except Exception, e:
            log.error('Domian: %s' % repr(e))
            reply = u'Noch keine Daten vorhanden!'
Example #26
def MyAccDetails(url):
        link = Open_URL(url)
        match=re.compile('"username":"******"').findall(link)
        match1=re.compile('"status":"(.+?)"').findall(link)
        match2=re.compile('"exp_date":"(.+?)"').findall(link) 	
        match3=re.compile('"active_cons":"(.+?)"').findall(link)
        match4=re.compile('"created_at":"(.+?)"').findall(link)
        match5=re.compile('"max_connections":"(.+?)"').findall(link)
        match6=re.compile('"is_trial":"1"').findall(link)
        for url in match:
                AddAccInfo('My Boyz Toyz Account Information','','',Images +'MyAcc.png')
                AddAccInfo('Username:  %s'%(url),'','',Images + 'MyAcc.png')
        for url in match1:
                AddAccInfo('Status:  %s'%(url),'','',Images + 'MyAcc.png')
        for url in match4:
                dt = datetime.fromtimestamp(float(match4[0]))
                dt = dt.strftime('%Y-%m-%d %H:%M:%S')
                AddAccInfo('Created:  %s'%(dt),'','',Images +'MyAcc.png')
        for url in match2:
                dt = datetime.fromtimestamp(float(match2[0]))
                dt = dt.strftime('%Y-%m-%d %H:%M:%S')
                AddAccInfo('Expires:  %s'%(dt),'','',Images +'MyAcc.png')
        for url in match3:
                AddAccInfo('Active Connection:  %s'%(url),'','',Images +'MyAcc.png')
        for url in match5:
                AddAccInfo('Max Connection:  %s'%(url),'','',Images +'MyAcc.png') 
        for url in match6:
                AddAccInfo('Trial: Yes','','',Images +'MyAcc.png')
        AddAccInfo('Find us on Facebook - Search Boyz Toyz Club','','','')
        AddAccInfo('or','','','') 
        AddAccInfo('Sign up here - www.boyztoyz.club','','','')
Example #27
File: lib.py Project: Dsls/domogik
 def _list_clients(self):
     """ List all the clients (alives and deads) in the log file
     """
     msg =  "\n| Client id             | Client source                      | Interval | Last seen                  | Status  | Nb OK  | Nb KO  |"
     msg += "\n|-----------------------+------------------------------------+----------+----------------------------+---------+--------+--------|"
     for client in self._client_list:
         msg += "\n| %-21s | %-34s | %8s | %25s | %-7s | %6s | %6s |" \
                        % (client['id'],
                           client['source'],
                           client['interval'],
                           datetime.fromtimestamp(client['last_seen']).isoformat(),
                           client['alive'],
                           client['nb_valid_messages'],
                           client['nb_invalid_messages'])
     msg += "\n|-----------------------+------------------------------------+----------+----------------------------+---------+--------+--------|"
     for client in self._dead_client_list:
         msg += "\n| %-21s | %-34s | %8s | %25s | %-7s | %6s | %6s |" \
                        % (client['id'],
                           client['source'],
                           client['interval'],
                           datetime.fromtimestamp(client['last_seen']).isoformat(),
                           client['alive'],
                           client['nb_valid_messages'],
                           client['nb_invalid_messages'])
     return msg
Example #28
    def process_log(self, log_file):
        ''' Processes an EDEX log and creates a new log file with only the relevant, 
            searchable data. '''

        new_log_file = "/".join((EDEX['processed_log_path'], log_file.split("/")[-1] + ".p"))

        # Check to see if the processed log file already exists.
        if os.path.isfile(new_log_file):
            log_file_timestamp = datetime.fromtimestamp(
                os.path.getmtime(log_file))
            new_log_file_timestamp = datetime.fromtimestamp(
                os.path.getmtime(new_log_file))
            ''' Check to see if the original log file has been modified since being previously 
                processed. '''
            if log_file_timestamp < new_log_file_timestamp:
                self.logger.info(
                    "%s has already been processed." % log_file)
                return
            else:
                self.logger.info((
                    "%s has already been processed, "
                    "but has been modified and will be re-processed."
                    ) % log_file)

        result = shell.zgrep("Finished Processing file", log_file)[1]
        with open(new_log_file, "w") as outfile:
            for row in result:
                outfile.write(row)
        self.logger.info(
            "%s has been processed and written to %s." % (log_file, new_log_file))
Example #29
    def create_timesheet(self, data, user):

        ts = Timesheet(**data)

        timesheets = self.filter(start__lte=ts.end, end__gte=ts.start, employee=ts.employee)
        assert timesheets.count() <= 0, "There is already a timesheet for employee %s for this pay period." % str(ts.employee)

        ts.full_clean()
        ts.save()

        # Need to make sure to encompass the entire day.
        start = datetime.fromtimestamp(ts.start)
        start = start.replace(hour=00)
        start = start.replace(minute=00)
        start = start.replace(second=00)

        end = datetime.fromtimestamp(ts.end)
        end = end.replace(hour=23)
        end = end.replace(minute=59)
        end = end.replace(second=59)

        shifts = Shift.objects.filter(time_in__gte=start, time_out__lte=end, deleted=False, employee=ts.employee)
        ts.shifts = shifts

        return ts
Example #30
    def new(self, **kwargs):
        _dirty_fields = []
        _values = []

        for k, v in kwargs.iteritems():
            _dirty_fields.append(k)
            if k in self.__time_fields:
                v = datetime.now() if v is None else v
                if not isinstance(v, datetime):
                    v = datetime.fromtimestamp(v)
            _values.append(v)
        # init __time_fields
        for k in self.__time_fields:
            if kwargs.has_key(k):
                continue
            _dirty_fields.append(k)
            v = int(time.time())
            kwargs[k] = v
            v = datetime.fromtimestamp(v)
            _values.append(v)
        # init deleted
        if self.__name in TABLE_HAD_DELETED:
            kwargs['deleted'] = 0
            _dirty_fields.append('deleted')
            _values.append(0)

        self.__dict__.update(kwargs)
        _sql = 'INSERT INTO %s (' % self.table  + ','.join(_dirty_fields) + ') VALUES ('  + ','.join(['%s'] * len(_values)) + ')'
 
        if self.__name in TABLE_NO_AI:
            yield POOL.insert(_sql, _values)
            self.__attrib_id = kwargs['id']
        else:
            self.__attrib_id = yield POOL.insert(_sql, _values)
        raise defer.returnValue(self.__attrib_id)
Example #31
def detail(graph_device,
           graph_plugin,
           graph_type,
           time_from,
           time_to,
           graph_mode="aggregated",
           cdefop=None):
    """
    Show the details of a single graph
    """

    device_map = device_index()
    graph_title = []
    graph_comment = ""

    # Got datetime values from post? Convert them
    if request.method == 'POST' and request.form.get('time_from'):
        time_from = int(
            mktime(
                datetime.strptime(request.form['time_from'],
                                  settings.DATE_FORMAT).timetuple()))

    if request.method == 'POST' and request.form.get('time_to'):
        time_to = int(
            mktime(
                datetime.strptime(request.form['time_to'],
                                  settings.DATE_FORMAT).timetuple()))

    # User defined graph comment
    if request.method == 'POST' and request.form.get('comment'):
        graph_comment = request.form['comment']
    else:
        graph_comment = rrdscout.Graph.generate_comment(
            graph_device.split(','))

    # Shall we export the graph data instead of generating an image?
    if request.method == 'POST' and request.form.get('export_graph'):
        return export_data(graph_device, graph_plugin, graph_type, time_from,
                           time_to)

    # if device is a number its an index to the ctime sorted device directory
    try:
        graph_title.append(device_map.get(int(graph_device), ""))
    except ValueError:
        pass

    # if device is a list
    if not graph_title:
        try:
            graph_title = sorted(
                map(lambda x: device_map.get(int(x), x),
                    graph_device.split(',')))
        except ValueError:
            pass

    # Still no label?
    if not graph_title:
        graph_title = [graph_device]

    # calculate timespans
    time_from_day = int(time()) - 60 * 60 * 24
    time_from_week = int(time()) - 60 * 60 * 24 * 7
    time_from_month = int(time()) - 60 * 60 * 24 * 30
    time_from_year = int(time()) - 60 * 60 * 24 * 365

    return render_template(
        "detail.html",
        page_name=settings.PAGE_NAME + ": " + gettext("Data source detail"),
        graph_title=graph_title,
        graph_device=graph_device,
        graph_plugin=graph_plugin,
        graph_type=graph_type,
        graph_comment=graph_comment,
        graph_mode=graph_mode,
        time_from=time_from,
        time_to=time_to,
        time_to_str=datetime.fromtimestamp(time_to).strftime(
            settings.DATE_FORMAT),
        time_from_str=datetime.fromtimestamp(time_from).strftime(
            settings.DATE_FORMAT),
        time_from_day=time_from_day,
        time_from_week=time_from_week,
        time_from_month=time_from_month,
        time_from_year=time_from_year)
Example #32
def tstamp_to_str(timestamp):
    dt_stamp = datetime.fromtimestamp(timestamp)
    return dt_stamp.astimezone(TZ).strftime('%Y-%m-%d %I:%M:%S %p')
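datetime.fromtimestamp() without a tz argument returns a naive local datetime, which astimezone(TZ) then converts; TZ here is a timezone object defined elsewhere in that project. An equivalent, slightly more direct sketch passes the timezone straight to fromtimestamp (standard-library zoneinfo, Python 3.9+, with an assumed zone name):

from datetime import datetime
from zoneinfo import ZoneInfo

TZ = ZoneInfo("America/New_York")        # assumption: stand-in for the project's TZ

def tstamp_to_str(timestamp):
    return datetime.fromtimestamp(timestamp, tz=TZ).strftime('%Y-%m-%d %I:%M:%S %p')

print(tstamp_to_str(1600000000))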
Example #33
def time_ago(t, now=None):
    now = datetime.fromtimestamp(now) if now else datetime.now()
    delta = now - datetime.fromtimestamp(t)
    return time_duration(delta, ago=True)
Example #34
    async def request(self, method, endpoint, authorize=True, **kwargs):
        r"""주어진 길드 수를 KoreanBots API로 보냅니다.

        파라미터
        -------------
        method: str
            HTTP 리퀘스트 메소드
        url: str
            KoreanBots API의 엔드포인트
        authorize: 선택[bool]
            API 리퀘스트에 토큰을 함께 전송할지 입니다.
            기본값은 True입니다.

        예외
        --------
        .errors.AuthorizeError
            토큰이 필요한 엔드포인트지만, 클라이언트에 토큰이 주어지지 않았습니다.
        .errors.Unauthrized
            인증되지 않았습니다, KoreanBots 토큰이 잘못되었을 수 있습니다.
        .errors.Forbidden
            접근 권한이 없습니다.
        .errors.NotFound
            찾을 수 없습니다, 파라미터를 확인하세요.
        .errors.HTTPException
            알수없는 HTTP 에러가 발생했습니다, 주로 400에 발생합니다.
        """
        url = self.BASE + endpoint
        kwargs['headers'] = {"content-type": "application/json"}
        if authorize and self.token:
            kwargs['headers']['token'] = self.token
        elif authorize and not self.token:
            raise AuthorizeError('this endpoint required koreanbots token.')

        if not self._globalLimit.is_set():
            await self._globalLimit.wait()

        for tries in range(5):
            async with ClientSession() as session:
                async with session.request(method, url, **kwargs) as response:
                    log.debug(f'{method} {url} returned {response.status}')
                    Data = await detectJson(response)

                    remainLimit = response.headers.get('x-ratelimit-remaining')
                    if remainLimit == 0 or response.status == 429:
                        resetLimitTimestamp = int(
                            response.headers.get('x-ratelimit-reset'))
                        resetLimit = datetime.fromtimestamp(
                            resetLimitTimestamp)

                        retryAfter = resetLimit - datetime.now()

                        log.warning(
                            r"we're now rate limited. retrying after %.2f seconds",
                            retryAfter.total_seconds())
                        if not endpoint == '/bot/servers':
                            self._globalLimit.clear()

                        await sleep(retryAfter.total_seconds())
                        if not endpoint == '/bot/servers':
                            self._globalLimit.set()

                        continue

                    if 200 <= response.status < 300:
                        return Data

                    if response.status == 401:
                        raise Unauthorized(response, Data)
                    elif response.status == 403:
                        raise Forbidden(response, Data)
                    elif response.status == 404:
                        raise NotFound(response, Data)
                    else:
                        raise HTTPException(response, Data)
        raise HTTPException(response, Data)
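On a 429 (or when the remaining-limit header reaches zero) the client above reads x-ratelimit-reset, an epoch timestamp, converts it with fromtimestamp, and sleeps for the difference. A minimal sketch of that wait calculation with a hypothetical reset value:

import time
from datetime import datetime

reset_timestamp = int(time.time()) + 5                  # hypothetical x-ratelimit-reset value
retry_after = datetime.fromtimestamp(reset_timestamp) - datetime.now()
print("retrying after %.2f seconds" % retry_after.total_seconds())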
Example #35
def parse_date(json_date):
    """parses out the date in the json string and returns a python date object"""
    dateStr = json_date[6:len(json_date) - 2]
    return datetime.fromtimestamp(int(dateStr) / 1000)
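The slicing assumes the ASP.NET-style JSON date string "/Date(<milliseconds>)/"; the millisecond value is divided by 1000 before being handed to fromtimestamp. A quick usage sketch with a made-up payload:

from datetime import datetime

json_date = "/Date(1600000000000)/"      # hypothetical input
print(datetime.fromtimestamp(int(json_date[6:-2]) / 1000))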
Example #36
try: name
except NameError:
    print("No directory indicated\nQuitting...")
else:
    print("Director is:" + name)
    


parentDir = Path(name)
exeDir = Path("C:/Users/RobinPM/Documents/gits/DICOM_Nii_T1/execs")

logging.info( "Parent Directory is:\n" + str(parentDir))
os.chdir(parentDir)
folderList = os.listdir()  # Makes a list of the files in the parent directory
logging.info(str(datetime.fromtimestamp(time.time()))+ " List of Files Found:\n" + str(folderList))

for fIn in pb( range(0, len(folderList))):

        # Set up directory where DICOM folders are
    workDir = Path.joinpath(parentDir, folderList[fIn]) # to be changed
    logging.info( "currently processing " +  str(folderList[fIn]))

    winPath = workDir

    # if (' ' in workDir):
    #     winPath = winPath.replace(' ', '_')
    #     logging.error('The folder name had an invalid character, changing name to:\n' + winPath)

    if ('^' in str(workDir)):
       winPath = str(winPath).replace('^', '^^')
Example #37
 def __str__(self):
     d = datetime.fromtimestamp(self.timestamp).strftime("%d/%m %H:%M:%S")
     return f"@{self.author} [{d}] -- {self.content}"
Example #38
    def parse(self, proxy=False):
        result = []

        # avoiding blocks
        headers = {
            'user-agent': feed.UserAgent_random().lstrip(),
            'referer': 'https://www.google.com/search?newwindow=1&q='+self.href
        }
        if proxy != False:
            proxyDict = {
                "http": "http://" + proxy, 
                "https": "https://" + proxy,
            }
        else:
            proxyDict = {}

        # custom ранобэ.рф API import
        if self.href.find('http://xn--80ac9aeh6f.xn--p1ai/') != -1:
            request = f"https://xn--80ac9aeh6f.xn--p1ai/api/v2/books/{ self.href[31:-1] }/chapters"
            request = requests.get(request).json()  # (request, headers=headers, proxies=proxyDict)

            for each in request['items']:
                # ignoring payed chapters
                if each['availabilityStatus'] == 'free':
                    result.append(feedUpdate(
                        name=each["title"],
                        href="http://xn--80ac9aeh6f.xn--p1ai"+each["url"],
                        datetime=datetime.strptime(each["publishTime"], '%Y-%m-%d %H:%M:%S'),
                        title=self.title))

        # custom instagram import
        if self.href.find('https://www.instagram.com/') != -1:
            if not randint(0, 100) == 0:
                return []
            try:
                request = requests.get(self.href, headers=headers, proxies=proxyDict)
                request = BeautifulSoup(request.text, "html.parser")

                for each in request.find_all('script'):
                    data = 'window._sharedData = '
                    if each.text.find(data) != -1:
                        # preparing JSON
                        data = each.text.find(data) + len(data)  # data start position
                        data = each.text[data:-1]  # -1 is for removing ; in the end
                        data = json.loads(data)

                        # selecting data from JSON
                        data = data['entry_data']['ProfilePage'][0]['graphql']
                        data = data['user']['edge_owner_to_timeline_media']['edges']

                        # parsing data from JSON
                        for each in data:
                            # avoiding errors caused by empty titles
                            try:
                                result_name = each['node']['edge_media_to_caption']['edges'][0]['node']['text']
                            except IndexError:
                                result_name = 'no title'

                            result.append(feedUpdate(
                                name=result_name,
                                href="http://instragram.com/p/"+each['node']['shortcode'],
                                datetime=datetime.fromtimestamp(each['node']['taken_at_timestamp']),
                                title=self.title))
            except (KeyError, requests.exceptions.ProxyError, requests.exceptions.SSLError) as err:
                return []

        # custom RSS YouTube converter (link to feed has to be converted manually)
        elif self.href.find('https://www.youtube.com/channel/') != -1:
            self.href_title = self.href[:]
            # 32 = len('https://www.youtube.com/channel/')
            # 7 = len('/videos')
            self.href = "https://www.youtube.com/feeds/videos.xml?channel_id=" + self.href[32:-7]
            result = feed.parse(self)

        # custom RSS readmanga converter (link to feed has to be converted manually to simplify feed object creation)
        elif self.href.find('http://readmanga.me/') != -1 and self.href.find('readmanga.me/rss/manga') == -1 and self.href_title == None:
            # 20 = len('http://readmanga.me/')
            self.href = "feed://readmanga.me/rss/manga?name=" + self.href[20:]
            result = feed.parse(self)

        # custom RSS mintmanga converter (link to feed has to be converted manually to simplify feed object creation)
        elif self.href.find('http://mintmanga.com/') != -1 and self.href.find('mintmanga.com/rss/manga') == -1 and self.href_title == None:
            # 21 = len('http://mintmanga.com/')
            self.href = "feed://mintmanga.com/rss/manga?name=" + self.href[21:]
            result = feed.parse(self)

        # custom RSS deviantart converter (link to feed has to be converted manually to simplify feed object creation)
        elif self.href.find('https://www.deviantart.com/') != -1:
            self.href_title = self.href[:]
            # 27 = len('https://www.deviantart.com/')
            # 9 = len('/gallery/')
            self.href = self.href[27:-9]
            self.href = "http://backend.deviantart.com/rss.xml?q=gallery%3A" + self.href
            result = feed.parse(self)

        # custom fantasy-worlds.org loader
        elif self.href.find('https://fantasy-worlds.org/series/') != -1:
            strainer = SoupStrainer('div', attrs={'class': 'rightBlock'})

            request = requests.get(self.href, headers=headers, proxies=proxyDict)
            request = BeautifulSoup(request.text, "html.parser", parse_only=strainer)

            for each in request.find('ul').find('li').find('ul').find('li').find('ul').find_all('li'):
                result.append(feedUpdate(
                    name=f"{self.title} {each.text[:each.text.find(' // ')]}",
                    href=each.find('a')['href'],
                    datetime=datetime.now(),  # <=== fake date
                    title=self.title))

        # custom pikabu import
        elif self.href.find('pikabu.ru/@') != -1:
            # try:
            strainer = SoupStrainer('div', attrs={'class': 'stories-feed__container'})

            request = requests.get(self.href, headers=headers, proxies=proxyDict)
            request = BeautifulSoup(request.text, "html.parser", parse_only=strainer)

            for each in request.find_all('article'):
                try:
                    result_datetime = each.find('time')['datetime'][:-3]+"00"
                    result_datetime = datetime.strptime(result_datetime, '%Y-%m-%dT%H:%M:%S%z')

                    result.append(feedUpdate(
                        name=each.find('h2', {'class': "story__title"}).find('a').getText(),
                        href=each.find('h2', {'class': "story__title"}).find('a')['href'],
                        datetime=result_datetime,
                        title=self.title))

                except (TypeError, AttributeError) as err:
                    # advertisement, passing as no need to save it
                    pass
            # except (requests.exceptions.ConnectionError, requests.exceptions.SSLError) as err:
            #     # failed connection, hope it works from time to time
            #     return []

        # # custom fanserials parser
        # elif self.href.find('http://fanserial.net/') != -1 and self.filter is not None:
        #     strainer = SoupStrainer('ul', attrs={'id': 'episode_list'})
        #
        #     request = requests.get(self.href, headers=headers, proxies=proxyDict)
        #     request = BeautifulSoup(request.text, "html.parser", parse_only=strainer)
        #     print(request)
        #
        #     for each in request.find_all('li'):
        #         print(each)
        #         result_href = ''
        #         for each_span in each.find('div').find('div', attrs={'class': 'serial-translate'}).find_all('span'):
        #             result_href = 'http://fanserial.tv' + each_span.find('a').get('href')
        #
        #         result.append(feedUpdate(
        #             name=each.find('div', attrs={'class': 'field-description'}).find('a').text,
        #             href=result_href,
        #             datetime=datetime.now(),  # <=== fake date
        #             title=self.title))

        # default RSS import
        else:
            proxyDict = urllib.request.ProxyHandler(proxyDict)

            request = feedparser.parse(self.href, request_headers=headers, handlers=[proxyDict])

            for each in request["items"]:
                # HREF RESULT
                if self.title == "Expresso":
                    result_href = each["summary"]

                    start = result_href.find('https://expres.co/')
                    end = result_href.find('"')

                    result_href = result_href[start:end]
                else:
                    result_href = each["links"][0]["href"]

                # DATE RESULT: parsing dates
                if "published" in each:
                    result_datetime = each["published"]
                elif "updated" in each:
                    result_datetime = each["updated"]
                else:
                    print(f"result_datetime broke for { self.title }")
                
                tzinfos = {'PDT': gettz("America/Los_Angeles"), 'PST': gettz("America/Juneau")}
                result_datetime = parser.parse(result_datetime, tzinfos=tzinfos)

                # APPEND RESULT
                result.append(feedUpdate(
                    name=each["title_detail"]["value"],
                    href=result_href,
                    datetime=result_datetime,
                    title=self.title))

        # universal postfixes
        result_filtered = []
        for each in result:
            # FILTERING: passing item cycle if filter does not match
            if self.filter is not None:
                if each.name.find(self.filter) == -1 or each.href.find(self.filter) == -1:
                    continue

            # DATETIME fixes
            # fix timezone unaware
            # if each.datetime.tzinfo is not None and each.datetime.tzinfo.utcoffset(each.datetime) is not None:
            #     each_dt = localtime(each.datetime)
            #     each.datetime = datetime(each_dt.year, each_dt.month, each_dt.day,
            #          each_dt.hour, each_dt.minute, each_dt.second)
                     
            # if each.datetime.tzinfo is not None and each.datetime.tzinfo.utcoffset(each.datetime) is not None:
            #     print("!!!! WARNING !!!!")
            # # add DELAY
            # if type(self.delay) is not type(None):
            #     each.datetime += timedelta(hours=self.delay)

            # NAME fixes
            each.name = ' '.join(each.name.split())
            each.name = each.name[:140]  # SQLite does not support max-length
            # extra symbols
            if each.title == 'Shadman':
                each.name = each.name[:each.name.find('(')-1]
            elif each.title == 'Apple' and each.name[-len('Apple'):] == 'Apple':
                # - symbol can be a variety of different symbols
                # 8 = len(' - Apple')
                each.name = each.name[:-8]
            elif each.title == 'LastWeekTonight':
                end = each.name.find(': Last Week Tonight with John Oliver (HBO)')
                if end != -1:
                    each.name = each.name[:end]

            result_filtered.append(each)

        return result_filtered
import os
import psutil
from datetime import datetime
print("主机信息".center(50, '*'))
info = os.uname()
print("""
    操作系统: %s
    主机名: %s
    内核版本: %s
    硬件架构: %s
""" % (info.sysname, info.nodename, info.release, info.machine))

print("开机信息".center(50, '*'))
boot_time = psutil.boot_time()  # 返回时间戳
#  将时间戳转换为datetime类型的时间2019-01-15 08:59:01
boot_time_obj = datetime.fromtimestamp(boot_time)
# print(type(boot_time_obj))
now_time = datetime.now()
delta_time = now_time - boot_time_obj
# print(type(delta_time))
print("开机时间: ", boot_time_obj)
print("当前时间: ", str(now_time).split('.')[0])  # str是为了将对象转换为字符串, 实现分离;
# split分离是为了去掉毫秒
print("开机时长: ", str(delta_time).split('.')[0])  # split分离是为了去掉毫秒

print("当前登陆用户".center(50, '*'))
login_users = psutil.users()
# 集合生成式实现去重
print({user.name for user in login_users})

info = psutil.users()[0]
Example #40
	final_login_time = 0
	for i,row in enumerate(user_sessions_reader):
		if i > 0:
			user_id = row[2]
			if user_id != "":
				if user_id in user_logins:
					current_login_time = int(float(row[0]))
					if current_login_time - user_logins[user_id]["last_login_time"] > secs_per_day:
						user_logins[user_id]["days_with_login"] += 1
						user_logins[user_id]["last_login_time"] = current_login_time
				else:
					user_logins[row[2]] = {"first_login_time":int(float(row[0])),"last_login_time":int(float(row[0])),"days_with_login":0,}
			final_login_time = max(final_login_time,int(float(row[0])))


cohort_three_end = datetime.fromtimestamp(final_login_time).strftime('%m/%d/%Y') 

cohorts = {1:{"start":time.mktime(datetime.strptime(cohort_one_start, "%m/%d/%Y").timetuple()), "end":time.mktime(datetime.strptime(cohort_one_end, "%m/%d/%Y").timetuple()),"n":0,"av_stickiness":0,"n_active":0},2:{"start":time.mktime(datetime.strptime(cohort_two_start, "%m/%d/%Y").timetuple()),"end":time.mktime(datetime.strptime(cohort_two_end, "%m/%d/%Y").timetuple()),"n":0,"av_stickiness":0,"n_active":0},3:{"start":time.mktime(datetime.strptime(cohort_three_start, "%m/%d/%Y").timetuple()),"end":cohort_three_end,"n":0,"av_stickiness":0,"n_active":0}}

for key in user_logins:
	if user_logins[key]["first_login_time"] >= cohorts[2]["start"] and user_logins[key]["first_login_time"] < cohorts[3]["start"]:
		user_logins[key]["cohort"] = 2
	else:
		user_logins[key]["cohort"] = 3

stickiness = {}
for key in user_logins:
	delta = datetime.fromtimestamp(final_login_time) - datetime.fromtimestamp(user_logins[key]["first_login_time"])
	user_logins[key]["days_since_signup"] = delta.days
	if user_logins[key]["days_since_signup"] != 0:
		stickiness[key] = float(user_logins[key]["days_with_login"])/user_logins[key]["days_since_signup"]
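
For context, the stickiness computed above is just days-with-a-login divided by days since signup; a self-contained sketch of that ratio, with made-up timestamps, looks like this:

from datetime import datetime

# Hypothetical timestamps, for illustration only (exactly 7 days apart).
first_login_ts = 1262304000
final_login_ts = 1262908800
days_with_login = 3

days_since_signup = (datetime.fromtimestamp(final_login_ts)
                     - datetime.fromtimestamp(first_login_ts)).days
if days_since_signup:
    stickiness = float(days_with_login) / days_since_signup  # 3 / 7 ≈ 0.43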
Ejemplo n.º 41
0
    def load_har_entry(self, har_entry: MutableMapping[str, Any], all_requests: List[str], rendered_html: Optional[BytesIO]=None) -> None:
        """Load one entry of the HAR file, initialize most of the features of the node"""
        if not self.name:
            # We're in the actual root node
            # NOTE: by the HAR specs: "Absolute URL of the request (fragments are not included)."
            self.add_feature('name', unquote_plus(har_entry['request']['url']))

        splitted_url = urlparse(self.name)
        if splitted_url.scheme == 'blob':
            # this is a new weird feature, but it seems to be usable as a URL, so let's do that
            self.add_feature('url_split', urlparse(splitted_url.path))
        else:
            self.add_feature('url_split', splitted_url)

        if rendered_html:
            self.add_feature('rendered_html', rendered_html)

        # If the URL contains a fragment (i.e. something after a #), it is stripped in the referer.
        # So we need an alternative URL to do a lookup against
        self.add_feature('alternative_url_for_referer', self.name.split('#')[0])

        if '.' in har_entry['startedDateTime']:
            self.add_feature('start_time', datetime.strptime(har_entry['startedDateTime'], '%Y-%m-%dT%H:%M:%S.%f%z'))
        else:
            self.add_feature('start_time', datetime.strptime(har_entry['startedDateTime'], '%Y-%m-%dT%H:%M:%S%z'))

        self.add_feature('pageref', har_entry['pageref'])

        self.add_feature('time', timedelta(milliseconds=har_entry['time']))
        self.add_feature('time_content_received', self.start_time + self.time)  # Instant the response is fully received (and the processing of the content by the browser can start)
        self.add_feature('hostname', self.url_split.hostname)

        if not self.hostname:
            self.logger.warning(f'Something is broken in that node: {har_entry}')

        try:
            ipaddress.ip_address(self.hostname)
            self.add_feature('hostname_is_ip', True)
        except ValueError:
            # Not an IP
            pass

        if not hasattr(self, 'hostname_is_ip'):
            tld = get_public_suffix_list().get_tld(self.hostname, strict=True)
            if tld:
                self.add_feature('known_tld', tld)
            else:
                self.logger.info(f'###### No TLD/domain broken {self.name}')

        self.add_feature('request', har_entry['request'])
        # Try to get a referer from the headers
        for h in self.request['headers']:
            if h['name'].lower() == 'referer':
                self.add_feature('referer', unquote_plus(h['value']))
            if h['name'].lower() == 'user-agent':
                self.add_feature('user_agent', h['value'])

        if 'user_agent' not in self.features:
            self.add_feature('user_agent', '')

        if 'method' in self.request and self.request['method'] == 'POST' and 'postData' in self.request:
            # If the content is empty, we don't care
            if self.request['postData']['text']:
                # We have a POST request, the data can be base64 encoded or urlencoded
                posted_data: Union[str, bytes] = self.request['postData']['text']
                if 'encoding' in self.request['postData']:
                    if self.request['postData']['encoding'] == 'base64':
                        if len(posted_data) % 4:
                            # at this point, we have a string for sure
                            posted_data += '==='  # type: ignore
                        posted_data = b64decode(posted_data)
                    else:
                        self.logger.warning(f'Unexpected encoding: {self.request["postData"]["encoding"]}')

                if 'mimeType' in self.request['postData']:
                    if self.request['postData']['mimeType'].startswith('application/x-www-form-urlencoded'):
                        # 100% sure there will be websites where decode will fail
                        if isinstance(posted_data, bytes):
                            try:
                                posted_data = posted_data.decode()
                            except Exception:
                                self.logger.warning(f'Expected urlencoded, got garbage: {posted_data!r}')
                        if isinstance(posted_data, str):
                            posted_data = unquote_plus(posted_data)
                    elif self.request['postData']['mimeType'].startswith('application/json') or self.request['postData']['mimeType'].startswith('application/csp-report'):
                        try:
                            posted_data = json.loads(posted_data)
                        except Exception:
                            self.logger.warning(f"Expected json, got garbage: {self.request['postData']['mimeType']} - {posted_data!r}")

                    elif self.request['postData']['mimeType'].startswith('multipart/form-data'):
                        # FIXME multipart content (similar to email). Not totally sure what to do with it right now.
                        pass
                    elif self.request['postData']['mimeType'].startswith('application/x-protobuffer'):
                        # FIXME If possible, decode?
                        pass
                    elif self.request['postData']['mimeType'].startswith('text'):
                        # We got text, keep what we already have
                        pass
                    elif self.request['postData']['mimeType'] == '?':
                        # Just skip it, no need to go in the warnings
                        pass
                    elif self.request['postData']['mimeType'] == 'application/octet-stream':
                        # Should flag it.
                        pass
                    else:
                        # Weird stuff: Image/GIF application/unknown application/grpc-web+proto
                        self.logger.warning(f'Unexpected mime type: {self.request["postData"]["mimeType"]}')

                # The data may be json, try to load it
                try:
                    posted_data = json.loads(posted_data)
                except Exception:
                    pass

                if isinstance(posted_data, bytes):
                    # Try to decode it as utf-8
                    try:
                        posted_data = posted_data.decode('utf-8')
                    except Exception:
                        pass
                self.add_feature('posted_data', posted_data)

        self.add_feature('response', har_entry['response'])

        self.add_feature('response_cookie', har_entry['response']['cookies'])
        if self.response_cookie:
            self.add_feature('set_third_party_cookies', False)
            # https://developer.mozilla.org/en-US/docs/Web/HTTP/headers/Set-Cookie
            # Cookie name must not contain "=", so we can use it safely
            self.add_feature('cookies_received', [])
            for cookie in self.response_cookie:
                is_3rd_party = False
                # If the domain is set, the cookie will be sent in any request to that domain, and any related subdomains
                # Otherwise, it will only be sent to requests to the exact hostname
                # There are other limitations, like secure and path, but in our case, we won't care about it for now as we mainly want to track where the cookies are sent
                if 'domain' in cookie and cookie['domain']:
                    cookie_domain = cookie['domain']
                    if cookie_domain[0] == '.':
                        cookie_domain = cookie_domain[1:]
                else:
                    cookie_domain = self.hostname
                if not self.hostname.endswith(cookie_domain):
                    self.add_feature('set_third_party_cookies', True)
                    is_3rd_party = True
                self.cookies_received.append((cookie_domain, f'{cookie["name"]}={cookie["value"]}', is_3rd_party))

        self.add_feature('request_cookie', har_entry['request']['cookies'])
        if self.request_cookie:
            # https://developer.mozilla.org/en-US/docs/Web/HTTP/headers/Set-Cookie
            # Cookie name must not contain "=", so we can use it safely
            # The content of this feature is initialized in Har2Tree.__init__
            # And it contains a reference to the URL Node the cookies comes from initially
            # (the cookie was in the response of that request)
            self.add_feature('cookies_sent', {})
            for cookie in self.request_cookie:
                self.cookies_sent[f'{cookie["name"]}={cookie["value"]}'] = []

        if not har_entry['response']['content'].get('text') or har_entry['response']['content']['text'] == '':
            # If the content of the response is empty, skip.
            self.add_feature('empty_response', True)
        else:
            self.add_feature('empty_response', False)
            if har_entry['response']['content'].get('encoding') == 'base64':
                self.add_feature('body', BytesIO(b64decode(har_entry['response']['content']['text'])))
            else:
                self.add_feature('body', BytesIO(har_entry['response']['content']['text'].encode()))
            self.add_feature('body_hash', hashlib.sha512(self.body.getvalue()).hexdigest())
            if har_entry['response']['content']['mimeType']:
                self.add_feature('mimetype', har_entry['response']['content']['mimeType'])
            else:
                kind = filetype.guess(self.body.getvalue())
                if kind:
                    self.add_feature('mimetype', kind.mime)
                else:
                    self.add_feature('mimetype', '')

            external_ressources, embedded_ressources = find_external_ressources(self.body.getvalue(), self.name, all_requests)
            if 'rendered_html' in self.features:
                rendered_external, rendered_embedded = find_external_ressources(self.rendered_html.getvalue(), self.name, all_requests)
                # for the external ressources, the keys are always the same
                external_ressources = {initiator_type: urls + rendered_external[initiator_type] for initiator_type, urls in external_ressources.items()}

                # for the embedded ressources, the keys are the mimetypes, they may not overlap
                mimetypes = list(embedded_ressources.keys()) + list(rendered_embedded.keys())
                embedded_ressources = {mimetype: embedded_ressources.get(mimetype, []) + rendered_embedded.get(mimetype, []) for mimetype in mimetypes}

            self.add_feature('external_ressources', external_ressources)
            self.add_feature('embedded_ressources', embedded_ressources)
            filename = Path(self.url_split.path).name
            if filename:
                self.add_feature('filename', filename)
            else:
                self.add_feature('filename', 'file.bin')

            # Common JS redirect we can catch easily
            # NOTE: it is extremely fragile and doesn't work very often but is kinda better than nothing.
            # Source: https://stackoverflow.com/questions/13363174/regular-expression-to-catch-as-many-javascript-redirections-as-possible
            regex = re.compile(br"""((location.href)|(window.location)|(location.replace)|(location.assign))(( ?= ?)|( ?\( ?))("|')([^'"]*)("|')( ?\) ?)?;""", re.I)
            matches = re.findall(regex, self.body.getvalue())
            for m in matches:
                # TODO: new type, redirect_js or something like that
                redirect_url = rebuild_url(self.name, m[9].decode(), all_requests)
                if redirect_url in all_requests:
                    self.add_feature('redirect', True)
                    self.add_feature('redirect_url', redirect_url)

            if 'meta_refresh' in self.external_ressources and self.external_ressources.get('meta_refresh'):
                if self.external_ressources['meta_refresh'][0] in all_requests:
                    # TODO: new type, redirect_html or something like that
                    self.add_feature('redirect', True)
                    self.add_feature('redirect_url', self.external_ressources['meta_refresh'][0])

            if 'javascript' in self.mimetype or 'ecmascript' in self.mimetype:
                self.add_feature('js', True)
            elif self.mimetype.startswith('image'):
                self.add_feature('image', True)
            elif self.mimetype.startswith('text/css'):
                self.add_feature('css', True)
            elif 'json' in self.mimetype:
                self.add_feature('json', True)
            elif 'html' in self.mimetype:
                self.add_feature('html', True)
            elif 'font' in self.mimetype:
                self.add_feature('font', True)
            elif 'octet-stream' in self.mimetype:
                self.add_feature('octet_stream', True)
            elif ('text/plain' in self.mimetype or 'xml' in self.mimetype
                    or 'application/x-www-form-urlencoded' in self.mimetype):
                self.add_feature('text', True)
            elif 'video' in self.mimetype:
                self.add_feature('video', True)
            elif 'audio' in self.mimetype:
                self.add_feature('audio', True)
            elif 'mpegurl' in self.mimetype.lower():
                self.add_feature('livestream', True)
            elif ('application/x-shockwave-flash' in self.mimetype
                    or 'application/x-shockware-flash' in self.mimetype):  # Yes, shockwaRe
                self.add_feature('flash', True)
            elif 'application/pdf' in self.mimetype:
                self.add_feature('pdf', True)
            elif not self.mimetype:
                self.add_feature('unset_mimetype', True)
            else:
                self.add_feature('unknown_mimetype', True)
                self.logger.warning(f'Unknown mimetype: {self.mimetype}')

        # NOTE: Chrome/Chromium/Playwright only feature
        if har_entry.get('serverIPAddress'):
            # check ipv6 format
            if har_entry['serverIPAddress'].startswith('['):
                _ipaddress = har_entry['serverIPAddress'][1:-1]
            else:
                _ipaddress = har_entry['serverIPAddress']
            self.add_feature('ip_address', ipaddress.ip_address(_ipaddress))

        # NOTE: Chrome/Chromium only feature
        if '_initiator' in har_entry:
            if har_entry['_initiator']['type'] == 'other':
                pass
            elif har_entry['_initiator']['type'] == 'parser' and har_entry['_initiator']['url']:
                self.add_feature('initiator_url', unquote_plus(har_entry['_initiator']['url']))
            elif har_entry['_initiator']['type'] == 'script':
                url_stack = self._find_initiator_in_stack(har_entry['_initiator']['stack'])
                if url_stack:
                    self.add_feature('initiator_url', url_stack)
            elif har_entry['_initiator']['type'] == 'redirect':
                # FIXME: Need usecase
                raise Exception(f'Got a redirect! - {har_entry}')
            else:
                # FIXME: Need usecase
                raise Exception(har_entry)

        # NOTE: Playwright only feature
        if '_securityDetails' in har_entry and har_entry.get('_securityDetails'):
            if 'validFrom' in har_entry['_securityDetails']:
                har_entry['_securityDetails']['validFrom'] = datetime.fromtimestamp(har_entry['_securityDetails']['validFrom'])
            if 'validTo' in har_entry['_securityDetails']:
                har_entry['_securityDetails']['validTo'] = datetime.fromtimestamp(har_entry['_securityDetails']['validTo'])
            self.add_feature('security_details', har_entry['_securityDetails'])

        if har_entry['response']['redirectURL']:
            self.add_feature('redirect', True)
            redirect_url = har_entry['response']['redirectURL']
            # Rebuild the redirect URL so it matches the entry that should be in all_requests
            redirect_url = rebuild_url(self.name, redirect_url, all_requests)
            # At this point, we should have a URL available in all_requests...
            if redirect_url in all_requests:
                self.add_feature('redirect_url', redirect_url)
            else:
                # ..... Or not. Unable to find a URL for this redirect
                self.add_feature('redirect_to_nothing', True)
                self.add_feature('redirect_url', har_entry['response']['redirectURL'])
                self.logger.warning('Unable to find that URL: {original_url} - {original_redirect} - {modified_redirect}'.format(
                    original_url=self.name,
                    original_redirect=har_entry['response']['redirectURL'],
                    modified_redirect=redirect_url))
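
The two strptime branches near the top of load_har_entry exist because HAR files may or may not include fractional seconds in startedDateTime; a standalone sketch of that fallback (Python 3.7+, where %z accepts offsets with a colon or a trailing Z):

from datetime import datetime

def parse_started_datetime(value):
    """Parse a HAR startedDateTime, with or without fractional seconds."""
    fmt = '%Y-%m-%dT%H:%M:%S.%f%z' if '.' in value else '%Y-%m-%dT%H:%M:%S%z'
    return datetime.strptime(value, fmt)

# Both of these return timezone-aware datetimes:
parse_started_datetime('2021-03-01T10:15:30.123+00:00')
parse_started_datetime('2021-03-01T10:15:30+00:00')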
Ejemplo n.º 42
0
def test_get_eth2_staking_deposits_fetch_from_db(  # pylint: disable=unused-argument
        ethereum_manager,
        call_order,
        ethereum_manager_connect_at_start,
        inquirer,
        price_historian,
        freezer,
):
    """
    Test new on-chain requests for existing addresses requires a difference of
    REQUEST_DELTA_TS since last used query range `end_ts`.
    """
    freezer.move_to(datetime.fromtimestamp(EXPECTED_DEPOSITS[0].timestamp))
    ts_now = int(datetime.now().timestamp())  # 1604506685

    database = MagicMock()
    database.get_used_query_range.side_effect = [
        (Timestamp(ts_now - (2 * REQUEST_DELTA_TS)), Timestamp(ts_now)),
        (Timestamp(ts_now - (2 * REQUEST_DELTA_TS)), Timestamp(ts_now)),
        (Timestamp(ts_now - (2 * REQUEST_DELTA_TS)), Timestamp(ts_now)),
    ]
    dbeth2 = MagicMock()
    dbeth2.get_eth2_deposits.side_effect = [
        [],  # no on-chain request, nothing in DB
        [],  # no on-chain request, nothing in DB
        [EXPECTED_DEPOSITS[0]],  # on-chain request, deposit in DB
    ]
    dbeth2_mock = patch('rotkehlchen.chain.ethereum.eth2.DBEth2', return_value=dbeth2)
    with dbeth2_mock, patch(
        'rotkehlchen.chain.ethereum.eth2._get_eth2_staking_deposits_onchain',
    ) as mock_get_eth2_staking_deposits_onchain:
        # 3rd call return
        mock_get_eth2_staking_deposits_onchain.return_value = [EXPECTED_DEPOSITS[0]]

        wait_until_all_nodes_connected(
            ethereum_manager_connect_at_start=ethereum_manager_connect_at_start,
            ethereum=ethereum_manager,
        )
        message_aggregator = MessagesAggregator()

        # First call
        deposit_results_onchain = get_eth2_staking_deposits(
            ethereum=ethereum_manager,
            addresses=[ADDR1],
            msg_aggregator=message_aggregator,
            database=database,
        )
        assert deposit_results_onchain == []
        mock_get_eth2_staking_deposits_onchain.assert_not_called()

        # NB: Move time to ts_now + REQUEST_DELTA_TS - 1s
        freezer.move_to(datetime.fromtimestamp(ts_now + REQUEST_DELTA_TS - 1))

        # Second call
        deposit_results_onchain = get_eth2_staking_deposits(
            ethereum=ethereum_manager,
            addresses=[ADDR1],
            msg_aggregator=message_aggregator,
            database=database,
        )
        assert deposit_results_onchain == []
        mock_get_eth2_staking_deposits_onchain.assert_not_called()

        # NB: Move time to ts_now + REQUEST_DELTA_TS (triggers request)
        freezer.move_to(datetime.fromtimestamp(ts_now + REQUEST_DELTA_TS))

        # Third call
        deposit_results_onchain = get_eth2_staking_deposits(
            ethereum=ethereum_manager,
            addresses=[ADDR1],
            msg_aggregator=message_aggregator,
            database=database,
        )
        assert deposit_results_onchain == [EXPECTED_DEPOSITS[0]]
        mock_get_eth2_staking_deposits_onchain.assert_called_with(
            ethereum=ethereum_manager,
            addresses=[ADDR1],
            msg_aggregator=message_aggregator,
            from_ts=Timestamp(ts_now),
            to_ts=Timestamp(ts_now + REQUEST_DELTA_TS),
        )
Ejemplo n.º 43
0
 def format_from_nanos(self, nanos):
     dt = datetime.fromtimestamp(nanos / 1e9)
     return '{}{:03.0f}'.format(dt.strftime('%Y-%m-%dT%H:%M:%S.%f'), nanos % 1e3)
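
For reference, a self-contained version of the same formatter (self dropped) and a sample call; note that both the division by 1e9 and the modulo by 1e3 go through float, so the trailing digits are only exact for round nanosecond values:

from datetime import datetime

def format_from_nanos(nanos):
    dt = datetime.fromtimestamp(nanos / 1e9)
    return '{}{:03.0f}'.format(dt.strftime('%Y-%m-%dT%H:%M:%S.%f'), nanos % 1e3)

# 1_600_000_000_000_000_000 ns -> '2020-09-13T12:26:40.000000000' on a UTC host.
print(format_from_nanos(1_600_000_000_000_000_000))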
Ejemplo n.º 44
0
def convertTime(timeVar):
    return (datetime.fromtimestamp(mktime(timeVar))).strftime("%d. %B %Y")
def parse(browser, contests, noOfRows):
    parentDirPath = CollectorUtil.createCollectorImageDir(
        "../Data/OpenContestSubmissionImages", log)
    try:
        urlNo = 1
        for url in contests:
            index = 0
            contestData = []
            designerURLs = set()
            contestID = url.split("-")[-1]
            result = CollectorUtil.getCollectorContestFinalParsingStatus(
                databaseConnection, contestID, log)
            invalidEntry = CollectorUtil.getInvalidEntries(
                databaseConnection, contestID, log)
            print(str(urlNo) + "/" + noOfRows)
            invalidEntry = list(invalidEntry)
            urlNo += 1
            if not ((result[0].lower() == 'finished' and result[1] == 1) or
                    (result[0].lower() == 'locked')):
                while True:
                    try:
                        index += 1
                        if str(index) in invalidEntry:
                            continue
                        CollectorUtil.getImageCollectionURL(
                            browser, index, url, log)
                        try:
                            dataScrapped = browser.find_element_by_id(
                                "standalone-design-details-app-data")
                        except Exception:
                            log.info(
                                "Information collection for contest {} is finished. {} entries found"
                                .format(str(contestID), str(index - 1)))
                            break
                        data = {}
                        jsonObj = json.loads(
                            dataScrapped.get_attribute('innerText'))
                        contestID = str(
                            jsonObj['designCollection']['listingid'])

                        imageDetail = jsonObj['designCollection']['_embedded'][
                            'designs'][0]
                        data['ContestId'] = str(contestID)
                        data['ImageURL'] = str(imageDetail['image_url'])
                        data['Eliminated'] = str(imageDetail['is_eliminated'])
                        data['Withdrawn'] = str(imageDetail['is_withdrawn'])
                        data['Deleted'] = str(imageDetail['is_deleted'])
                        data['EntryId'] = str(imageDetail['entry_id'])
                        data['Winner'] = str(imageDetail['is_winner'])
                        data['Rating'] = str(imageDetail['rating'])
                        data['LastUpdated'] = str(
                            datetime.fromtimestamp(
                                imageDetail['time_created']).astimezone().
                            strftime("%Y-%m-%d %H:%M:%S"))
                        # str(datetime.utcnow())
                        data['OwnerProfileUrl'] = str(
                            jsonObj['designCollection']['_embedded']['designs']
                            [0]['_embedded']['user']['profileUrl'])
                        designerURLs.add((data['OwnerProfileUrl']))
                    except Exception as err:
                        print(
                            "An error occurred while parsing dom for contest {} => {}"
                            .format(str(contestID), str(err)))
                        traceback.print_stack()
                        browser.close()
                        raise Exception(
                            "An error occurred while parsing dom for contest {} => {}"
                            .format(str(contestID), str(err)))
                    CollectorUtil.getCollectorImage(contestID, data,
                                                    parentDirPath, True, log)
                    contestData.append(data)
                CollectorUtil.insertCollectorContestImageInfo(
                    databaseConnection, contestData, log)
                CollectorUtil.insertCollectorDesigner(databaseConnection,
                                                      designerURLs)
                CollectorUtil.updateCollectorParsingStatusForFinishedContest(
                    databaseConnection, contestID,
                    result[0].lower() == 'finished')
                databaseConnection.commit()
    except Exception as err:
        log.error("Error occurred while parsing contest id {} => {}".format(
            str(contestID), str(err)))
        traceback.print_stack()
        raise Exception(
            "Error occurred while parsing contest id {} => {}".format(
                str(contestID), str(err)))
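
The small convertTime helper at the top of this example expects a time.struct_time (for instance feedparser's *_parsed fields or time.localtime()); a hedged usage sketch, assuming the time module is imported alongside mktime:

import time

print(convertTime(time.localtime()))                         # e.g. '05. March 2021', depending on today's date
print(convertTime(time.strptime('2021-03-05', '%Y-%m-%d')))  # '05. March 2021' on an English locale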
Ejemplo n.º 46
0
def date_from_timestamp(d):
    try:
        return datetime.fromtimestamp(int(d))
    except Exception:
        return None
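
Because of the broad except, the helper above degrades to None for anything that int() or fromtimestamp() cannot handle; a few sample calls:

date_from_timestamp(1600000000)    # datetime in local time (2020-09-13 12:26:40 UTC)
date_from_timestamp('1600000000')  # digit strings pass through int() as well
date_from_timestamp('not-a-ts')    # None
date_from_timestamp(None)          # None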
Ejemplo n.º 47
0
 def update_taken_at(self):
     self._post.update_taken_at(datetime.fromtimestamp(self._info["taken_at"]))
Ejemplo n.º 48
0
            continue

        if mibName in mibsRevisions:
            dstMibRevision = mibsRevisions[mibName]

        else:
            try:
                _, dstMibRevision = getMibRevision(dstDirectory, mibName)

            except error.PySmiError as ex:
                if verboseFlag:
                    sys.stderr.write('MIB "%s" is not available at the '
                                     'destination directory "%s": %s\r\n' % (os.path.join(srcDirectory, mibFile),
                                                                             dstDirectory, ex))

                dstMibRevision = datetime.fromtimestamp(0)

            mibsRevisions[mibName] = dstMibRevision

        if dstMibRevision >= srcMibRevision:
            if verboseFlag:
                sys.stderr.write('Destination MIB "%s" has the same or newer revision as the '
                                 'source MIB "%s"\r\n' % (os.path.join(dstDirectory, mibName),
                                                          os.path.join(srcDirectory, mibFile)))
            if not quietFlag:
                sys.stderr.write('NOT COPIED %s (%s)\r\n' % (
                    shortenPath(os.path.join(srcDirectory, mibFile)), mibName))

            continue

        mibsRevisions[mibName] = srcMibRevision
Ejemplo n.º 49
0
        property = key.replace('_', '.', 10).replace('-', '.')
        re_obj = re.compile(property)
        for my_key in properties:
            my_key = str(my_key)
            if re.match(re_obj, my_key):
                property = new_properties[key]
                new_conf[my_key] = property

    properties.update(new_conf)
    new_sha = hashlib.sha256(json.dumps(properties)).hexdigest()

    if original_sha == new_sha:
        print "Nothing to update"
        sys.exit(0)

    timestamp = int((datetime.now() - datetime.fromtimestamp(0)).total_seconds()) * 1000
    new_version = 'version{}'.format(timestamp)

    data = {'desired_config': {
        'type': config_name,
        'tag': new_version,
        'properties': properties
    }
    }
    cluster.update(Clusters=data)


def main():
    module = None

    module = AnsibleModule(
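
The version tag built above is the current Unix time in whole seconds scaled to milliseconds, obtained by subtracting the local epoch datetime.fromtimestamp(0) from now; a sketch of an equivalent, more direct computation:

import time
from datetime import datetime

tag_a = int((datetime.now() - datetime.fromtimestamp(0)).total_seconds()) * 1000
tag_b = int(time.time()) * 1000
# Both are whole seconds since the Unix epoch times 1000; they can differ by an
# hour's worth of milliseconds when a DST shift separates the local epoch from now.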
Ejemplo n.º 50
0
def filter_updated(timestamp):
    """Web app, feed template, time in xsd:dateTime format"""
    return datetime.fromtimestamp(int(timestamp)).strftime('%Y-%m-%dT%H:%M:%S')
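
A usage sketch of the template filter above (the result is rendered in local time):

filter_updated(1600000000)    # '2020-09-13T12:26:40' on a UTC host
filter_updated('1600000000')  # int() also accepts digit strings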
Ejemplo n.º 51
0
def checkin(request):
    if request.method != 'POST':
        print 'not post data'
        raise Http404

    data = request.POST
    key = data.get('key')
    serial = data.get('serial')
    serial = serial.upper()

    # Take out some of the weird junk VMware puts in. Keep an eye out in case Apple actually uses these:
    serial = serial.replace('/', '')
    serial = serial.replace('+', '')

    # Are we using Sal for some sort of inventory (like, I don't know, Puppet?)
    try:
        add_new_machines = settings.ADD_NEW_MACHINES
    except:
        add_new_machines = True

    if add_new_machines == True:
        # look for serial number - if it doesn't exist, create one
        if serial:
            try:
                machine = Machine.objects.get(serial=serial)
            except Machine.DoesNotExist:
                machine = Machine(serial=serial)
    else:
        machine = get_object_or_404(Machine, serial=serial)

    if key is None or key == 'None':
        try:
            key = settings.DEFAULT_MACHINE_GROUP_KEY
        except Exception:
            pass

    machine_group = get_object_or_404(MachineGroup, key=key)

    business_unit = machine_group.business_unit
    try:
        historical_setting = SalSetting.objects.get(name='historical_retention')
        historical_days = historical_setting.value
    except SalSetting.DoesNotExist:
        historical_setting = SalSetting(name='historical_retention', value='180')
        historical_setting.save()
        historical_days = '180'
    
    if machine:
        machine.hostname = data.get('name', '<NO NAME>')
        try:
            use_enc = settings.USE_ENC
            # If we're using Sal's Puppet ENC, don't change the machine group,
            # as we're setting it in the GUI
        except:
            use_enc = False

        if use_enc == False:
            machine.machine_group = machine_group
        machine.last_checkin = datetime.now()
        if 'username' in data:
            machine.username = data.get('username')
        if 'base64bz2report' in data:
            machine.update_report(data.get('base64bz2report'))

        if 'sal_version' in data:
            machine.sal_version = data.get('sal_version')

        # extract machine data from the report
        report_data = machine.get_report()
        if 'Puppet_Version' in report_data:
            machine.puppet_version = report_data['Puppet_Version']
        if 'ManifestName' in report_data:
            manifest = report_data['ManifestName']
            machine.manifest = manifest
        if 'MachineInfo' in report_data:
            machine.operating_system = report_data['MachineInfo'].get(
                'os_vers', 'UNKNOWN')
            # some machines are reporting 10.9, some 10.9.0 - make them the same
            if len(machine.operating_system) <= 4:
                machine.operating_system = machine.operating_system + '.0'
        machine.hd_space = report_data.get('AvailableDiskSpace') or 0
        machine.hd_total = int(data.get('disk_size') or 0)

        machine.hd_percent = int(round(((float(machine.hd_total)-float(machine.hd_space))/float(machine.hd_total))*100))
        machine.munki_version = report_data.get('ManagedInstallVersion') or 0
        hwinfo = {}
        if 'SystemProfile' in report_data.get('MachineInfo', []):
            for profile in report_data['MachineInfo']['SystemProfile']:
                if profile['_dataType'] == 'SPHardwareDataType':
                    hwinfo = profile._items[0]
                    break
        if 'Puppet' in report_data:
            puppet = report_data.get('Puppet')
            if 'time' in puppet:
                machine.last_puppet_run = datetime.fromtimestamp(float(puppet['time']['last_run']))
            if 'events' in puppet:
                machine.puppet_errors = puppet['events']['failure']

        if hwinfo:
            machine.machine_model = hwinfo.get('machine_model')
            machine.cpu_type = hwinfo.get('cpu_type')
            machine.cpu_speed = hwinfo.get('current_processor_speed')
            machine.memory = hwinfo.get('physical_memory')

            if hwinfo.get('physical_memory')[-2:] == 'MB':
                memory_mb = float(hwinfo.get('physical_memory')[:-3])
                machine.memory_kb = int(memory_mb * 1024)
            if hwinfo.get('physical_memory')[-2:] == 'GB':
                memory_gb = float(hwinfo.get('physical_memory')[:-3])
                machine.memory_kb = int(memory_gb * 1024 * 1024)
            if hwinfo.get('physical_memory')[-2:] == 'TB':
                memory_tb = float(hwinfo.get('physical_memory')[:-3])
                machine.memory_kb = int(memory_tb * 1024 * 1024 * 1024)

        if 'os_family' in report_data:
            machine.os_family = report_data['os_family']

        machine.save()

        # Remove existing PendingUpdates for the machine
        updates = machine.pending_updates.all()
        updates.delete()
        if 'ItemsToInstall' in report_data:
            for update in report_data.get('ItemsToInstall'):
                display_name = update.get('display_name', update['name'])
                update_name = update.get('name')
                version = str(update['version_to_install'])
                pending_update = PendingUpdate(machine=machine, display_name=display_name, update_version=version, update=update_name)
                pending_update.save()

        # Remove existing PendingAppleUpdates for the machine
        updates = machine.pending_apple_updates.all()
        updates.delete()
        if 'AppleUpdates' in report_data:
            for update in report_data.get('AppleUpdates'):
                display_name = update.get('display_name', update['name'])
                update_name = update.get('name')
                version = str(update['version_to_install'])
                pending_update = PendingAppleUpdate(machine=machine, display_name=display_name, update_version=version, update=update_name)
                pending_update.save()

        # if Facter data is submitted, we need to first remove any existing facts for this machine
        if 'Facter' in report_data:
            facts = machine.facts.all()
            facts.delete()
            # Delete old historical facts

            try:
                datelimit = datetime.now() - timedelta(days=int(historical_days))
                HistoricalFact.objects.filter(fact_recorded__lt=datelimit).delete()
            except Exception:
                pass
            try:
                historical_facts = settings.HISTORICAL_FACTS
            except Exception:
                historical_facts = []
                pass
            # now we need to loop over the submitted facts and save them
            for fact_name, fact_data in report_data['Facter'].iteritems():
                fact = Fact(machine=machine, fact_name=fact_name, fact_data=fact_data)
                fact.save()
                if fact_name in historical_facts:
                    fact = HistoricalFact(machine=machine, fact_name=fact_name, fact_data=fact_data, fact_recorded=datetime.now())
                    fact.save()

        if 'Conditions' in report_data:
            conditions = machine.conditions.all()
            conditions.delete()
            for condition_name, condition_data in report_data['Conditions'].iteritems():
                # if it's a list (more than one result), we're going to concatenate it into one comma-separated string
                if type(condition_data) == list:
                    result = None
                    for item in condition_data:
                        # is this the first loop? If so, no need for a comma
                        if result:
                            result = result + ', '+str(item)
                        else:
                            result = item
                    condition_data = result

                #print condition_data
                condition = Condition(machine=machine, condition_name=condition_name, condition_data=str(condition_data))
                condition.save()

        if 'osquery' in report_data:
            try:
                datelimit = (datetime.now() - timedelta(days=int(historical_days))).strftime("%s")
                OSQueryResult.objects.filter(unix_time__lt=datelimit).delete()
            except:
                pass
            for report in report_data['osquery']:
                unix_time = int(report['unixTime'])
                # Have we already processed this report?
                try:
                    osqueryresult = OSQueryResult.objects.get(hostidentifier=report['hostIdentifier'], machine=machine, unix_time=unix_time, name=report['name'])
                    continue
                except OSQueryResult.DoesNotExist:
                    osqueryresult = OSQueryResult(hostidentifier=report['hostIdentifier'], machine=machine, unix_time=unix_time, name=report['name'])
                    osqueryresult.save()

                for items in report['diffResults']['added']:
                    for column, col_data in items.items():
                        osquerycolumn = OSQueryColumn(osquery_result=osqueryresult, action='added', column_name=column, column_data=col_data)
                        osquerycolumn.save()

                for item in report['diffResults']['removed']:
                    for column, col_data in item.items():
                        osquerycolumn = OSQueryColumn(osquery_result=osqueryresult, action='removed', column_name=column, column_data=col_data)
                        osquerycolumn.save()

        return HttpResponse("Sal report submitted for %s"
                            % data.get('name'))
Ejemplo n.º 52
0
def timestamp_to_datetime(timestamp):
    if timestamp is None:
        return None
    return datetime.fromtimestamp(timestamp)
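
Usage is straightforward; None is passed through so callers can chain it on optional fields:

timestamp_to_datetime(None)        # None
timestamp_to_datetime(1600000000)  # datetime(2020, 9, 13, 12, 26, 40) on a UTC host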
Ejemplo n.º 53
0
Weather_data = getWeather(currentWeather_url,api_key, city_name, units)

Forecast_data = getWeather(forcastWeather_url,api_key, city_name, units)

if Weather_data['cod'] != 200:
    print('Error: ',Weather_data['cod'])
else:
    ts_now = datetime.now()
    c_temp = str(round(Weather_data['main']['temp'],1))
    c_feelsLike = str(round(Weather_data['main']['feels_like'],1))
    c_description = Weather_data['weather'][0]['description']
    c_humidity = str(Weather_data['main']['humidity'])
    c_pressure = str(round((Weather_data['main']['pressure']/33.8637526),1)) # dividing by 33.8637526 converts millibars to inches
    c_windSpeed = str(round(Weather_data['wind']['speed'],1))
    c_windDeg = Weather_data['wind']['deg']
    ts_sunrise = datetime.fromtimestamp(Weather_data['sys']['sunrise'])
    ts_sunset = datetime.fromtimestamp(Weather_data['sys']['sunset'])
    t_sunrise = ts_sunrise.strftime('%l:%M%p')
    t_sunset = ts_sunset.strftime('%l:%M%p')
    print("")
    print(ts_now.strftime('%a:%m/%d'))
    print("\t",ts_now.strftime('%l:%M%p'))
    print ("\t\tTemp: " + c_temp +"F | " +
       "Feels Like: " + c_feelsLike + "F | " +
        c_description.title() + " | \n\t\t"  +
        "Wind: " + winddirection(c_windDeg) , c_windSpeed + " MPH | " +
        " Humidity: " + c_humidity + "% | "  +
        " Barometer: " + c_pressure + "in | \n\t\t" +
        "Sunrise:" + t_sunrise + " | Sunset:" + t_sunset + " |")

Ejemplo n.º 54
0
    async def run_info(self, context):
        """
        Get the event information embedded message
        :param context:
        :return:
        """
        user = User(context.message.author.id, context.guild.id, context)
        event = Event.get_by_guild(user.get_guild())
        config = lib.get('./settings.json')

        # Make sure there is an event
        if event is None:
            return await context.send(user.get_mention() + ', ' + lib.get_string('event:err:noexists', user.get_guild()))

        # Work out which timezone to use when displaying the start and end dates.
        start_date = lib.get_string('na', user.get_guild())
        end_date = lib.get_string('na', user.get_guild())
        user_timezone = user.get_setting('timezone')
        if not user_timezone:
            user_timezone = 'UTC'

        timezone = pytz.timezone(user_timezone)

        # Is it scheduled with start and end dates?
        if event.is_scheduled():
            start = datetime.fromtimestamp(event.get_start_time())
            end = datetime.fromtimestamp(event.get_end_time())
            start_date = start.astimezone(timezone).strftime('%d-%m-%Y %H:%M:%S') + ' ('+user_timezone+')'
            end_date = end.astimezone(timezone).strftime('%d-%m-%Y %H:%M:%S') + ' ('+user_timezone+')'

        # Get the running status
        if event.is_running():
            status = lib.get_string('event:started', user.get_guild())
        else:
            status = lib.get_string('event:notyetstarted', user.get_guild())

        # Get the number of users in the event and how many words they have written in it so far
        writers = len(event.get_users())
        words = event.get_total_wordcount()

        # Get the description of the event and add to the end of the status, or just display the status if the description is empty
        description = event.get_description()
        if description and len(description) > 0:
            description = status + '\n\n' + description
        else:
            description = status

        # Get the thumbnail image to use
        image = event.get_image()
        if not image or len(image) == 0:
            image = config.avatar

        # Build the embedded message.
        embed = discord.Embed(title=event.get_title(), color=event.get_colour(), description=description)
        embed.set_thumbnail(url=image)
        embed.add_field(name=lib.get_string('event:startdate', user.get_guild()), value=start_date, inline=False)
        embed.add_field(name=lib.get_string('event:enddate', user.get_guild()), value=end_date, inline=False)
        embed.add_field(name=lib.get_string('event:numwriters', user.get_guild()), value=str(writers), inline=True)
        embed.add_field(name=lib.get_string('event:numwords', user.get_guild()), value=str(words), inline=True)

        # Send the message
        return await context.send(embed=embed)
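
The fromtimestamp/astimezone pattern above first builds a naive local datetime and then converts it to the user's timezone; a sketch (not the bot's code) of an equivalent that passes the tzinfo straight to fromtimestamp:

import pytz
from datetime import datetime

def format_event_time(ts, tz_name='UTC'):
    """Render a Unix timestamp in the given timezone (sketch only)."""
    tz = pytz.timezone(tz_name)
    return datetime.fromtimestamp(ts, tz=tz).strftime('%d-%m-%Y %H:%M:%S') + ' (' + tz_name + ')'

# format_event_time(1600000000, 'Europe/London') -> '13-09-2020 13:26:40 (Europe/London)'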
Ejemplo n.º 55
0
    def _process_publish_files(
        self,
        sg_publishes,
        publish_template,
        work_template,
        context,
        name_map,
        version_compare_ignore_fields,
        filter_file_key=None,
    ):
        """
        """
        files = {}

        # and add in publish details:
        ctx_fields = context.as_template_fields(work_template)

        for sg_publish in sg_publishes:
            file_details = {}

            # always have a path:
            publish_path = sg_publish["path"]

            # determine the work path fields from the publish fields + ctx fields:
            # The order is important as it ensures that the user is correct if the
            # publish file is in a user sandbox, but we also need to be careful not
            # to overwrite fields that are being ignored when comparing work files.
            publish_fields = publish_template.get_fields(publish_path)
            wp_fields = publish_fields.copy()
            for k, v in ctx_fields.items():
                if k not in version_compare_ignore_fields:
                    wp_fields[k] = v

            # build the unique file key for the publish path.  All files that share the same key are considered
            # to be different versions of the same file.
            file_key = FileItem.build_file_key(
                wp_fields, work_template, version_compare_ignore_fields
            )
            if filter_file_key and file_key != filter_file_key:
                # we can ignore this file completely!
                continue

            # resolve the work path:
            work_path = ""
            try:
                work_path = work_template.apply_fields(wp_fields)
            except TankError as e:
                # unable to generate a work path - this means we are probably missing a field so it's going to
                # be a problem matching this publish up with its corresponding work file!
                work_path = ""

            # copy common fields from sg_publish:
            #
            file_details = dict(
                [(k, v) for k, v in six.iteritems(sg_publish) if k != "path"]
            )

            # get version from fields if not specified in publish file:
            if file_details["version"] == None:
                file_details["version"] = publish_fields.get("version", 0)

            # entity
            file_details["entity"] = context.entity

            # local file modified details:
            if os.path.exists(publish_path):
                try:
                    modified_at = os.path.getmtime(publish_path)
                    file_details["modified_at"] = datetime.fromtimestamp(
                        modified_at, tz=sg_timezone.local
                    )
                except OSError:
                    # ignore OSErrors as it's probably a permissions thing!
                    pass
                file_details["modified_by"] = g_user_cache.get_file_last_modified_user(
                    publish_path
                )
            else:
                # just use the publish info
                file_details["modified_at"] = sg_publish.get("published_at")
                file_details["modified_by"] = sg_publish.get("published_by")

            if not file_details["name"]:
                # make sure all files with the same key have the same name:
                file_details["name"] = name_map.get_name(
                    file_key, publish_path, publish_template, publish_fields
                )

            # add new file item for this publish.  Note that we also keep track of the
            # work path even though we don't know if this publish has a corresponding
            # work file.
            files[(file_key, file_details["version"])] = {
                "key": file_key,
                "work_path": work_path,
                "is_published": True,
                "publish_path": publish_path,
                "publish_details": file_details,
            }
        return files
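
The modified_at handling above relies on sg_timezone from the bundled Shotgun API; outside that context, a plain-Python sketch of the same idea attaches the local zone with astimezone():

import os
from datetime import datetime

def local_mtime(path):
    """Return a file's modification time as an aware, local-time datetime (or None)."""
    try:
        return datetime.fromtimestamp(os.path.getmtime(path)).astimezone()
    except OSError:
        # mirror the code above: treat stat/permission errors as "unknown"
        return None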
Ejemplo n.º 56
0
def ticktobar():
    config = incept_config()
    config['is_tick'] = True
    workdays = TradingPeriod.get_workdays(begin=config['begin'],
                                          end=config['end'])
    workdays_exchange_trading_period_by_ts = \
        TradingPeriod.get_workdays_exchange_trading_period(
            _workdays=workdays, exchange_trading_period=EXCHANGE_TRADING_PERIOD)

    date_converters = list()

    for granularity in config['granularities']:
        date_converter = DateConverter()
        date_converter.name = config['name']
        date_converter.interval = 60 * granularity
        date_converters.append(date_converter)

    lines = list()

    with open(config['data_source']) as f:
        for i, line in enumerate(f):

            # skip the CSV header row
            if i == 0:
                continue

            lines.append(line)

    for i, line in enumerate(lines):

        if i % 10000 == 0:
            print ' '.join([time.strftime('%H:%M:%S'), i.__str__()])

        depth_market_data = dict()
        row = line.split(',')

        if not row[1].strip()[0].isdigit():
            # print row[1]
            continue

        row[0] = row[0].replace('/', '-')

        if config['offset'] > 0:
            if config['is_tick']:
                dt = datetime.strptime(row[0], "%Y-%m-%d %H:%M:%S.%f")
                ts = time.mktime(dt.timetuple()) + (dt.microsecond / 1e6)
            else:
                ts = int(
                    time.mktime(time.strptime(row[0], "%Y-%m-%d %H:%M:%S")))

            ts += config['offset']

            if config['is_tick']:
                row[0] = datetime.strftime(datetime.fromtimestamp(ts),
                                           "%Y-%m-%d %H:%M:%S.%f")
            else:
                row[0] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(ts))

        date_time = row[0].split(' ')

        if date_time[0] not in workdays_exchange_trading_period_by_ts:
            continue

        if not trading_time_filter(
                date_time=row[0],
                contract_code=config['contract_code'],
                exchange_trading_period_by_ts=
                workdays_exchange_trading_period_by_ts[date_time[0]]):
            continue

        depth_market_data['action_day'] = ''.join(date_time[0].split('-'))
        depth_market_data['update_time'] = date_time[1]

        depth_market_data['last_price'] = row[1].strip()

        if depth_market_data['last_price'].isdigit():
            depth_market_data['last_price'] = int(
                depth_market_data['last_price'])
        else:
            try:
                depth_market_data['last_price'] = float(
                    '%0.2f' % float(depth_market_data['last_price']))

            except ValueError:
                continue

        for date_converter in date_converters:
            date_converter.data_pump(depth_market_data=depth_market_data,
                                     save_dir_path=config['output_dir'])

    for date_converter in date_converters:
        date_converter.save_last()
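
The offset handling in the loop above round-trips each row's timestamp through a Unix epoch value so a fixed number of seconds can be added; a compact sketch of that shift for the tick format (dates are illustrative):

import time
from datetime import datetime

def shift_tick_timestamp(text, offset_seconds):
    """Shift a 'YYYY-MM-DD HH:MM:SS.ffffff' timestamp by offset_seconds."""
    dt = datetime.strptime(text, '%Y-%m-%d %H:%M:%S.%f')
    ts = time.mktime(dt.timetuple()) + dt.microsecond / 1e6 + offset_seconds
    return datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S.%f')

# shift_tick_timestamp('2016-01-04 09:00:00.500000', 3600)
# -> '2016-01-04 10:00:00.500000'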
Ejemplo n.º 57
0
def human_time(ts):
    return datetime.fromtimestamp(float(ts)).strftime("%Y-%m-%d %H:%M:%S")
if lU == 'FileWasEmpty!':
	lU = PG.parsePriceFindLastValue(coinGeckoStartUnixTime)
	print("Warning, file was empty, init zero params!")

# Find the same but in MongoDB;
lastUnixTimeinDB = MC.findLastPriceDataUnixTime(collectionForPrices)

while True:
	lU = PG.parsePriceFindLastValue(coinGeckoStartUnixTime)
	unixTime = MC.findLastPriceGtThan(collectionForPrices, lU)
	if unixTime == 'Empty':
		# Send new JSON to FE;
		PG.sendJSONtoFronend()
		timeSet = strftime("%Y-%m-%d %H:%M:%S", gmtime())
		print(timeSet +" ***JSON copied to FE instance***")
		print(timeSet +" All tasks were successful.")
		break
	else:
		printTime = (datetime.fromtimestamp(unixTime)).strftime('%Y-%m-%d %H:%M:%S')
		price = MC.findLastMarketCapQuick(collectionForPrices, unixTime)
		if price == 'KeyError':
			print('WARNING! Cannot parse price in unixTime, KeyError: ' + str(unixTime))
			sys.exit(1)
		resJSON = PG.appendNewContentToPriceGraph(float(price), unixTime)
		resWrite = PG.writeJSONtoFile(resJSON)
		if resWrite == 'OK':
			timeSet = strftime("%Y-%m-%d %H:%M:%S", gmtime())
			print timeSet + " Found MarketCap: " + str(price) + " // We at " + str(printTime)
		else:
			print("FATAL!")
			sys.exit(1)
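
human_time above accepts anything float() can parse, so numeric and string timestamps both work:

human_time(1600000000)      # '2020-09-13 12:26:40' on a UTC host
human_time('1600000000.5')  # float() keeps the fraction, the format drops it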
Ejemplo n.º 59
0
def file_modified_datetime(path):
    unix_t = os.stat(path).st_mtime
    return datetime.fromtimestamp(unix_t)
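
A quick usage sketch; the result is a naive datetime in local time:

mtime = file_modified_datetime(__file__)
print(mtime.strftime('%Y-%m-%d %H:%M:%S'))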
Ejemplo n.º 60
0
    def _process_work_files(
        self,
        work_files,
        work_template,
        context,
        name_map,
        version_compare_ignore_fields,
        filter_file_key=None,
    ):
        """
        :param work_files: A list of dictionaries with file details.
        :param work_template: The template which was used to generate the files list.
        :param context: The context for which the files are retrieved.
        :param name_map: A :class:`_FileNameMap` instance.
        :param version_compare_ignore_fields: A list of template fields to ignore
                                              when building a key for the file.
        :param filter_file_key: A unique file 'key' that, if specified, will limit
                                the returned list of files to just those that match.
        returns: A dictionary where keys are (file key, version number) tuples
                  and values are dictionaries which can be used to instantiate
                  :class:`FileItem`.
        """
        files = {}

        for work_file in work_files:

            # always have the work path:
            work_path = work_file["path"]

            # get fields for work file:
            wf_fields = work_template.get_fields(work_path)
            wf_ctx = None

            # Build the unique file key for the work path.
            # All files that share the same key are considered
            # to be different versions of the same file.
            #
            file_key = FileItem.build_file_key(
                wf_fields, work_template, version_compare_ignore_fields
            )
            if filter_file_key and file_key != filter_file_key:
                # we can ignore this file completely!
                continue

            # copy common fields from work_file:
            #
            file_details = dict(
                [(k, v) for k, v in six.iteritems(work_file) if k != "path"]
            )

            # get version from fields if not specified in work file:
            if not file_details["version"]:
                file_details["version"] = wf_fields.get("version", 0)

            # if no task try to determine from context or path:
            if not file_details["task"]:
                if context.task:
                    file_details["task"] = context.task
                else:
                    # try to create a context from the path and see if that contains a task:
                    wf_ctx = self._app.sgtk.context_from_path(work_path, context)
                    if wf_ctx and wf_ctx.task:
                        file_details["task"] = wf_ctx.task

            # Add additional fields:
            #

            # Entity:
            file_details["entity"] = context.entity

            # File modified details:
            if not file_details["modified_at"]:
                try:
                    modified_at = os.path.getmtime(work_path)
                    file_details["modified_at"] = datetime.fromtimestamp(
                        modified_at, tz=sg_timezone.local
                    )
                except OSError:
                    # ignore OSErrors as it's probably a permissions thing!
                    pass

            if not file_details["modified_by"]:
                file_details["modified_by"] = g_user_cache.get_file_last_modified_user(
                    work_path
                )

            if not file_details["name"]:
                # make sure all files with the same key have the same name:
                file_details["name"] = name_map.get_name(
                    file_key, work_path, work_template, wf_fields
                )

            # add to the list of files
            files[(file_key, file_details["version"])] = {
                "key": file_key,
                "is_work_file": True,
                "work_path": work_path,
                "work_details": file_details,
            }

        return files