def main(limit=50):  # originally: def main(limit=len(ALL)):
    d = ALL.items()[:limit]
    total = float(limit)
    i = 0
    record_count = 0
    print 'Exporting patrons...'
    ft = open(export_dir + 'PATRONS.marc.txt', 'w')
    fb = open(export_dir + 'PATRONS.marc.dat', 'wb')
    for (recid, record) in d:
        i = i + 1
        if 'EXPIR' in record and record['EXPIR'] == 'None':
            expiration_date = datetime.strptime('9999-1-1', '%Y-%m-%d')
        elif 'EXPIR' in record:
            try:
                expiration_date = datetime.strptime(
                    format_date(record['EXPIR'], '%Y-%m-%d %H:%M:%S'), '%Y-%m-%d')
            except ValueError:
                expiration_date = datetime.strptime(
                    format_date(record['EXPIR'], '%m/%d/%Y'), '%Y-%m-%d')
        if 'EXPIR' not in record or expiration_date > datetime.now():
            if 'PA' not in record or len(record['PA']) != 3:
                rec_binary = format_record(recid, record)
                fb.write(rec_binary.as_marc())
                ft.write(str(rec_binary) + '\n==================\n')
                record_count = record_count + 1
        if i > limit:
            break
        update_progress(i * 100 / total)
    fb.close()
    ft.close()
    print "\nPatrons exported: %d/%d" % (record_count, limit)
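# A minimal sketch of the format_date helper that the exporters here appear to
# share (hypothetical; the real utils implementation is not shown in these
# snippets). It parses the raw field with the supplied template and returns a
# normalized date string; the sierra flag mirrors the keyword passed by
# format_record further below.
from datetime import datetime as _dt

def format_date_sketch(value, template='%Y-%m-%d %H:%M:%S', sierra=False):
    parsed = _dt.strptime(value.strip(), template)
    if sierra:
        # Sierra-style patron loads commonly use mm-dd-yy; an assumption here.
        return parsed.strftime('%m-%d-%y')
    return parsed.strftime('%Y-%m-%d')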
def get_games(date, output_file=None):
    # games_url = base + '/scoreboard/' + format_date(date) + '/games.json'
    games_url = si_base + 'schedule'
    result = requests.get(games_url, params={'date': format_date(date)})
    soup = BeautifulSoup(result.text)
    games = soup.find_all('tr', 'component-scoreboard-list final')
    game_ids = []
    for game in games:
        game_date_elem = game.find('div', 'game-anchor')
        game_date_text = game_date_elem['id']
        game_date = date_parser.parse(game_date_text).date()
        if game_date == date:
            game_id = int(game['data-id'])
            game_ids.append(game_id)
    if output_file is not None:
        of = open(output_file, 'w')
        of.write(json.dumps({'game_date': format_date(date),
                             'game_ids': game_ids}))
        of.close()
    return game_ids
def make_admin_round_details(rnd, rnd_stats):
    # TODO: This should be deprecated in favor of rnd.to_details_dict(), which
    # is similar except for the stats dict structure.
    ret = {'id': rnd.id,
           'name': rnd.name,
           'directions': rnd.directions,
           'canonical_url_name': slugify(rnd.name, '-'),
           'vote_method': rnd.vote_method,
           'open_date': format_date(rnd.open_date),
           'close_date': format_date(rnd.close_date),
           'config': rnd.config,
           'deadline_date': format_date(rnd.deadline_date),
           'status': rnd.status,
           'quorum': rnd.quorum,
           'total_entries': len(rnd.entries),
           'total_tasks': rnd_stats['total_tasks'],
           'total_open_tasks': rnd_stats['total_open_tasks'],
           'percent_tasks_open': rnd_stats['percent_tasks_open'],
           'total_disqualified_entries': rnd_stats['total_disqualified_entries'],
           'campaign': rnd.campaign.to_info_dict(),
           'stats': rnd_stats,
           'jurors': [rj.to_details_dict() for rj in rnd.round_jurors]}
    return ret
def find_resources(self, project_id, meter, start=None, end=None):
    query_list = [
        {"=": {"project_id": project_id}},
    ]
    if end is not None:
        query_list.append(
            {"or": [
                {"gt": {"revision_end": utils.format_date(end)}},
                {"=": {"revision_end": None}},
            ]})
    if start is not None:
        query_list.append(
            {"or": [
                {"lt": {"revision_start": utils.format_date(start)}},
                {"=": {"revision_start": None}},
            ]})
    query = {"and": query_list}
    resources = self._gnocchi.resource.search(
        resource_type="instance",
        details=True,
        history=True,
        query=query,
    )
    self.logger.debug("Got %d resources" % len(resources))
    ret = []
    for r in resources:
        if meter in r['metrics']:
            ret.append(r['id'])
    return ret
def _process_UP(x):
    parts = split_subfields(x)
    subfields = {}
    subfields['a'] = parts[0]
    if len(parts) == 2:
        subfields['b'] = format_date(parts[1].lower(), '%d %b %Y')
    elif len(parts) > 2:
        subfields['b'] = format_date(parts[1], '%Y %m %d')
        subfields['c'] = parts[2]
    return [{'tag': tag_number, 'ind1': ' ', 'ind2': ' ', 'subs': subfields}]
def create_tag_metadata(key, value, metadata, timestamp=None):
    if timestamp is None:
        timestamp = datetime.datetime.utcnow()
    query = '''
        mutation($tag: TagPrimary!, $metadata: String!, $timestamp: Datetime!) {
            metadata: create_tag_metadata(tag: $tag, metadata: $metadata, timestamp: $timestamp) {
                timestamp
                metadata
            }
        }
    '''  # noqa: E501
    variables = {
        'tag': {
            'key': key,
            'value': value
        },
        'metadata': json.dumps(metadata),
        'timestamp': utils.format_date(timestamp)
    }
    logger.info(
        "Creating new tag metadata for tag {key}={value}[{metadata}]".format(
            key=key, value=value, metadata=metadata))
    return graphql(query, variables)['metadata']
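# Hypothetical call to the mutation above; the tag key/value and metadata
# payload are illustrative only (timestamp defaults to utcnow()).
# created = create_tag_metadata('host', 'web-1', {'rack': 'A3', 'dc': 'eu-1'})
# print(created['timestamp'], created['metadata'])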
def result(self):
    rv = {}
    user = cache_registry.lookup("pinwall.users.UserService.user_by_id")(
        self.user_id)
    user_artifact = cache_registry.lookup(
        "pinwall.artifacts.ArtifactService.artifact_by_userid")(self.user_id)
    rv['user'] = user if user else None
    rv['artifact_count'] = len(
        user_artifact.get("artifact_ids")) if user_artifact else 0
    rv['honor_sum'] = user_artifact.get("honor_sum") if user_artifact else 0
    comment_count = cache_registry.lookup(
        "pinwall.users.UserService.user_comment_count")(self.user_id)
    rv['comment_count'] = comment_count if comment_count else 0
    if self.load_top_10_artifacts:
        first_load_artifact_ids = cache_registry.lookup(
            "pinwall.users.UserService.first_load_artifact_ids")(self.user_id)
        artifacts = []
        if first_load_artifact_ids:
            for artifact_id in first_load_artifact_ids:
                artifact = ArtifactResultProxy(artifact_id,
                                               show_user=False).result()
                if artifact:
                    artifact["created_at"] = format_date(
                        artifact.get("created_at"))
                    artifacts.append(artifact)
        rv["artifacts"] = artifacts
    return rv
def add_coordinator(user_dao, campaign_id, request_dict):
    """
    Summary:
        Add a new coordinator identified by Wikimedia username to a
        campaign identified by campaign ID

    Request model:
        username

    Response model:
        username
        last_active_date
        campaign_id

    Errors:
       403: User does not have permission to add coordinators
    """
    coord_dao = CoordinatorDAO.from_campaign(user_dao, campaign_id)
    new_user_name = request_dict.get('username')
    new_coord = coord_dao.add_coordinator(new_user_name)
    data = {'username': new_coord.username,
            'campaign_id': campaign_id,
            'last_active_date': format_date(new_coord.last_active_date)}
    return {'data': data}
def remove_organizer(user_dao, request_dict):
    maint_dao = MaintainerDAO(user_dao)
    username = request_dict.get('username')
    old_organizer = maint_dao.remove_organizer(username)
    data = {'username': username,
            'last_active_date': format_date(old_organizer.last_active_date)}
    return {'data': data}
def parse_file(self):
    '''parse log file'''
    cur_line = self.get_next_line_log()
    while cur_line:
        cur_dict = self.analyse_line(cur_line)
        if cur_dict:
            key_name = self.generate_key_name(
                cur_dict.get('func_name'),
                cur_dict.get('log_level'),
            )
            tmp_dict = self.result_dict.setdefault(key_name, {})
            tmp_dict['file_path'] = cur_dict.get('file_path')
            tmp_dict['file_line'] = cur_dict.get('file_line')
            tmp_dict['count'] = tmp_dict.get('count', 0) + 1
            tmp_dict['message'] = cur_dict.get('message')
            tmp_dict['created_at'] = format_date(cur_dict.get('log_date'))
            tmp_dict.setdefault('content', []).append(cur_dict.get('content'))
        cur_line = self.get_next_line_log()
def add_organizer(user_dao, request_dict):
    """
    Summary: Add a new organizer identified by Wikimedia username

    Request model:
        username:
            type: string

    Response model:
        username:
            type: string
        last_active_date:
            type: date-time

    Errors:
       403: User does not have permission to add organizers
    """
    maint_dao = MaintainerDAO(user_dao)
    new_user_name = request_dict.get('username')
    new_organizer = maint_dao.add_organizer(new_user_name)
    data = {
        'username': new_organizer.username,
        'last_active_date': format_date(new_organizer.last_active_date)
    }
    return {'data': data}
def get_events(self):
    events = sorted(self.get_milestone_events() + self.get_wiki_events(),
                    key=lambda e: e["date"])
    today = datetime.now().date()
    for event in events:
        if event["date"].date() == today:
            group = None
        elif event["date"].date() == today + timedelta(days=1):
            group = "Tomorrow"
        elif (event["date"].isocalendar()[1] == today.isocalendar()[1]
                and event["date"].year == today.year):
            group = "This Week"
        elif (event["date"].isocalendar()[1] == today.isocalendar()[1] + 1
                and event["date"].year == today.year):
            group = "Next Week"
        elif event["date"].month == today.month:
            group = "Later This Month"
        else:
            group = "Future"
        event.update({
            "date": format_date(event["date"]),
            "group": group,
        })
    return events
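# Worked example of the grouping rule above (dates assumed timezone-naive):
# with today = Wednesday 2021-06-09 (ISO week 23), an event on 2021-06-11
# lands in "This Week", 2021-06-14 (ISO week 24) in "Next Week", and
# 2021-06-29 in "Later This Month"; anything past June becomes "Future".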
def parse_file(self):
    '''parse log file'''
    cur_line = self.get_next_line_log()
    while cur_line:
        cur_dict = self.analyse_line(cur_line)
        if cur_dict:
            key_name = self.generate_key_name(
                cur_dict.get('func_name'),
                # cur_dict.get('log_level'),
            )
            tmp_dict = {}
            tmp_dict['log_level'] = cur_dict.get('log_level')
            tmp_dict['file_path'] = cur_dict.get('file_path')
            tmp_dict['file_line'] = cur_dict.get('file_line')
            tmp_dict['message'] = cur_dict.get('message')
            tmp_dict['created_at'] = format_date(cur_dict.get('log_date'))
            tmp_dict['content'] = cur_dict.get('content', '')
            put_one_record(key_name, tmp_dict)
        cur_line = self.get_next_line_log()
def get_events(self):
    events = sorted(self.get_milestone_events() + self.get_wiki_events(),
                    key=lambda e: e["date"])
    for event in events:
        event["date"] = format_date(event["date"])
    return events
def process_EXPIR(x):
    template = '%Y-%m-%d %H:%M:%S'
    if '/' in x:
        template = '%m/%d/%Y'
    date = format_date(x, template)
    return [{'tag': '108', 'ind1': ' ', 'ind2': ' ', 'subs': {'a': date}}]
def remove_coordinator(user_dao, campaign_id, request_dict):
    coord_dao = CoordinatorDAO.from_campaign(user_dao, campaign_id)
    username = request_dict.get('username')
    old_coord = coord_dao.remove_coordinator(username)
    data = {'username': username,
            'campaign_id': campaign_id,
            'last_active_date': format_date(old_coord.last_active_date)}
    return {'data': data}
def format_date(self, date, expression):
    """Use a date format string method to return a formatted datetime.

    You should override this method to force UTF-8 decoding or the like
    (until we find a better and agnostic solution). Please don't hack this
    method up; just override it on your report class."""
    return format_date(date, expression)
def make_juror_round_details(rnd, rnd_stats):
    ret = {'id': rnd.id,
           'directions': rnd.directions,
           'name': rnd.name,
           'vote_method': rnd.vote_method,
           'open_date': format_date(rnd.open_date),
           'close_date': format_date(rnd.close_date),
           'deadline_date': format_date(rnd.deadline_date),
           'status': rnd.status,
           'canonical_url_name': slugify(rnd.name, '-'),
           'config': rnd.config,
           'total_tasks': rnd_stats['total_tasks'],
           'total_open_tasks': rnd_stats['total_open_tasks'],
           'percent_tasks_open': rnd_stats['percent_tasks_open'],
           'campaign': rnd.campaign.to_info_dict()}
    return ret
def remove_coordinator(user_dao, campaign_id, request_dict):
    org_dao = OrganizerDAO(user_dao)
    username = request_dict.get('username')
    old_coord = org_dao.remove_coordinator(campaign_id, username)
    data = {
        'username': username,
        'campaign_id': campaign_id,
        'last_active_date': format_date(old_coord.last_active_date)
    }
    return {'data': data}
def search_by_date(self):
    """Search a task by date"""
    date = utils.create_date('Enter the date')
    # Format the date to dd/mm/yyyy
    date = utils.format_date(date)
    rows = (Task.select(Task, Employee)
            .join(Employee)
            .where(Task.date == date)
            .naive())
    return rows
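# Hypothetical use of the peewee query above; Task fields beyond Task.date are
# not shown in these snippets, so this is illustrative only.
# for task in menu.search_by_date():
#     print(task.date)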
def _add_one(self, url, time):
    result = template["content"].format(
        config["site_url"],
        url,
        format_date(time, "sitemap"),
        config["sitemap_freq"],
        config["sitemap_priority"]
    )
    if self._debug:
        print result
    return result
def add_log(given_date):
    requested_date = format_date(given_date)
    existing_date = Date.query.filter_by(date=requested_date).first()
    if not existing_date:
        new_date = Date(date=requested_date,
                        public_id=str(uuid.uuid4()),
                        user=current_user)
        db.session.add(new_date)
        db.session.commit()
    else:
        flash("This date already exists.")
def __init__(self, restaurant_name, date, time, size):
    """
    :str restaurant_name: name of restaurant
    :str date: date of reservation, format 2019/01/01, 01/01/2019,
               2019-01-01, 01-01-2019
    :str time: time of reservation, format 4:00 PM
    :int size: party size
    """
    self._restaurant_name = restaurant_name.strip()
    self._date = utils.format_date(date)
    self._time = utils.format_time(time)
    self._size = size
    self._open_driver()
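# Hypothetical instantiation of the reservation class above (its class name is
# not shown in this snippet, so "Reservation" is an assumption); values are
# illustrative only.
# booking = Reservation("Luigi's", '2019-01-01', '4:00 PM', 4)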
def search_by_dates_range(self):
    """Search tasks between a range of dates"""
    date1 = utils.create_date('Start date')
    date2 = utils.create_date('Finish date')
    if date1 < date2:
        start_date = date1
        finish_date = date2
    else:  # also covers date1 == date2
        start_date = date2
        finish_date = date1
    # Format dates
    start_date = utils.format_date(start_date)
    finish_date = utils.format_date(finish_date)
    rows = (Task.select(Task, Employee)
            .join(Employee)
            .where(Task.date.between(start_date, finish_date))
            .naive())
    return rows
def result(self):
    rv = {}
    artifact = cache_registry.lookup(
        "pinwall.artifacts.ArtifactService.artifact_by_id")(self.artifact_id)
    if artifact:
        artifact["created_at"] = format_date(artifact.get("created_at"))
        rv["artifact"] = artifact
        if self.show_user:
            user = cache_registry.lookup(
                "pinwall.users.UserService.user_by_id")(artifact["user_id"])
            rv["user"] = user if user else None
        if artifact.get("topic_id", None) and self.show_topic:
            topic = cache_registry.lookup(
                "pinwall.artifacts.TopicService.topic_by_id")(
                    artifact["topic_id"])
            if topic:
                topic["created_at"] = format_date(topic.get("created_at"))
            rv["topic"] = topic if topic else None
    return rv
def get_data(self, pv, start, end):
    """Retrieve archived data

    :param pv: name of the pv.
    :param start: start time. Can be a string or `datetime.datetime` object.
    :param end: end time. Can be a string or `datetime.datetime` object.
    :return: `pandas.DataFrame`
    """
    # http://slacmshankar.github.io/epicsarchiver_docs/userguide.html
    params = {
        "pv": pv,
        "from": utils.format_date(start),
        "to": utils.format_date(end),
    }
    try:
        r = self.get(self.data_url, params=params)
        data = self._return_json(r)
    except Exception:
        # Fall back to a hand-built URL fetched with urllib2.
        url = self.data_url + "?pv=" + urllib.quote_plus(pv) + '&' + \
            urllib.urlencode({"from": utils.format_date(start),
                              "to": utils.format_date(end)})
        print(url)
        req = urllib2.urlopen(url)
        data = json.load(req)
    df = pd.DataFrame(data[0]["data"])
    try:
        if pd.__version__ > '0.8.0':
            df["date"] = pd.to_datetime(df["secs"] + df["nanos"] * 1e-9,
                                        unit="s")
        else:
            df["date"] = pd.to_datetime(
                [datetime.fromtimestamp(x["secs"] + x["nanos"] * 1e-9)
                 for x in data[0]["data"]])
    except KeyError:
        # Empty data
        pass
    else:
        df = df[["date", "val"]]
        df = df.set_index("date")
    return df
def pd_json_to_df(pd_json, pd_columns=pd_columns):
    pd_data = json.load(open(pd_json))  # renamed from `pd` to avoid shadowing pandas
    pd_cls_mapping = prediction_class_mapping()
    pd_df = pandas.DataFrame(columns=list(pd_cls_mapping.values()))
    for key, values in pd_data.items():
        file_id = key.split('.')[0]
        photo_tracking_num, page_num = file_id.split("_")
        for val in values:
            item_dict = {
                j: val.get(i, [None, None, None])[1]
                for i, j in pd_cls_mapping.items()
            }
            item_dict.update({
                "photo_tracking_number": photo_tracking_num,
                "page_number": float(page_num)
            })
            pd_df = pd_df.append(item_dict, ignore_index=True)
    pd_df['date_of_service'] = pd_df['date_of_service'].apply(
        lambda x: format_date(x) if format_date(x) else float('nan'))
    pd_df['provider_number.1'] = pd_df['provider_number.1'].apply(
        lambda x: x.strip("(").strip(")") if isinstance(x, str) else x)
    return pd_df
def result(self):
    rv = {}
    user = cache_registry.lookup("pinwall.users.UserService.user_by_id")(
        self.comment.commenter_id)
    rv["comment"] = self.comment
    rv["user"] = user if user else None
    if self.show_artifact:
        artifact = cache_registry.lookup(
            "pinwall.artifacts.ArtifactService.artifact_by_id")(
                self.comment.artifact_id)
        if artifact:
            artifact["created_at"] = format_date(artifact["created_at"])
        rv["artifact"] = artifact if artifact else None
    return rv
def _add_one(self, article):
    result = template["content"].format(
        article["title"]["view"],
        "%s/article/%s" % (config["site_url"], article["title"]["slug"]),
        self._format_content(article["content"]),
        "".join([template["creator"].format(author["view"])
                 for author in article["authors"]]),
        format_date(datetime.strptime(article["date"], "%Y.%m.%d %H:%M"), "feeds"),
        config["site_url"],
        article["date"],
        "article/%s" % article["title"]["slug"],
        "".join([template["tag"].format(tag["view"])
                 for tag in article["tags"]]),
    )
    if self._debug:
        print result
    return result
def fetch_to_file(fl_name=FETCHED, days=0):
    """Fetch ok169 data and write it to a file, tab-separated."""
    if days == 0:
        date_str = utils.format_date(date.today())
    else:
        date_str = '%s,%s' % (utils.get_day_of_day(days),
                              utils.format_date(date.today()))
    print date_str
    with open("temp", 'w') as output:
        page_now = 1
        while True:
            result = query_update(date_str, page_now)
            if len(result) > 2:
                for r in result[2:]:
                    output.write('\t'.join(map(lambda x: filter_flied(x), r)) + '\n')
                print "fetched page -->", page_now
                page_now += 1
            else:
                break
        output.flush()
def validate_date_range(self, start=0, end=9999999999):
    startdt = datetime.datetime.utcfromtimestamp(start)
    enddt = datetime.datetime.utcfromtimestamp(end)
    today = datetime.datetime.today()
    now = time.time()
    if startdt >= enddt:
        # Flip values
        stflp = startdt
        endflp = enddt
        start = endflp
        end = stflp
    if startdt > today:
        start = now
    # enddt = enddt + datetime.timedelta(days=1)
    if enddt > today:
        end = now
    if isinstance(start, datetime.date) or isinstance(start, str):
        start = utils.format_date(start.strftime('%Y-%m-%d'), True)
    if isinstance(end, datetime.date) or isinstance(end, str):
        end = utils.format_date(end.strftime('%Y-%m-%d'), True)
    # ADJUST DATE TO MARKET OPEN TIME (9:30AM)
    start = start - 172800
    end = end + 172800
    return {"start": math.trunc(start), "end": math.trunc(end)}
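# Note on the padding above: 172800 seconds is exactly two days, so the
# returned window is widened by two days on each side before truncation.
# Hypothetical call (the epoch seconds are illustrative only):
# rng = client.validate_date_range(start=1609459200, end=1612137600)
# assert rng["start"] < 1609459200 and rng["end"] > 1612137600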
def process_AUDIT(x):
    res = []
    b = [e.strip() for e in x.split("_|") if e.strip()]
    c = [e.split("\n ") for e in b]
    for line in c:
        subs = {}
        subs["a"] = line[0]
        action = line[1].split("|")[1]
        subs["b"] = actions.get(action, action)
        if action not in actions:
            print action
        subs["c"] = format_date(line[2].split("|")[1], "%Y %m %d")
        subs["d"] = line[3].split("|")[1]
        res.append({"tag": "103", "ind1": " ", "ind2": " ", "subs": subs})
    return res
async def index():
    if request.content_type == 'application/json':
        data = await request.get_json()
        if data is not None:
            db = get_db()
            db.execute(
                "INSERT INTO posts(latitude,longitude,velocidade,satelites,data_hora) "
                "VALUES(?,?,?,?,?);",
                [
                    data['Latitude'],
                    data['Longitude'],
                    data['Velocidade'],
                    data['Satelites'],
                    str(format_date(f"{data['Data']} {data['Hora']}")),
                ],
            )
            db.commit()
    return 'success'
def sample(tags, metric_name, period, timestamp):
    series_id = get_or_create_series(tags=tags,
                                     metric_name=metric_name,
                                     period=period)['id']
    query = '''
        query {{
            series(id: "{series_id}") {{
                sample(timestamp: "{timestamp}") {{
                    timestamp
                    value
                }}
            }}
        }}
    '''.format(series_id=series_id, timestamp=utils.format_date(timestamp))
    return graphql(query)['series']['sample']
def round_announcement(tournament_dir, tournament_metadata, puzzle_name,
                       level_code=None, attachment=None):
    """Helper to announce_round_start for creating the announcement msg, also used by the TO to preview it.

    Return the announcement's embed and puzzle file.
    """
    round_metadata = tournament_metadata['rounds'][puzzle_name]
    round_dir = tournament_dir / round_metadata['dir']

    if attachment is None:
        puzzle_file = next(round_dir.glob('*.puzzle'), None)
        if puzzle_file is None:
            raise FileNotFoundError(f"{round_metadata['round_name']} puzzle file not found")

        # Upload the attachment as a .txt so it can be previewed without downloading
        attachment = discord.File(str(puzzle_file), filename=puzzle_file.stem + '.txt')

        with open(puzzle_file, 'r', encoding='utf-8') as pf:
            level_code = pf.read()  # Note: read() converts any windows newlines to unix newlines

    single_line_level_code = level_code.replace('\n', '')

    with open(round_dir / 'description.txt', encoding='utf-8') as f:
        description = f.read().strip()

    # Discord's embeds seem to be the only way to do a hyperlink to hide the giant puzzle preview link
    embed = discord.Embed(author=tournament_metadata['name'],
                          title=f"**Announcing {round_metadata['round_name']}, {puzzle_name}!**",
                          description=description)
    embed.add_field(name='Preview',
                    value=f"[Coranac Site]({CORANAC_SITE}?code={single_line_level_code})",
                    inline=True)
    embed.add_field(name='Metric', value=f"`{round_metadata['metric']}`", inline=True)
    embed.add_field(name='Points', value=round_metadata['points'], inline=True)
    if 'max_cycles' in round_metadata:
        embed.add_field(name='Max Cycles', value=round_metadata['max_cycles'], inline=True)

    # Make the ISO datetime string friendlier-looking (e.g. no +00:00) or indicate puzzle is tournament-long
    round_end = format_date(round_metadata['end'])
    if round_metadata['end'] == tournament_metadata['end']:
        round_end += " (Tournament Close)"
    embed.add_field(name='Deadline', value=round_end, inline=True)
    # TODO: Add @tournament or something that notifies people who opt-in, preferably updateable by bot

    return embed, attachment
def generate(self):
    logger.info("Feeds: Writing start...")
    self._files = {}
    time = format_date(datetime.now(), "feeds")
    articles = list(self._collection.find({}))
    articles.sort(key=lambda article: article["date"], reverse=True)
    for article in articles:
        content, file_names = self._format_article(article)
        self._update_files(file_names, time)
        for name in file_names:
            self._files[name["slug"].encode("utf-8")].write(self._add_one(content))
    indexes = {}
    for file_name, file_obj in self._files.items():
        file_obj.write(template["end"])
        file_obj.close()
        indexes[file_name] = "%s.rss.xml" % file_name
        logger.info("Feeds: Done %s..." % file_name)
    with open("%s/%s" % (config["feeds_dir_path"], "indexes.json"), "w") as f:
        json.dump(indexes, f)
    logger.info("Feeds: Writing done...")
def get_cookie(driver, store_id, config):
    if access_store(driver, store_id):
        date = utils.get_last_visit_date(config["week_frequency"])
        formatted = utils.format_date(
            date, driver.find_element_by_id("answ5195").get_attribute("placeholder"))
        utils.input(driver, "answ5195", formatted, False)
        utils.input(driver, "answHour5195", config["hour"], False)
        utils.input(driver, "answMinute5195", config["minute"], False)
        utils.set_vote(driver, "answc5197", config["recommend_to_friend"])
        utils.set_vote(driver, "answ5198", config["general_experience"])
        utils.set_vote(driver, "answ51990", config["food_quality"])
        utils.set_vote(driver, "answ51991", config["food_quality"])
        utils.set_vote(driver, "answ51992", config["polite_staff"])
        utils.set_vote(driver, "answ51993", config["cleaning"])
        utils.set_vote(driver, "answ51994", config["comfort"])
        utils.set_vote(driver, "answ51995", config["general_experience"])
        utils.set_drop_down(driver, "answ5220", config["compliment"])
        if config["compliment"]:
            utils.input(driver, "answ5221", config["compliment_message"])
        utils.set_drop_down(driver, "answ5222", config["warning"])
        if config["warning"]:
            utils.input(driver, "answ5223", config["warning_message"])
        utils.input(driver, "answ5224", config["fast_food_in_month"])
        utils.input(driver, "answ5225", config["subway_in_month"])
        utils.input(driver, "answ5218", config["my_email"])
        utils.set_drop_down(driver, "answ5219", config["receive_newsletter"])
        utils.set_drop_down(driver, "DdlContact", config["contact_me"])
        utils.button_click(driver, "btnSubmit")
        time.sleep(1)
        utils.deal_with_error_message(driver)
        time.sleep(5)
        return driver.find_element_by_id("ctl03_lblTag").text
def build_data(self):
    cnt = 1
    with open(self.filename, newline='') as csvfile:
        datareader = csv.reader(csvfile, delimiter=',')
        for row in datareader:
            if cnt == 1:
                cnt += 1
                continue  # Skip first line with only column names
            if len(row[1]) > 0:
                self.Date = u.format_date(row[0])
                self.AverageTemperature = u.scrub_decimal(row[1])
                # Build dictionary for the selected month
                if u.is_month(self.Date, self.month):
                    self.result_dict[self.Date] = round(
                        self.AverageTemperature, 2)
            cnt += 1
    return self.result_dict
def create_sample(metric_name, period, tags, timestamp, value, overwrite=False):
    query = '''
        mutation($series: SeriesPrimary!, $timestamp: Datetime!, $value: Float!, $overwrite: Boolean) {
            sample: create_sample(series: $series, timestamp: $timestamp, value: $value, overwrite: $overwrite) {
                timestamp
                value
            }
        }
    '''  # noqa: E501
    variables = {
        'series': {
            'metric': {
                'name': metric_name
            },
            'period': period,
            'tags': tags,
        },
        'timestamp': utils.format_date(timestamp),
        'value': value,
        'overwrite': overwrite,
    }
    logger.info("Creating new sample for metric {metric}, period {period}, "
                "tags {tags}, timestamp {timestamp}, value {value}, "
                "overwrite {overwrite}".format(metric=metric_name,
                                               period=period,
                                               tags=tags,
                                               timestamp=timestamp,
                                               value=value,
                                               overwrite=overwrite))
    return graphql(query, variables)['sample']
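# Hypothetical call; the metric name, period, and tags are illustrative only.
# Unlike the string-interpolated query in sample() above, this mutation passes
# its inputs as GraphQL variables, which sidesteps quoting/injection issues.
# create_sample('cpu.load', 60, {'host': 'web-1'},
#               datetime.datetime.utcnow(), 0.42, overwrite=True)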
def _get_user_info(self, user):
    try:
        user = self.api.users.get(
            user_ids=user,
            fields=['bdate', 'relation', 'city', 'last_seen', 'domain'])
    except Exception:
        return 'No such user found'
    id = user[0].get('id')
    first_name = user[0].get('first_name')
    last_name = user[0].get('last_name')
    bdate = user[0].get('bdate')
    relation = user[0].get('relation')
    if relation:
        relation = get_family_status(relation)
    city = user[0].get('city').get('title')
    last_seen = format_date(user[0].get('last_seen').get('time'))
    domain = 'https://vk.com/' + user[0].get('domain')
    return f'''{first_name} {last_name}\n Id: {id}\nProfile: {domain}'''
def result(self):
    rv = {}
    topic = cache_registry.lookup(
        "pinwall.artifacts.TopicService.topic_by_id")(self.topic_id)
    first_load_artifacts = []
    user = None  # guard: only looked up when the topic exists
    if topic:
        user = cache_registry.lookup(
            "pinwall.users.UserService.user_by_id")(topic["user_id"])
        first_load_artifact_ids = cache_registry.lookup(
            "pinwall.artifacts.TopicService.first_load_artifact_ids")(
                self.topic_id)
        if first_load_artifact_ids:
            for artifact_id in first_load_artifact_ids:
                artifact = ArtifactResultProxy(artifact_id,
                                               show_topic=False).result()
                if artifact:
                    artifact["created_at"] = format_date(
                        artifact.get("created_at"))
                    first_load_artifacts.append(artifact)
    rv["topic"] = topic if topic else None
    rv["user"] = user if user else None
    rv["artifacts"] = first_load_artifacts
    return rv
def generate(self):
    logger.info("Feeds: Writing start...")
    self._files = {}
    time = format_date(datetime.now(), "feeds")
    articles = list(self._collection.find({}))
    articles.sort(key=lambda article: article["date"], reverse=True)
    logger.info("Feeds: Writing: ")
    for article in articles:
        content, file_names = self._format_article(article)
        self._update_files(file_names, time)
        for name in file_names:
            self._files[name["slug"].encode("utf-8")].write(
                self._add_one(content))
    indexes = {}
    logger.info("Feeds: Done: ")
    for file_name, file_obj in self._files.items():
        file_obj.write(template["end"])
        file_obj.close()
        indexes[file_name] = "%s.rss.xml" % file_name
        logger.info("'%s' " % file_name, False)
    with open("%s/%s" % (config["feeds_dir_path"], "indexes.json"), "w") as f:
        json.dump(indexes, f)
    logger.info("Feeds: Writing done...")
def get_all_data(start_date, end_date):
    try:
        os.mkdir('./json_data')
    except OSError as os_err:
        print os_err
    try:
        os.mkdir('./mongo_data')
    except OSError as os_err:
        print os_err
    date_list = [start_date + dt.timedelta(days=x)
                 for x in range(0, (end_date - start_date).days)]
    # Skip the All-Star game dates
    if dt.date(2014, 2, 16) in date_list:
        date_list.remove(dt.date(2014, 2, 16))
    if dt.date(2015, 2, 15) in date_list:
        date_list.remove(dt.date(2015, 2, 15))
    for game_day in date_list:
        print 'Processing game day', game_day
        game_ids = get_games(game_day,
                             'json_data/2013/game-day-{0}.json'.format(format_date(game_day)))
        # now all the data is just contained in the boxscore...
        base_url = 'http://www.si.com/pbp/liveupdate'
        for game_id in game_ids:
            result = requests.get(base_url, params={'json': '1',
                                                    'sport': 'basketball/nba',
                                                    'id': str(game_id),
                                                    'box': 'true',
                                                    'pbp': 'true',
                                                    'linescore': 'true'})
            try:
                json_result = result.json()['apiResults'][0]
                print game_id
                output_file = 'json_data/2013/pbp_{0}_{1}.json'.format(format_date(game_day), game_id)
                with open(output_file, 'w') as of:
                    json.dump(json_result, of, indent=4)
                boxscore_data = json_result['league']['season']['eventType'][0]['events'][0]['boxscores']
                team_data = json_result['league']['season']['eventType'][0]['events'][0]['teams']
                for team in team_data:
                    filtered_team_data = {'id': team['teamId'],
                                          'location': team['location'],
                                          'nickname': team['nickname'],
                                          'abbreviation': team['abbreviation']}
                    print filtered_team_data['nickname']
                    teams.update({'id': team['teamId']}, filtered_team_data, upsert=True)
                for team in boxscore_data:
                    for player in team['playerstats']:
                        filtered_player_data = {'id': player['player']['playerId'],
                                                'firstName': player['player']['firstName'],
                                                'lastName': player['player']['lastName']}
                        players.update({'id': player['player']['playerId']}, filtered_player_data, upsert=True)
                pbp.update({'league.season.eventType.0.events.0.eventId': game_id}, json_result, upsert=True)
            except Exception as ex:
                print ex
def process_ADDED(x):
    if 'at' in x:
        v = format_date(x, '%Y %m %d at %H:%M:%S')
    else:
        v = format_date(x, '%d %b %Y')
    return [{'tag': '100', 'ind1': ' ', 'ind2': ' ', 'subs': {'a': v}}]
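# The two source shapes process_ADDED distinguishes, per the branch above
# (the example values are illustrative):
#   '2014 03 07 at 16:20:11' -> parsed with '%Y %m %d at %H:%M:%S'
#   '07 Mar 2014'            -> parsed with '%d %b %Y'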
def begin(self):
    return format_date(self.begin_date, self.begin_date_precision)
def end(self):
    return format_date(self.end_date, self.end_date_precision)
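# The begin/end properties above pass a precision alongside the date, implying
# a precision-aware format_date variant. A minimal sketch, assuming
# year/month/day precision labels (the real signature is not shown here):
def format_date_by_precision(d, precision):
    fmts = {'year': '%Y', 'month': '%Y-%m', 'day': '%Y-%m-%d'}
    return d.strftime(fmts.get(precision, '%Y-%m-%d')) if d else None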
    for line in c:
        subs = {}
        subs["a"] = line[0]
        action = line[1].split("|")[1]
        subs["b"] = actions.get(action, action)
        if action not in actions:
            print action
        subs["c"] = format_date(line[2].split("|")[1], "%Y %m %d")
        subs["d"] = line[3].split("|")[1]
        res.append({"tag": "103", "ind1": " ", "ind2": " ", "subs": subs})
    return res

mapping = defaultdict(lambda: None)
mapping["BARCD"] = lambda x: "b" + x
mapping["DDATE"] = lambda x: format_date(x, "%m/%d/%Y", sierra=True)
mapping["ODATE"] = lambda x: format_date(x, "%m/%d/%Y", sierra=True)
mapping["UID"] = lambda x: "b" + x

def format_record(recid, record=None):
    if not record:
        record = ALL[str(recid)]
    if len(record):
        out = "o"
        for field in ["ODATE", "BARCD", "UID", "DDATE"]:
            # for (field, value) in record.items():
            if field in record:
                try:
                    out += ":" + mapping[field](record[field])
                except Exception:
                    pass  # skip fields that fail to format
def RunCFind(self):
    tags = [(0x0010, 0x0010), (0x0010, 0x1010), (0x0010, 0x0040), (0x0008, 0x1030),
            (0x0008, 0x0060), (0x0008, 0x0022), (0x0008, 0x0080), (0x0010, 0x0030),
            (0x0008, 0x0050), (0x0008, 0x0090), (0x0008, 0x103E), (0x0008, 0x0033),
            (0x0008, 0x0032), (0x0020, 0x000d)]

    ds = gdcm.DataSet()
    for tag in tags:
        tg = gdcm.Tag(tag[0], tag[1])
        de = gdcm.DataElement(tg)
        if self.search_type == 'patient' and tag == (0x0010, 0x0010):
            bit_size = len(self.search_word) + 1
            de.SetByteValue(str(self.search_word + '*'), gdcm.VL(bit_size))
        else:
            de.SetByteValue('*', gdcm.VL(1))
        ds.Insert(de)

    cnf = gdcm.CompositeNetworkFunctions()
    theQuery = cnf.ConstructQuery(gdcm.ePatientRootType, gdcm.eImageOrFrame, ds)
    ret = gdcm.DataSetArrayType()
    cnf.CFind(self.address, int(self.port), theQuery, ret, self.aetitle,
              self.aetitle_call)

    patients = {}
    for i in range(0, ret.size()):
        # GetValue is a method; the original was missing the call parentheses.
        patient_id = str(ret[i].GetDataElement(gdcm.Tag(0x0010, 0x0020)).GetValue())
        serie_id = str(ret[i].GetDataElement(gdcm.Tag(0x0020, 0x000e)).GetValue())
        if patient_id not in patients.keys():
            patients[patient_id] = {}
        if serie_id not in patients[patient_id]:
            rt = ret[i]
            name = self.GetValueFromDICOM(rt, (0x0010, 0x0010))
            age = self.GetValueFromDICOM(rt, (0x0010, 0x1010))
            gender = self.GetValueFromDICOM(rt, (0x0010, 0x0040))
            study_description = self.GetValueFromDICOM(rt, (0x0008, 0x1030))
            modality = self.GetValueFromDICOM(rt, (0x0008, 0x0060))
            institution = self.GetValueFromDICOM(rt, (0x0008, 0x0080))
            date_of_birth = utils.format_date(self.GetValueFromDICOM(rt, (0x0010, 0x0030)))
            acession_number = self.GetValueFromDICOM(rt, (0x0008, 0x0050))
            ref_physician = self.GetValueFromDICOM(rt, (0x0008, 0x0090))
            serie_description = self.GetValueFromDICOM(rt, (0x0008, 0x103E))
            acquisition_time = utils.format_time(self.GetValueFromDICOM(rt, (0x0008, 0x0032)))
            acquisition_date = utils.format_date(self.GetValueFromDICOM(rt, (0x0008, 0x0022)))
            study_instance_uid = self.GetValueFromDICOM(rt, (0x0020, 0x000d))
            print ">>>>>>>>>>>>>>>>>>>>", study_instance_uid
            patients[patient_id][serie_id] = {'name': name,
                                              'age': age,
                                              'gender': gender,
                                              'study_description': study_description,
                                              'modality': modality,
                                              'acquisition_time': acquisition_time,
                                              'acquisition_date': acquisition_date,
                                              'institution': institution,
                                              'date_of_birth': date_of_birth,
                                              'acession_number': acession_number,
                                              'ref_physician': ref_physician,
                                              'serie_description': serie_description}
            patients[patient_id][serie_id]['n_images'] = 1
        else:
            patients[patient_id][serie_id]['n_images'] += 1
    return patients