async def gen_radia(self, ctx, date):
    """Generates the World of Radia given a date.

    Enter a date with format Month-Day-Year or Month/Day/Year
    ex. June 1st 2000 -> 06/01/2000 or 06-01-2000
    """
    date = utils.split_date(date)
    if date is None:
        # BUGFIX: the coroutine was not awaited and there was no return,
        # so the command crashed below on date["year"] with date == None.
        await ctx.send(
            embed=utils.generate_embed("Error", "Please enter a valid date"))
        return
    center = Image.open("img/background.png")
    rings_files = [
        "img/rings/ring6.png",
        "img/rings/ring5.png",
        "img/rings/ring4.png",
        "img/rings/ring3.png",
        "img/rings/ring2.png",
        "img/rings/ring1.png",
        "img/rings/ring0.png",
    ]
    ring_speeds = [0.25, 1, -2, 1.5, 1, -2, 0]  # num rotations per year
    # Idealized calendar: 12 months of 30 days -> 360 days per year.
    day_of_year = 360 * date["year"] + 30 * (date["month"] - 1) + date["day"] - 1
    # zip pairs each ring image with its speed; the original used
    # rings_files.index(ring), which is O(n) and breaks on duplicate paths.
    for ring_file, speed in zip(rings_files, ring_speeds):
        ring = Image.open(ring_file)
        ring = ring.rotate(angle=-speed * 0.6 * day_of_year)  # 360 days per year
        # Third argument uses the ring's own alpha channel as paste mask.
        center.paste(ring, (0, 0), ring)
    center.save("img/out.png")
    await ctx.send(file=discord.File("img/out.png", filename="img/out.png"))
async def search_reminders(self, ctx, date: Optional[str] = None):
    """Searches for reminders on a specific day"""
    if date:
        try:
            parsed = utils.split_date(date)
        except UnboundLocalError:
            await ctx.send(embed=utils.generate_embed(
                "", "Date was not in the correct format."))
            return 1
        db_search = database.get_reminders(
            ctx.message.guild.id,
            year=parsed["year"],
            month=parsed["month"],
            day=parsed["day"],
        )
    else:
        # No date given: fetch every reminder for this guild.
        db_search = database.get_reminders(ctx.message.guild.id)
    rows = [
        f'\n{r["reminder_id"]}\t{r["human_readable_time"]}\t{r["reminder_text"]}\n'
        for r in db_search
    ]
    message = "".join(rows) or "No reminders found"
    await ctx.send(
        embed=utils.generate_embed("Search Results:", f"```{message}```"))
def update_graph(results_dump):
    """Build the per-month bar-chart figure from a JSON results dump."""
    # Update conditions
    if results_dump is None:
        raise PreventUpdate
    results = json.loads(results_dump)

    # Compute the full (year, month) axis across the requested range.
    start_year = results["start_year"]
    end_year = results["end_year"]
    all_x = [(yr, mo)
             for yr in range(start_year, end_year + 1)
             for mo in range(1, 12 + 1)]

    # Compute values as a map so months with no data get zeros.
    # (todo: could be done with pandas probably)
    data = results["data"]
    if len(data):
        df = pd.DataFrame(data)
        utils.split_date(df)
        counts = utils.count(utils.per_month(df))
        y_map = {ym: counts["count"].loc[ym] for ym in counts.index}
    else:
        y_map = {}  # no data

    # TODO: whats the correct value?
    y = [y_map.get(ym, 0) for ym in all_x]
    x = ["{}-{}-01".format(yr, mo) for (yr, mo) in all_x]

    return {
        "data": [{
            "x": x,
            "y": y,
            "type": "bar",
        }],
        "layout": {
            "xaxis": {
                "tickformat": "%Y/%m"
            }
        }
    }
def fmt_csv(self, data):
    """Return one CSV line describing a document record.

    Each field is double-quoted with inner quotes doubled (RFC 4180 style).
    The heavily repeated date/split/append pattern is factored into a
    private helper; output is byte-identical to the original.
    """
    issns = []
    if data.journal.print_issn:
        issns.append(data.journal.print_issn)
    if data.journal.electronic_issn:
        issns.append(data.journal.electronic_issn)

    line = []

    def _append_date(value):
        # Append the raw date string followed by its year/month/day parts.
        raw = value or ''
        line.append(raw)
        splited = utils.split_date(raw)
        line.append(splited[0])  # year
        line.append(splited[1])  # month
        line.append(splited[2])  # day

    line.append(datetime.datetime.now().isoformat()[0:10])
    line.append(u'document')
    line.append(data.collection_acronym)
    line.append(data.journal.scielo_issn)
    line.append(u';'.join(issns))
    line.append(data.journal.title)
    line.append(u';'.join(data.journal.subject_areas or []))
    # One indicator column per thematic area (case-insensitive match).
    lowered_areas = [i.lower() for i in data.journal.subject_areas or []]
    for area in choices.THEMATIC_AREAS:
        line.append(u'1' if area.lower() in lowered_areas else u'0')
    # NOTE(review): a sibling document formatter in this file uses "> 2" for
    # this multi-area flag — confirm which threshold is intended.
    line.append('1' if len(data.journal.subject_areas or []) > 1 else '0')
    line.append(data.journal.current_status)
    line.append(data.publisher_id)
    line.append(data.publication_date[0:4])
    line.append(data.document_type)
    line.append(u'1' if data.document_type.lower() in
                choices.CITABLE_DOCUMENT_TYPES else '0')
    _append_date(data.receive_date)
    _append_date(data.acceptance_date)
    _append_date(data.review_date)
    _append_date(data.publication_date)
    _append_date(data.creation_date)
    _append_date(data.update_date)

    joined_line = ','.join(['"%s"' % i.replace('"', '""') for i in line])
    return joined_line
async def add_reminder(
    self,
    ctx,
    date: str,
    user_time: str,
    text: str,
    repeating: Union[str, bool] = False,
):
    """Attempts to add a reminder"""
    # Parse date/time; the helpers raise UnboundLocalError on bad formats.
    try:
        parsed_date = utils.split_date(date)
        parsed_time = utils.split_time(user_time)
    except UnboundLocalError:
        raise commands.UserInputError("Date or time was not in the correct format.")

    # A repeating reminder must use a recognized interval keyword.
    if repeating and repeating not in conversion_dict:
        raise commands.UserInputError()

    # Tries to insert the reminder
    stored = database.insert_reminder(
        ctx.guild.id,
        ctx.channel.id,
        parsed_date["year"],
        parsed_date["month"],
        parsed_date["day"],
        parsed_time["hour"],
        parsed_time["minute"],
        text,
        repeating,
    )

    # Insertion failed: duplicate reminder or a moment in the past.
    if not stored:
        await ctx.send(
            embed=utils.generate_embed(
                "Error",
                "`This reminder already exists in the database or is not in the future`",
            )
        )
        return

    # Restart the reminder scheduler, then confirm to the user.
    await asyncio.create_task(self.setup_reminders())
    await ctx.send(
        embed=utils.generate_embed(
            "Reminder Stored",
            f"{date}\n{user_time}\n{text}\nrepeating: {repeating}",
        )
    )
def calculate_inventory(self):
    """Return warehouse-inventory rows derived from the backlog frame.

    Keeps only "Kho giao" backlog rows that are not currently out for
    delivery, then labels each row's handling type (LoaiXuLy).
    """
    backlog = self.calculate_backlog()
    # BUGFIX: .copy() — assigning new columns into a .loc slice of another
    # frame triggers pandas' SettingWithCopyWarning and may silently write
    # into a view instead of an independent frame.
    inventory = backlog.loc[
        (backlog["LoaiBacklog"] == "Kho giao")
        & (backlog["TrangThai"] != "Đang giao hàng")
    ].copy()

    inventory["LoaiXuLy"] = "Chưa giao lại"
    # Never delivered: no first-delivery timestamp recorded.
    never_delivery_filter = inventory["ThoiGianGiaoLanDau"].isnull()
    # Mis-delivered today: the GHN note mentions today's date (Vietnam time).
    today = datetime.now(
        tz=pytz.timezone("Asia/Ho_Chi_Minh")).strftime("%d/%m/%Y")
    mistaken_delivery_filter = (
        ~inventory["GhiChuGHN"].isnull()
        & inventory["GhiChuGHN"].str.contains(today)
    )
    inventory.loc[never_delivery_filter, "LoaiXuLy"] = "Chưa giao lần nào"
    inventory.loc[mistaken_delivery_filter, "LoaiXuLy"] = "Giao lỗi"

    # Split the N0 timestamp into arrival date and arrival time columns.
    # (.apply(f) instead of .apply(lambda x: f(x)) — identical behavior.)
    inventory["N_ve_kho"] = inventory["N0"].apply(split_date)
    inventory["H_ve_kho"] = inventory["N0"].apply(split_time)
    del inventory["GhiChu"]
    return inventory
def fmt_csv(self, data, history):
    """Return one quoted CSV line for a journal record plus a history event."""
    hist, status, reason = history

    # Collect whichever ISSNs are present, print ISSN first.
    issns = [issn for issn in (data.print_issn, data.electronic_issn) if issn]

    fields = [
        datetime.datetime.now().isoformat()[0:10],
        u'journal',
        data.collection_acronym,
        data.scielo_issn,
        u';'.join(issns),
        data.title,
        u';'.join(data.subject_areas or []),
    ]

    # One indicator column per thematic area (case-insensitive).
    lowered = [s.lower() for s in data.subject_areas or []]
    for area in choices.THEMATIC_AREAS:
        fields.append(u'1' if area.lower() in lowered else u'0')

    fields.append('1' if len(data.subject_areas or []) > 1 else '0')
    fields.append(data.current_status)
    fields.append(hist)
    parts = utils.split_date(hist or '')
    fields.append(parts[0])  # year
    fields.append(parts[1])  # month
    fields.append(parts[2])  # day
    fields.append(status)
    fields.append(reason)

    return ','.join('"%s"' % f.replace('"', '""') for f in fields)
def fmt_csv(self, data, history):
    """Return one quoted CSV line for a journal record plus a history event."""
    hist, status, reason = history

    issns = []
    if data.print_issn:
        issns.append(data.print_issn)
    if data.electronic_issn:
        issns.append(data.electronic_issn)

    subject_areas = data.subject_areas or []
    lowered = [name.lower() for name in subject_areas]

    line = []
    line.append(datetime.datetime.now().isoformat()[0:10])
    line.append(u'journal')
    line.append(data.collection_acronym)
    line.append(data.scielo_issn)
    line.append(u';'.join(issns))
    line.append(data.title)
    line.append(u';'.join(subject_areas))
    # Indicator column for each thematic area.
    for area in choices.THEMATIC_AREAS:
        if area.lower() in lowered:
            line.append(u'1')
        else:
            line.append(u'0')
    # Flag journals spanning more than two subject areas.
    line.append('1' if len(subject_areas) > 2 else '0')
    line.append(data.current_status)
    line.append(hist)
    hist_parts = utils.split_date(hist or '')
    line.append(hist_parts[0])  # year
    line.append(hist_parts[1])  # month
    line.append(hist_parts[2])  # day
    line.append(status)
    line.append(reason)

    # Quote every field and escape embedded quotes by doubling them.
    return ','.join(['"%s"' % cell.replace('"', '""') for cell in line])
def test_split_date_5(self):
    """An empty string splits into three empty components."""
    self.assertEqual(utils.split_date(''), ('', '', ''))
def test_split_date_2(self):
    """A year-month value splits with an empty day component."""
    self.assertEqual(utils.split_date('2016-01'), ('2016', '01', ''))
def fmt_csv(self, data):
    """Return one CSV line describing a document record (with ahead date).

    Each field is double-quoted with inner quotes doubled (RFC 4180 style).
    The heavily repeated date/split/append pattern is factored into a
    private helper; output is byte-identical to the original.
    """
    issns = []
    if data.journal.print_issn:
        issns.append(data.journal.print_issn)
    if data.journal.electronic_issn:
        issns.append(data.journal.electronic_issn)

    line = []

    def _append_date(value):
        # Append the raw date string followed by its year/month/day parts.
        raw = value or ''
        line.append(raw)
        splited = utils.split_date(raw)
        line.append(splited[0])  # year
        line.append(splited[1])  # month
        line.append(splited[2])  # day

    line.append(datetime.datetime.now().isoformat()[0:10])
    line.append(u'document')
    line.append(data.collection_acronym)
    line.append(data.journal.scielo_issn)
    line.append(u';'.join(issns))
    line.append(data.journal.title)
    line.append(u';'.join(data.journal.subject_areas or []))
    # One indicator column per thematic area (case-insensitive match).
    lowered_areas = [i.lower() for i in data.journal.subject_areas or []]
    for area in choices.THEMATIC_AREAS:
        line.append(u'1' if area.lower() in lowered_areas else u'0')
    # NOTE(review): a sibling document formatter in this file uses "> 1" for
    # this multi-area flag — confirm which threshold is intended.
    line.append('1' if len(data.journal.subject_areas or []) > 2 else '0')
    line.append(data.journal.current_status)
    line.append(data.publisher_id)
    line.append(data.publication_date[0:4])
    line.append(data.document_type)
    line.append(u'1' if data.document_type.lower() in
                choices.CITABLE_DOCUMENT_TYPES else '0')
    _append_date(data.receive_date)
    _append_date(data.acceptance_date)
    _append_date(data.review_date)
    _append_date(data.ahead_publication_date)
    _append_date(data.publication_date)
    _append_date(data.creation_date)
    _append_date(data.update_date)

    joined_line = ','.join(['"%s"' % i.replace('"', '""') for i in line])
    return joined_line