def __init__(self, booking_date, value_date, ty, reference, amount, purpose):
    """Build a statement entry; both date strings are parsed day-first.

    NOTE(review): ``parserinfo(True)`` passes ``dayfirst=True`` positionally.
    """
    self.type = ty
    self.reference = reference
    self.purpose = purpose
    self.amount = amount
    self.booking_date = parse_date(booking_date, parserinfo=parserinfo(True))
    self.value_date = parse_date(value_date, parserinfo=parserinfo(True))
def run(self, node):
    """Check the freshness and size of the most recent backup for *node*.

    Returns a dict with the node name, the age of the newest backup in days
    (``None`` if no dated backup was found), whether it is recent enough
    (``delta.days <= self.MAX_DAYS``), and its size in bytes/MB/GB.
    """
    self.load_ticket()
    backups = self.get_backup_list(node)
    delta = None
    # BUG FIX: `days` was only assigned inside the regex branch, so an empty
    # backup list (or an undated volid) raised NameError at the return.
    days = None
    success = False
    size = 0
    if backups:
        last_backup = backups[-1]
        volid = last_backup.get('volid')
        # BUG FIX: .get('size') may return None; default to 0 so the
        # `size > 0` comparisons below cannot raise TypeError.
        size = last_backup.get('size') or 0
        if re.search(self.REGEX, volid):
            # Extract the embedded date stamp and normalise '_' to '.'.
            found = re.findall(self.REGEX, volid)[0].replace('_', '.')
            info = parser.parserinfo(dayfirst=False, yearfirst=True)
            backup_date = parse(found, info)
            delta = datetime.now() - backup_date
            days = delta.days
            success = delta.days <= self.MAX_DAYS
    return {
        "node": node,
        "delta": days,
        "success": success,
        "size": size,
        # NOTE(review): divisors 1024000 / 1024000000 are the original
        # (unusual) MB/GB factors; preserved for output compatibility.
        "mb": round(size / 1024000, 2) if size > 0 else size,
        "gb": round(size / 1024000000, 2) if size > 0 else size
    }
def parse_data(path: Path) -> pd.DataFrame:
    """Parse an exported chat log into a DataFrame.

    Each matched line yields a row ``[timestamp, user, message]``; lines that
    do not match the pattern are treated as continuations of the previous
    message and appended to it.

    :param path: path to the UTF-8 text export
    :return: DataFrame with columns ``timestamp``, ``user``, ``message``
    """
    cols = ["timestamp", "user", "message"]
    rows = []
    parser = time_parser(info=parserinfo(dayfirst=True))
    line_regex = re.compile(
        "(.+)\\s(\\d{1,2}:\\d{2}.*?)\\s+-\\s+(.+?):\\s+(.+)")
    last_message = None
    multiline = False
    with open(str(path), "r", encoding="utf-8") as file:
        for line in file:
            match = line_regex.match(line)
            if match:
                if multiline:
                    # A new message starts: flush the accumulated
                    # continuation into the previous row's message column.
                    rows[-1][2] = last_message
                    multiline = False
                timestamp = parser.parse(f"{match[1]} {match[2]}")
                rows.append([timestamp, match[3], match[4]])
                last_message = match[4]
            else:
                if last_message is not None:
                    if not multiline:
                        multiline = True
                    last_message = f"{last_message}{line}"
    # BUG FIX: a multiline message that reaches end-of-file was never
    # written back, silently dropping its continuation lines.
    if multiline:
        rows[-1][2] = last_message
    conversation_data = pd.DataFrame(data=rows, columns=cols)
    return conversation_data
def _parse_single_value(value):
    """Parse *value* and return an ``(start, end)`` span.

    The string is parsed twice against two deliberately different default
    datetimes: any component that comes back identical in both results must
    have been present in the input. Partially specified dates widen into a
    span covering the omitted precision (a bare year -> one year, etc.).
    """
    # No component of these two defaults coincides, so equality after
    # parsing means "explicitly given in the input string".
    probe_defaults = [
        DT.datetime(1, 1, 1, 0, 0, 0),
        DT.datetime(2, 2, 2, 2, 2, 2)
    ]
    info = parserinfo(dayfirst=dayfirst)
    first, second = [parse(value, info, default=d) for d in probe_defaults]
    if first == second:
        # Every attribute was supplied: an exact instant, zero-length span.
        return (first, first)
    attrs = ['year', 'month', 'day', 'hour', 'minute', 'second']
    given = {a for a in attrs if getattr(second, a) == getattr(first, a)}
    # Map the set of supplied attributes to the width of the span.
    span_table = [
        ({'year'}, relativedelta(years=1)),
        ({'year', 'month'}, relativedelta(months=1)),
        ({'year', 'month', 'day'}, relativedelta(days=1)),
        ({'year', 'month', 'day', 'hour'}, relativedelta(hours=1)),
    ]
    for keys, step in span_table:
        if given == keys:
            return (first, first + step)
    # Any other combination is treated as an exact instant.
    return (first, first)
class Command(CSVBaseCommand):
    """
    Command to set Company.created_on where missing.

    Some Company records have missing created_on values which besides being
    inconsistent with the parent BaseModel class is also causing these
    Companies to be omitted from Data Workspace.
    """

    # Dates in the input CSV are UK-style (day first), e.g. 31/01/2020.
    _uk_date_format_parserinfo = parserinfo(dayfirst=True)

    def _process_row(self, row, simulate=False, **options):
        """Process a single row.

        Expects the row to carry 'Suggested Created Date' and 'UUID' columns.
        Rows whose Company already has a created_on are skipped; in simulate
        mode nothing is written.
        """
        # .parse() creates a datetime object even in the absence of hours, minutes
        supplied_datetime = parse(
            row['Suggested Created Date'],
            parserinfo=self._uk_date_format_parserinfo,
        )
        pk = parse_uuid(row['UUID'])
        company = Company.objects.get(pk=pk)
        if company.created_on is not None:
            # Never overwrite an existing creation timestamp.
            logger.warning(
                f'Company {pk} already has a `created_on`; skipping',
            )
            return
        if simulate:
            return
        company.created_on = supplied_datetime
        # Record the change in a reversion revision for auditability.
        with reversion.create_revision():
            company.save(update_fields=('created_on', ))
            reversion.set_comment('Created datetime updated.')
def get(self):
    """Weather lookup endpoint.

    With no filters, returns current weather for every known city. With a
    city (and optionally a day-first date), returns cached weather or a
    fresh Dark Sky forecast.
    """
    city_name = request.args.get('city')
    date = request.args.get('date')
    # No filter at all: dump the current weather for every city we know.
    if date is None and city_name is None:
        return [self.get_weather(city.name) for city in CityModel.query.all()], 200
    if city_name is None:
        return {'message': 'Please enter a city name to get weather information'}, 400
    city = CityModel.find_by_name(city_name.lower())
    if not city:
        return {'message': 'Requested city is not present in the database'}, 400
    if not date:
        return self.get_weather(city.name), 200
    # Normalise the requested date to midnight before the cache lookup.
    input_date = parse(date, parserinfo(dayfirst=True)).replace(
        hour=0, minute=0, second=0, microsecond=0)
    weather_obj = WeatherModel.find_by_date(city.name, input_date)
    if weather_obj:
        return weather_obj.json(), 200
    # Not cached: fetch the forecast for that date from the API.
    new_forecast = forecast(darksky_key, city.latitude, city.longitude,
                            time=input_date.isoformat())
    return {'summary': new_forecast.summary,
            'temperature': new_forecast.temperature,
            'humidity': new_forecast.humidity}, 200
def handle(self, *args, **options):
    """Scrape GCN circulars for each known superevent and store them.

    For every superevent id, downloads the circular index, splits it on the
    '////...' separators, and for each circular fetches the referenced
    .gcn3 file and records a ScrapedAlert with its DATE as timestamp.
    """
    for superevent_id in self.superevent_ids:
        response = requests.get(f'https://gcn.gsfc.nasa.gov/other/{superevent_id}.gcn3')
        circulars = re.split(r'\/{10,}', response.text)
        for circular in circulars:
            date = None
            file_name = None
            file_content = io.BytesIO()
            for line in circular.splitlines():
                entry = line.split(':', 1)
                if len(entry) > 1:
                    if entry[0] == 'NUMBER':
                        alert_id = entry[1].strip()
                        response = requests.get(
                            f'https://gcn.gsfc.nasa.gov/gcn3/{alert_id}.gcn3',
                            stream=True)
                        file_content.write(response.content)
                        file_name = f'{alert_id}.gcn3'
                    elif entry[0] == 'DATE' and 'MAG:' not in entry[1]:
                        # GCN dates are year-first (yy/mm/dd).
                        date = parse(entry[1], parserinfo=parserinfo(yearfirst=True))
            # BUG FIX: a BytesIO object is always truthy, so the old
            # `and file_content` test never rejected an empty download;
            # check the number of bytes actually written instead.
            if date and file_name and file_content.getbuffer().nbytes:
                alert = ScrapedAlert.objects.create(
                    alert_type='lvc_circular',
                    timestamp=date,
                )
                alert.alert_data.save(file_name, files.File(file_content))
                alert.save()
def returnDate(dateTimeStr):
    """Normalise a 'yyyy:mm:dd<sep>time' string and parse it to a datetime.

    The date part may use ':' separators; they are rewritten to '-' so the
    parser (yearfirst, timezone ignored) can handle the string. Returns
    ``None`` (after an arcpy warning) when parsing fails.
    """
    try:
        if ' ' in dateTimeStr:
            parts = dateTimeStr.split(' ')
            parts[0] = parts[0].replace(':', '-')
            dateTimeStr = ' '.join(parts)
        elif 'T' in dateTimeStr:
            parts = dateTimeStr.split('T')
            parts[0] = parts[0].replace(':', '-')
            dateTimeStr = 'T'.join(parts)
        elif '_' in dateTimeStr:
            # BUG FIX: this branch previously re-split on ' ' (which cannot
            # appear here), so the time part after '_' was discarded.
            # Split on '_' and rejoin with a space so the parser accepts it.
            parts = dateTimeStr.split('_')
            parts[0] = parts[0].replace(':', '-')
            dateTimeStr = ' '.join(parts)
        parserInfo = parserinfo(dayfirst=False, yearfirst=True)
        dateObj = parse(dateTimeStr, parserInfo, ignoretz=True)
    except Exception:
        arcpy.AddWarning(
            "{} is not in the required format.".format(dateTimeStr))
        return None
    return dateObj
def export(lexicon):
    """Export all entries of *lexicon*, optionally as of a given date/format.

    Requires read permission on the lexicon. Keeps only the newest revision
    of each entry id and drops removed entries.
    """
    # TODO can user with only read permissions export all the lexicon?
    # (eg saol)
    auth, permitted = validate_user(mode="read")
    if lexicon not in permitted:
        raise eh.KarpAuthenticationError('You are not allowed to search the '
                                         'lexicon %s' % lexicon)
    settings = parser.make_settings(permitted, {
        "size": -1,
        "resource": lexicon
    })
    query = request.query_string
    parsed = parser.parse_qs(query)
    parser.parse_extra(parsed, settings)
    date = settings.get('date', '')
    mode = settings.get('mode', '')
    if date:
        from dateutil.parser import parserinfo, parse
        from datetime import datetime
        # parse the date as inclusive (including the whole selected day)
        # BUG FIX: the defaults were written as 01, 01 — leading-zero int
        # literals are a SyntaxError on Python 3; 1, 1 is the same value.
        date = parse(date, parserinfo(yearfirst=True),
                     default=datetime(1999, 1, 1, 23, 59))
    to_keep = {}
    engine, db_entry = db.get_engine(lexicon, echo=False)
    logging.debug('exporting entries from %s ' % lexicon)
    for entry in db.dbselect(lexicon, engine=engine, db_entry=db_entry,
                             max_hits=-1, to_date=date):
        _id = entry['id']
        if _id in to_keep:
            # Keep only the most recent revision of each entry.
            last = to_keep[_id]['date']
            if last < entry['date']:
                to_keep[_id] = entry
        else:
            to_keep[_id] = entry
    ans = [
        val['doc'] for val in to_keep.values() if val['status'] != 'removed'
    ]
    # NOTE(review): with the default size of -1 this slice drops the last
    # entry (ans[:-1]) — looks unintended, but parse_extra may override the
    # size from the query string; confirm before changing.
    ans = ans[:settings['size']]
    logging.debug('exporting %s entries' % len(ans))
    if settings.get('format', ''):
        toformat = settings.get('format')
        index, typ = configM.get_mode_index(mode)
        msg = 'Unkown %s %s for mode %s' % ('format', toformat, mode)
        format_posts = configM.extra_src(mode, 'exportformat',
                                         helpers.notdefined(msg))
        lmf, err = format_posts(ans, lexicon, mode, toformat)
        return Response(lmf, mimetype='text/xml')
    else:
        return jsonify({lexicon: ans})
def add(): """ Insert a payment. """ # read values. parserinfo = parser.parserinfo(dayfirst=True) paid_at = datetime.today() paid_at = parser.parse( click.prompt("Date", default=paid_at.strftime("%d/%m/%Y %H:%M")), parserinfo) net = click.prompt("Net", 0, type=float) # TODO: add defaults from selected invoice. tax = click.prompt("Tax", 0, type=float) # TODO: add defaults from selected invoice. gross = click.prompt( "Gross", 0, type=float) # TODO: add defaults from selected invoice. note = click.prompt("Note", default="") note = (note if note != "" else None) invoice = None # TODO: select invoice, maybe before. # display summary. click.echo() click.echo("Summary:") click.echo() click.echo( tabulate( [[ paid_at.strftime("%d/%m/%Y %H:%M"), str(gross), str(tax), str(net), note, "" # TODO: invoice ]], ['Date', 'Gross', 'Tax', 'Net', 'Note', 'Invoice'])) click.echo() if not click.confirm("Register payment?"): click.echo("Payment not registered.") return # register data. pay = Payment.create(configurator.customer, paid_at, gross, tax, net, invoice, note) payment_repo.create(pay) click.echo("Payment registered.") return
def __init__(self, *args, dayfirst=False, yearfirst=False, fuzzy=True,
             **kwargs):
    """Initialise the parent and keep a configured parserinfo plus the
    fuzzy-parsing flag for later date parsing."""
    super().__init__(*args, **kwargs)
    self._fuzzy = fuzzy
    self._parserinfo = date_parser.parserinfo(dayfirst=dayfirst,
                                              yearfirst=yearfirst)
def to_json(self):
    """Serialise this statement into an InfluxDB point dict.

    Both boundary dates are parsed day-first and formatted with the
    module-level ``date_format``; ``to_date`` becomes the point's time.
    """
    def _fmt(value):
        return parse_date(value, parserinfo=parserinfo(True)).strftime(date_format)

    from_date = _fmt(self.from_date)
    to_date = _fmt(self.to_date)
    return {
        "measurement": "statement",
        "tags": {
            "year": self.year,
            "statement_number": self.statement_number,
            "from_date": from_date
        },
        "time": to_date,
        "fields": {
            "old_balance": float(self.old_balance),
            "new_balance": float(self.new_balance),
        }
    }
def DateStringToDay(date_string):
    """Return the English weekday name for a day-first date string."""
    weekday_names = (
        'Monday',
        'Tuesday',
        'Wednesday',
        'Thursday',
        'Friday',
        'Saturday',
        'Sunday',
    )
    parsed = dateParser.parse(date_string, parserinfo(dayfirst=True))
    return weekday_names[parsed.weekday()]
def insertMenu(self, name, calorie, date, meal_type):
    """Insert one meal row; the date string is parsed day-first.

    Errors are printed rather than raised (best-effort insert).
    """
    try:
        with self.mariadb_connection.cursor() as cursor:
            # parserinfo(True, False) == dayfirst=True, yearfirst=False
            submission = parse(date, parserinfo(True, False))
            sql_date = submission.strftime("%Y-%m-%d %H:%M:%S")
            query = "INSERT INTO meal (name, calorie, type, submission_date) VALUES (%s, %s, %s, %s)"
            cursor.execute(query, (name, calorie, meal_type, sql_date))
            self.mariadb_connection.commit()
    except Exception as e:
        print(e)
def parse_seminar_date(date_string: str, url: str) -> datetime:
    """
    Parses a date string as Brisbane Time.

    :param date_string: day-first date/time text from the seminars page
    :param url: source page, used in the raised exception
    :raises InvalidFormatException: if the string cannot be parsed
    """
    parser_info = parser.parserinfo(dayfirst=True)
    try:
        # The dates and times on the seminars page don't specify a timezone, so
        # the datetime returned by the parser will always be a native datetime
        date = parser.parse(date_string, parser_info)
        # Therefore, we must set the Brisbane Time Zone information.
        return date.replace(tzinfo=BRISBANE_TZ)
    except Exception as exc:
        # Chain the original error so the parse failure isn't swallowed.
        raise InvalidFormatException(
            url, f'Could not parse the date {date_string}') from exc
def add(explicit): """ Insert work. """ # defaults date_dt = datetime.datetime.today() from_dt = datetime.datetime.today() to_dt = datetime.datetime.today() customer_id = configurator.customer minutes = None note = None add = None price = 12.0 parserinfo = parser.parserinfo(dayfirst=True) date_dt = parser.parse( click.prompt("Date", default=date_dt.strftime('%d/%m/%Y %H:%M')), parserinfo) from_dt = parser.parse( click.prompt("From", default=from_dt.strftime('%d/%m/%Y %H:%M')), parserinfo) to_dt = parser.parse( click.prompt("To", default=to_dt.strftime('%d/%m/%Y %H:%M')), parserinfo) if explicit: minutes = click.prompt("Minutes", default=-1, type=int) minutes = minutes if minutes != -1 else None if minutes is not None: from_dt = None to_dt = None registry = click.prompt("Registry") prod = click.prompt("Prod", default=True, type=bool) km = click.prompt("Km", default=0, type=int) client_id = click.prompt("Client Id", default=-1, type=int) client_id = client_id if client_id != -1 else None if explicit: add = click.prompt("Add", default=-1, type=int) add = add if add != -1 else None note = click.prompt("Note", default="") note = note if note != "" else None price = click.prompt("Price", default=configurator.price, type=float) w = Work.create(date_dt, from_dt, to_dt, registry, prod, km, client_id, customer_id, minutes, add, note, price) work_repo.create(w) return
def get_date_from_value(date_value):
    """Coerce *date_value* into a ``datetime.date``.

    Accepts ``None``/empty (-> None), datetime/date objects, or a string
    containing a dotted day-first date (e.g. '31.12.2020'); any other
    string yields None.
    """
    if not date_value:
        return None
    if isinstance(date_value, datetime.datetime):
        return date_value.date()
    if isinstance(date_value, datetime.date):
        return date_value
    match = re.search(r"\d+\.\d+\.\d+", date_value, re.IGNORECASE)
    if match is None:
        return None
    return parse(match.group(0), parserinfo=parserinfo(dayfirst=True)).date()
def getMenu(self, meal_type, date):
    """Return the meals of *meal_type* submitted on *date* (day-first string).

    Best-effort: on any error the partial (possibly empty) list is returned.
    """
    menu = []
    try:
        with self.mariadb_connection.cursor() as cursor:
            # parserinfo(True, False) == dayfirst=True, yearfirst=False
            sql_date = parse(date, parserinfo(True, False)).strftime("%Y-%m-%d %H:%M:%S")
            query = "SELECT name, calorie FROM meal WHERE (submission_date = %s AND type = %s)"
            cursor.execute(query, (sql_date, meal_type))
            for meal_name, meal_calorie in cursor:
                menu.append({'name': meal_name, 'calorie': meal_calorie})
    except Exception as e:
        print(e)
    finally:
        return menu
def from_row(cls, row):
    """Build an instance from one bank-export row.

    Column layout: [_, date, type, amount, currency, payee, payee_acc_no,
    description, balance]. The date is parsed day-first; the amount has
    spaces stripped and a decimal comma converted to a dot.
    """
    # BUG FIX: the annotation was `[float, None]` — a list literal, not a
    # type; use the (lazily evaluated) union spelling instead.
    def str_to_float(value: str) -> "float | None":
        if value:
            return float(value.replace(' ', '').replace(',', '.').strip())
        else:
            return None

    return cls(
        date=parse(row[1], parserinfo=parserinfo(dayfirst=True)),
        type=row[2],
        amount=str_to_float(row[3]),
        currency=row[4],
        payee=row[5],
        payee_acc_no=row[6],
        description=row[7],
        balance=row[8],
    )
def _shift_to_period_end(
    date: str,
) -> Optional[Callable[[DATETIME_TYPE], DATETIME_TYPE]]:
    """
    Get function to shift the dates to the end of period.

    Recognised formats: "2020-12", "2021-Q1", "2021Q1", "2021", and
    month-name forms like "September 2020" / "Sep 2020".

    :param date: string date
    :return: a function to shift the dates to the end of period. If `None`,
        no shift is needed
    """
    def shift_to_month_end(x: DATETIME_TYPE) -> DATETIME_TYPE:
        return x + pd.offsets.MonthEnd(0)

    def shift_to_quarter_end(x: DATETIME_TYPE) -> DATETIME_TYPE:
        return x + pd.offsets.QuarterEnd(0)

    def shift_to_year_end(x: DATETIME_TYPE) -> DATETIME_TYPE:
        return x + pd.offsets.YearEnd(0)

    if date[:4].isdigit():
        if len(date) == 7:
            if date[5:].isdigit():
                # "2020-12" format.
                return shift_to_month_end
            if date[5] == "Q":
                # "2021-Q1" format.
                return shift_to_quarter_end
        elif len(date) == 6:
            # "2021Q1" format.
            if date[4] == "Q":
                return shift_to_quarter_end
        elif len(date) == 4:
            # "2021" format.
            return shift_to_year_end
    # "September 2020" of "Sep 2020" format.
    # Get a flat list of month aliases. The full month name comes first.
    # (Reversing puts full names before abbreviations so e.g. "September"
    # is matched in full rather than just "Sep".)
    month_aliases = sum(dparse.parserinfo().MONTHS, ())[::-1]
    pattern = re.compile("|".join(month_aliases), re.IGNORECASE)
    match = pattern.search(date)
    if match is None:
        # Unrecognised format: no shift.
        return
    span = match.span()
    # Only accept "<month> <4-digit year>" once the month text is removed.
    date_without_month = f"{date[:span[0]]}{date[span[1]:]}".strip()
    if len(date_without_month) == 4 and date_without_month.isdigit():
        return shift_to_month_end
def get_parsed_assessment_due_date(assessment_item):
    '''
    Returns the parsed due date for the given assessment item as a pair of
    datetime objects (start, end). 'Examination Period' maps to the current
    exam period; a ' - ' separated range is parsed as two dates; a single
    date yields (date, date). If the date cannot be parsed, a
    DateSyntaxException is raised.
    '''
    _, _, due_date, _ = assessment_item
    if due_date == 'Examination Period':
        return get_current_exam_period()
    parser_info = parser.parserinfo(dayfirst=True)
    try:
        # If a date range is detected, attempt to split into start and end
        # dates. Else, attempt to just parse the whole thing.
        if ' - ' in due_date:
            start_date, end_date = due_date.split(' - ', 1)
            start_datetime = parser.parse(start_date, parser_info)
            end_datetime = parser.parse(end_date, parser_info)
            return start_datetime, end_datetime
        due_datetime = parser.parse(due_date, parser_info)
        return due_datetime, due_datetime
    except Exception as exc:
        # Chain the underlying parse error for easier debugging.
        raise DateSyntaxException(due_date) from exc
def parse_kills(contents, retention):
    """Yield Kill records parsed from a Soldat kill-log blob (Python 2 code).

    Entries older than the retention window are skipped; timestamps are
    year-first ("yy-mm-dd hh:mm:ss").
    """
    m = re.findall('\-\-\- (\d\d\-\d\d\-\d\d \d\d:\d\d:\d\d)\\n(.+)\\n(.+)\\n(Ak-74|Barrett M82A1|'
                   'Chainsaw|Cluster Grenades|Combat Knife|Desert Eagles|FN Minimi|Grenade|Hands|HK MP5|LAW|M79|Ruger '
                   '77|Selfkill|Spas-12|Stationary gun|Steyr AUG|USSOCOM|XM214 Minigun)\n', contents)
    # Hoisted out of the loop: the parserinfo is loop-invariant, and
    # constructing it per kill was pure overhead.
    info = parser.parserinfo(yearfirst=True)
    for kill in m:
        timestamp, killer, victim, weapon = map(string.strip, kill)
        suicide = killer == victim or weapon == 'Selfkill'
        date = parser.parse(timestamp, info)
        if retention.too_old(date):
            continue
        unixtime = int(time.mktime(date.timetuple()))
        yield Kill(
            killer,
            victim,
            weapon,
            unixtime,
            suicide
        )
def DateTimeStringToEpoch(date_time_string):
    """Parse a day-first datetime string and return its POSIX timestamp."""
    parsed = dateParser.parse(date_time_string, parserinfo(dayfirst=True))
    return parsed.timestamp()
def __init__(self, retention, filemanager):
    """Store collaborators and pre-build the year-first parserinfo reused
    when parsing kill dates."""
    self.filemanager = filemanager
    self.retention = retention
    self.parse_kill_date_parserinfo = parser.parserinfo(yearfirst=True)
def add(explicit, report):
    """ Insert an invoice.

    Prompts for amounts and metadata (the date only with --explicit,
    day-first), shows a summary, and after confirmation stores the invoice
    and optionally generates its document file.
    """
    parserinfo = parser.parserinfo(dayfirst=True)
    # defaults
    gross = 0
    tax = 0
    net = 0
    if report is not None:
        pass  # TODO: get defaults from report.
    # read values
    date_ = datetime.today()
    if explicit:
        date_ = parser.parse(
            click.prompt("Date", default=date_.strftime('%d/%m/%Y')),
            parserinfo)
    date_ = date_.date()
    prog = click.prompt("Progressive",
                        default=invoice_repo.getNextProg(date_))
    net = click.prompt("Net", net, type=float)
    tax = click.prompt("Tax", tax, type=float)
    gross = click.prompt("Gross", gross, type=float)
    reason = click.prompt("Reason",
                          default="assistenza presso Vostri clienti")
    note = click.prompt("Note", default="")
    click.echo()
    click.echo("Summary:")
    click.echo()
    click.echo(
        tabulate([[
            date_.strftime("%d/%m/%Y"),
            str(prog),
            str(gross),
            str(tax),
            str(net), reason, note
        ]], ['Date', 'Prog', 'Gross', 'Tax', 'Net', 'Reason', 'Note']))
    click.echo()
    if not click.confirm("Emit invoice?"):
        click.echo("Invoice not emitted.")
        return
    inv = Invoice.create(date_, prog, gross, tax, net, reason, note,
                         configurator.customer)
    invoice_repo.create(inv)
    click.echo("Invoice registered.")
    if click.confirm("Generate invoice file?"):
        doc.set_invoice_from(inv)
        doc.date = date_
        ret = doc.generate()
        click.echo()
        if ret is False:
            click.echo("Error occurred: could not generate invoice file.")
        else:
            click.echo("Invoice emitted. Locate it at %s" % ret)
    return
def get_queryset(self):  # noqa: C901
    """Allow filtering leases by various query parameters

    `identifier` query parameter can be used to find the Lease with the
    provided identifier. example: .../lease/?identifier=S0120-219

    `search` query parameter can be used to find leases by identifier and
    multiple other fields (addresses, tenant/lessor names, dates, ...).
    The advanced-search form fields are applied on top of either.
    """
    succinct = self.request.query_params.get("succinct")
    if succinct:
        queryset = Lease.objects.succinct_select_related_and_prefetch_related()
    else:
        queryset = Lease.objects.full_select_related_and_prefetch_related()
    # Filtering only applies to the list endpoint.
    if self.action != "list":
        return queryset
    # Simple search
    identifier = self.request.query_params.get("identifier")
    search = self.request.query_params.get("search")
    if identifier is not None or search is not None:
        if search is None:
            search_string = identifier
            search_by_other = False
        else:
            search_string = search
            search_by_other = True
        # Full identifier shape: one letter, four digits, dash, sequence.
        looks_like_identifier = bool(
            re.match(r"[A-Z]\d{4}-\d+$", search_string.strip(), re.IGNORECASE)
        )
        # Search by identifier or parts of it
        if len(search_string) < 3:
            identifier_q = Q(
                identifier__type__identifier__istartswith=search_string
            )
        elif len(search_string) == 3:
            identifier_q = Q(
                identifier__type__identifier__iexact=search_string[:2],
                identifier__municipality__identifier=search_string[2:3],
            )
        elif len(search_string) < 7:
            district_identifier = search_string[3:5]
            if district_identifier == "0":
                identifier_q = Q(
                    identifier__type__identifier__iexact=search_string[:2],
                    identifier__municipality__identifier=search_string[2:3],
                    identifier__district__identifier__in=range(0, 10),
                )
            else:
                # "00" denotes district zero; otherwise strip leading zeros.
                if district_identifier == "00":
                    district_identifier = "0"
                else:
                    district_identifier = district_identifier.lstrip("0")
                identifier_q = Q(
                    identifier__type__identifier__iexact=search_string[:2],
                    identifier__municipality__identifier=search_string[2:3],
                    identifier__district__identifier__startswith=district_identifier,
                )
        elif looks_like_identifier:
            district_identifier = search_string[3:5]
            if district_identifier == "00":
                district_identifier = "0"
            else:
                district_identifier = district_identifier.lstrip("0")
            identifier_q = Q(
                identifier__type__identifier__iexact=search_string[:2],
                identifier__municipality__identifier=search_string[2:3],
                identifier__district__identifier=district_identifier,
                identifier__sequence__startswith=search_string[6:],
            )
        else:
            identifier_q = Q()
        other_q = Q()
        # Search also by other fields if the search string is clearly not a lease identifier
        if search_by_other and not looks_like_identifier:
            # Address
            other_q |= Q(lease_areas__addresses__address__icontains=search_string)
            # Property identifier
            other_q |= Q(lease_areas__identifier__icontains=search_string)
            normalized_identifier = normalize_property_identifier(search_string)
            if search_string != normalized_identifier:
                other_q |= Q(
                    lease_areas__identifier__icontains=normalized_identifier
                )
            # Tenantcontact name
            other_q |= Q(
                tenants__tenantcontact__contact__name__icontains=search_string
            )
            # Try both "first last" and "last first" orderings.
            if " " in search_string:
                tenant_name_parts = search_string.split(" ", 2)
                other_q |= Q(
                    tenants__tenantcontact__contact__first_name__icontains=tenant_name_parts[
                        0
                    ]
                ) & Q(
                    tenants__tenantcontact__contact__last_name__icontains=tenant_name_parts[
                        1
                    ]
                )
                other_q |= Q(
                    tenants__tenantcontact__contact__first_name__icontains=tenant_name_parts[
                        1
                    ]
                ) & Q(
                    tenants__tenantcontact__contact__last_name__icontains=tenant_name_parts[
                        0
                    ]
                )
            else:
                other_q |= Q(
                    tenants__tenantcontact__contact__first_name__icontains=search_string
                )
                other_q |= Q(
                    tenants__tenantcontact__contact__last_name__icontains=search_string
                )
            # Lessor
            other_q |= Q(lessor__name__icontains=search_string)
            other_q |= Q(lessor__first_name__icontains=search_string)
            other_q |= Q(lessor__last_name__icontains=search_string)
            # Date (day-first); a non-date search string simply skips this.
            try:
                search_date = parse(
                    search_string, parserinfo=parserinfo(dayfirst=True)
                )
                if search_date:
                    other_q |= Q(start_date=search_date.date())
                    other_q |= Q(end_date=search_date.date())
            except ValueError:
                pass
        queryset = queryset.filter(identifier_q | other_q)
    # Advanced search
    search_form = LeaseSearchForm(self.request.query_params)
    if search_form.is_valid():
        if search_form.cleaned_data.get("tenant_name"):
            tenant_name = search_form.cleaned_data.get("tenant_name")
            # Tenantcontact name
            q = Q(tenants__tenantcontact__contact__name__icontains=tenant_name)
            if " " in tenant_name:
                tenant_name_parts = tenant_name.split(" ", 2)
                q |= Q(
                    tenants__tenantcontact__contact__first_name__icontains=tenant_name_parts[
                        0
                    ]
                ) & Q(
                    tenants__tenantcontact__contact__last_name__icontains=tenant_name_parts[
                        1
                    ]
                )
                q |= Q(
                    tenants__tenantcontact__contact__first_name__icontains=tenant_name_parts[
                        1
                    ]
                ) & Q(
                    tenants__tenantcontact__contact__last_name__icontains=tenant_name_parts[
                        0
                    ]
                )
            else:
                q |= Q(
                    tenants__tenantcontact__contact__first_name__icontains=tenant_name
                )
                q |= Q(
                    tenants__tenantcontact__contact__last_name__icontains=tenant_name
                )
            if search_form.cleaned_data.get("tenantcontact_type"):
                q &= Q(
                    tenants__tenantcontact__type__in=search_form.cleaned_data.get(
                        "tenantcontact_type"
                    )
                )
            if search_form.cleaned_data.get("only_past_tenants"):
                q &= Q(tenants__tenantcontact__end_date__lte=datetime.date.today())
            if search_form.cleaned_data.get("tenant_activity"):
                if search_form.cleaned_data.get("tenant_activity") == "past":
                    q &= Q(
                        tenants__tenantcontact__end_date__lte=datetime.date.today()
                    )
                if search_form.cleaned_data.get("tenant_activity") == "active":
                    # No need to filter by start date because future start dates are also considered active
                    q &= Q(tenants__tenantcontact__end_date=None) | Q(
                        tenants__tenantcontact__end_date__gte=datetime.date.today()
                    )
            queryset = queryset.filter(q)
        if search_form.cleaned_data.get("sequence"):
            queryset = queryset.filter(
                identifier__sequence=search_form.cleaned_data.get("sequence")
            )
        if search_form.cleaned_data.get("lease_start_date_start"):
            queryset = queryset.filter(
                start_date__gte=search_form.cleaned_data.get(
                    "lease_start_date_start"
                )
            )
        if search_form.cleaned_data.get("lease_start_date_end"):
            queryset = queryset.filter(
                start_date__lte=search_form.cleaned_data.get("lease_start_date_end")
            )
        if search_form.cleaned_data.get("lease_end_date_start"):
            queryset = queryset.filter(
                end_date__gte=search_form.cleaned_data.get("lease_end_date_start")
            )
        if search_form.cleaned_data.get("lease_end_date_end"):
            queryset = queryset.filter(
                end_date__lte=search_form.cleaned_data.get("lease_end_date_end")
            )
        # Filter by active / expired only when only one of the options is set
        if bool(search_form.cleaned_data.get("only_active_leases")) ^ bool(
            search_form.cleaned_data.get("only_expired_leases")
        ):
            if search_form.cleaned_data.get("only_active_leases"):
                # No need to filter by start date because future start dates are also considered active
                queryset = queryset.filter(
                    Q(end_date__isnull=True) | Q(end_date__gte=datetime.date.today())
                )
            if search_form.cleaned_data.get("only_expired_leases"):
                queryset = queryset.filter(end_date__lte=datetime.date.today())
        if "has_geometry" in search_form.cleaned_data:
            if search_form.cleaned_data.get("has_geometry") is True:
                queryset = queryset.filter(lease_areas__geometry__isnull=False)
            if search_form.cleaned_data.get("has_geometry") is False:
                queryset = queryset.filter(lease_areas__geometry__isnull=True)
        if search_form.cleaned_data.get("property_identifier"):
            property_identifier = search_form.cleaned_data.get(
                "property_identifier"
            )
            normalized_identifier = normalize_property_identifier(
                property_identifier
            )
            queryset = queryset.filter(
                Q(lease_areas__identifier__icontains=property_identifier)
                | Q(lease_areas__identifier__icontains=normalized_identifier)
            )
        if search_form.cleaned_data.get("address"):
            queryset = queryset.filter(
                lease_areas__addresses__address__icontains=search_form.cleaned_data.get(
                    "address"
                )
            )
        if search_form.cleaned_data.get("lease_state"):
            queryset = queryset.filter(
                state__in=search_form.cleaned_data.get("lease_state")
            )
        if search_form.cleaned_data.get("business_id"):
            queryset = queryset.filter(
                tenants__tenantcontact__contact__business_id__icontains=search_form.cleaned_data.get(
                    "business_id"
                )
            )
        if search_form.cleaned_data.get("national_identification_number"):
            nat_id = search_form.cleaned_data.get("national_identification_number")
            queryset = queryset.filter(
                tenants__tenantcontact__contact__national_identification_number__icontains=nat_id
            )
        if search_form.cleaned_data.get("lessor"):
            queryset = queryset.filter(
                lessor=search_form.cleaned_data.get("lessor")
            )
        if search_form.cleaned_data.get("contract_number"):
            queryset = queryset.filter(
                contracts__contract_number__icontains=search_form.cleaned_data.get(
                    "contract_number"
                )
            )
        if search_form.cleaned_data.get("decision_maker"):
            queryset = queryset.filter(
                decisions__decision_maker=search_form.cleaned_data.get(
                    "decision_maker"
                )
            )
        if search_form.cleaned_data.get("decision_date"):
            queryset = queryset.filter(
                decisions__decision_date=search_form.cleaned_data.get(
                    "decision_date"
                )
            )
        if search_form.cleaned_data.get("decision_section"):
            queryset = queryset.filter(
                decisions__section=search_form.cleaned_data.get("decision_section")
            )
        if search_form.cleaned_data.get("reference_number"):
            reference_number = search_form.cleaned_data.get("reference_number")
            queryset = queryset.filter(
                Q(reference_number__icontains=reference_number)
                | Q(decisions__reference_number__icontains=reference_number)
            )
        if search_form.cleaned_data.get("invoice_number"):
            queryset = queryset.filter(
                invoices__number__icontains=search_form.cleaned_data.get(
                    "invoice_number"
                )
            )
    # distinct() because the joined filters can multiply rows.
    return queryset.distinct()
import typer from dateutil import parser, tz from dateutil.parser import parserinfo from celery_app import handle_attendee_updated, handle_event_updated from config import ORGS # logger = logging.getLogger(__name__) # formatter = logging.Formatter(fmt="%(levelname)s %(name)s/%(module)s:%(lineno)d - %(message)s") # console = logging.StreamHandler() # console.setFormatter(formatter) # logger.addHandler(console) # logger.setLevel(LOG_LEVEL) # # logger.propagate = False tzinfos = {x: tz.tzutc() for x in parserinfo().UTCZONE} os.environ["TZ"] = "UTC" time.tzset() # We use the Eventbrite REST API directly here instead of the SDK because the SDK doesn't support pagination or Organizations def fetch_list_from_eb(url: str, eb_api_key: str) -> dict: headers = {"Authorization": f"Bearer {eb_api_key}"} path = urlparse(url).path path = path.rstrip("/") item = os.path.basename(path) response = requests.get(url, headers=headers).json() final_response = response[item] while response["pagination"]["has_more_items"] is True:
def parse_date(self):
    """Populate the alert's timestamp from its parsed message's 'date'
    field, interpreting the date year-first."""
    raw_date = self.alert.parsed_message['date']
    self.alert.timestamp = parse(raw_date, parserinfo=parserinfo(yearfirst=True))
def __getitem__(self, datetime):
    """Parse a day-first date/time string and return epoch milliseconds.

    NOTE(review): the parameter name shadows the ``datetime`` module; kept
    unchanged for interface compatibility.
    """
    parseropt = parser.parserinfo(dayfirst=True)
    datestruct = parser.parse(datetime, parserinfo=parseropt)
    # BUG FIX: strftime("%s") is a non-portable glibc extension (fails on
    # Windows and is undocumented elsewhere); timestamp() yields the same
    # local-time epoch seconds for a naive datetime on every platform.
    dateinms = int(datestruct.timestamp()) * 1000
    return dateinms
def test_convertyear_no_specified_century(n):
    """convertyear must resolve a two-digit year to the century nearest
    parserinfo's reference year when no century was specified.

    Offsets the info's reference year (the private ``_year``) by *n*, feeds
    only the final two digits back in, and expects the full year out
    (assumes |n| < 50 so the round-trip is unambiguous).
    """
    p = parserinfo()
    new_year = p._year + n
    result = p.convertyear(new_year % 100, century_specified=False)
    assert result == new_year
def data(operation, file_type, file, ignore_empty_fields):
    """ Import or export works.

    Currently only CSV import is implemented: skips the first two data rows
    (headers), builds a Work per row with day-first dates and "%H:%M" /
    "%H.%M.%S" times, and stores it via work_repo.
    """
    # TODO: this operations should be encapsulated out cli package.
    if operation == 'import':
        if file_type == 'csv':
            if file is not None:
                with open(file, 'r') as csv_file:
                    csv_reader = csv.reader(csv_file, delimiter=',')
                    line_count = 0
                    for row in csv_reader:
                        # BUG FIX: the original used `is not ''` (identity
                        # comparison) throughout; that only works via CPython
                        # string interning and raises SyntaxWarning on 3.8+.
                        # All occurrences are replaced with `!= ''`.
                        if row[0] != '':
                            if line_count >= 2:
                                w = Work()
                                w.customer = customer_repo.find(
                                    configurator.customer)
                                w.date = parser.parse(
                                    row[0], parser.parserinfo(dayfirst=True))
                                # Duration column accepts both separators.
                                try:
                                    minutes = datetime.datetime.strptime(
                                        row[3], "%H.%M.%S")
                                except ValueError:
                                    minutes = datetime.datetime.strptime(
                                        row[3], "%H:%M")
                                w.from_dt = datetime.datetime.strptime(
                                    row[1], "%H:%M"
                                ) if row[1] != '' else w.date.replace(
                                    hour=0, minute=0, second=0)
                                w.from_dt = w.from_dt.replace(
                                    year=w.date.year,
                                    month=w.date.month,
                                    day=w.date.day)
                                w.to_dt = datetime.datetime.strptime(
                                    row[2], "%H:%M"
                                ) if row[2] != '' else w.date.replace(
                                    hour=minutes.hour,
                                    minute=minutes.minute,
                                    second=0)
                                w.to_dt = w.to_dt.replace(year=w.date.year,
                                                          month=w.date.month,
                                                          day=w.date.day)
                                w.km = row[4] if row[4] != '' else 0
                                w.prod = False if row[5] == 'FALSE' else True
                                w.add = row[6] if row[6] != '' else None
                                w.note = row[7] if row[7] != '' else None
                                w.registry = row[8] if row[8] != '' else (
                                    None if ignore_empty_fields is False else '')
                                w.price = 12
                                work_repo.create(w)
                            line_count += 1
                    print(f'Processed {line_count} lines.')
    return
def test_convertyear(n):
    """A fully specified (four-digit) year passes through convertyear
    unchanged."""
    info = parserinfo()
    assert info.convertyear(n) == n
def _get_date_parser():
    """Return a dateutil parser with a fixed two-digit-year window.

    NOTE(review): this pokes dateutil's *private* parserinfo attributes
    (``_year``, ``_century``) — it may break across dateutil versions.
    """
    from dateutil.parser import parser, parserinfo
    info = parserinfo()
    # Pin the reference year to 1980 with century 1900, so two-digit years
    # resolve into a window centred on 1980 instead of the current year.
    info._year = 1930 + 50
    info._century = 1900
    return parser(info)
def __init__(self, heading, text, labels):
    """Store the note; the heading is a day-first date string parsed into
    the creation time."""
    self.labels = labels
    self.text = text
    self.ctime = parse(heading, parserinfo(dayfirst=True))
def __init__(self, ctime, title, text, labels, attachments):
    """Store the note; *ctime* is a day-first date string. A title equal to
    the ctime string is treated as "no title"."""
    self.ctime = parse(ctime, parserinfo(dayfirst=True))
    self.title = None if title == ctime else title
    self.text = text
    self.labels = labels
    self.attachments = attachments
from time import mktime
from dateutil import parser as date_parser
import parsedatetime as pdt
from datetime import datetime
from .timezone import tz_abbreviations

# Shared day-first parserinfo (built once) and a parsedatetime calendar for
# natural-language inputs.
_day_first = date_parser.parserinfo(dayfirst=True)
cal = pdt.Calendar()


def parse_datetime(date_string, day_first=True, tz=tz_abbreviations, **kwargs):
    """Parse *date_string* (day-first by default) with tz-abbreviation
    support; extra kwargs are forwarded to dateutil."""
    parser_info = _day_first if day_first else None
    parsed = date_parser.parse(date_string,
                               fuzzy=True,
                               parserinfo=parser_info,
                               tzinfos=tz,
                               **kwargs)
    return parsed


def parse_date(string_date, day_first=True, **kwargs):
    """Like parse_datetime, but return only the date part."""
    return parse_datetime(string_date, day_first=day_first, **kwargs).date()


def parse_datetime_fuzzy(fuzzy_str):
    """Parse natural-language input ('next friday', ...) to a datetime,
    or None when nothing could be recognised."""
    time_struct, parse_status = cal.parse(fuzzy_str)
    # BUG FIX: Calendar.parse returns a (struct_time, status) tuple, which
    # is *always* truthy — so unparseable input used to come back as "now".
    # status is 0 only when nothing was parsed; test it explicitly.
    if parse_status:
        return datetime.fromtimestamp(mktime(time_struct))


def parse_date_fuzzy(string_date):
    """Fuzzy-parse to a date, or None when nothing could be recognised."""
    date_time = parse_datetime_fuzzy(string_date)
    return date_time.date() if date_time else None
def parse(string, *args, **kwargs):
    """Parse free-text date/recurrence expressions.

    Tags the input words, then either builds a dateutil ``rrule`` (when
    recurrence markers are present) or falls back to a fuzzy dateutil parse
    of the remaining words. Extra args/kwargs are forwarded to
    ``parser.parse``; the ``default`` kwarg seeds "today".
    """
    result = join_tags(tagger.tag(string.split()))
    recur = False
    rrule_args = {}
    values = []
    orig_today = kwargs.pop('default', None)
    info = parser.parserinfo()
    ignore = False
    if orig_today is None:
        today = datetime.today().date()
    else:
        kwargs['default'] = today = orig_today
    # Peek at the next tagged token (or (None, None) at the end).
    next = lambda: result[idx + 1] if len(result) > idx + 1 else (None, None)
    for idx, item in enumerate(result):
        if ignore:
            ignore = False
            continue
        value, type = item
        if type == 'recur':
            recur = True
        elif recur and type == 'recur_detail':
            rrule_args['freq'] = {'monthly': rrule.MONTHLY,
                                  'daily': rrule.DAILY,
                                  'weekly': rrule.WEEKLY,
                                  'yearly': rrule.YEARLY}[value]
            if 'dtstart' not in rrule_args:
                rrule_args['dtstart'] = today
        elif recur and type == 'dmy':
            rrule_args['freq'] = {'day': rrule.DAILY,
                                  'month': rrule.MONTHLY,
                                  'week': rrule.WEEKLY,
                                  'year': rrule.YEARLY}[value]
            if 'dtstart' not in rrule_args:
                rrule_args['dtstart'] = today
        elif type == 'day' and not next()[1] == 'detail':
            if recur:
                rrule_args.setdefault('byweekday', ())
                if value in ('wday', 'weekday'):
                    val = tuple(WEEKDAYS[k] for k in WEEKDAYS.keys()
                                if k in ('mon', 'tue', 'wed', 'thu', 'fri'))
                else:
                    val = (WEEKDAYS[value[:3]],)
                rrule_args['byweekday'] += val
                rrule_args['freq'] = rrule.WEEKLY
            else:
                values.append(value)
        elif type == 'month':
            if recur:
                rrule_args.setdefault('bymonth', ())
                # BUG FIX: this appended to rrule_args['byweekday'] even
                # though the setdefault above prepared 'bymonth' — raising
                # KeyError (or corrupting byweekday) for monthly rules.
                rrule_args['bymonth'] += (info.month(value),)
                rrule_args['freq'] = rrule.MONTHLY
            else:
                values.append(value)
        elif type == 'start':
            rrule_args['dtstart'] = parser.parse(next()[0], fuzzy=True)
            recur = True
        elif type == 'end':
            rrule_args['until'] = parser.parse(next()[0], fuzzy=True)
            recur = True
        elif type == 'time_detail':
            if recur:
                val = parser.parse(next()[0], fuzzy=True)
                rrule_args.setdefault('byhour', ())
                rrule_args['byhour'] += (val.hour,)
            else:
                values.append('at')
        elif type == 'rel_day':
            now = today.timetuple()
            matcher = {'tod': today,
                       'yes': today + relativedelta(days=-1),
                       'tom': today + relativedelta(days=+1),
                       'ton': datetime(now[0], now[1], now[2], 22, now[4],
                                       now[5])}
            if not any(x for x in result if x[1] == 'start') and recur:
                rrule_args['dtstart'] = matcher[value[:3]]
            else:
                values.append(matcher[value[:3]].isoformat())
        elif type == 'jump':
            # BUG FIX: the original rebound the `next` lookahead lambda
            # (`next = next()[0][:3]`), so every later iteration that called
            # next() crashed; use a separate local instead.
            target = next()[0][:3]
            if target in info._weekdays:
                dt = today + relativedelta(weekday=WEEKDAYS[target](+1))
            elif target in info._months:
                dt = today + relativedelta(month=info._months[target] + 1)
            # NOTE(review): `dt` is unbound if the jump target is neither a
            # weekday nor a month — preserved behaviour, confirm upstream.
            if not any(x for x in result if x[1] == 'start') and recur:
                rrule_args['dtstart'] = dt
            else:
                values.append(dt.isoformat())
            ignore = True
        else:
            values.append(value)
    if recur:
        if 'dtstart' not in rrule_args:
            if orig_today:
                rrule_args['dtstart'] = orig_today
            else:
                rrule_args['dtstart'] = parser.parse(u' '.join(values),
                                                     *args, **kwargs)
        return rrule.rrule(**rrule_args)
    else:
        kwargs['fuzzy'] = True
        return parser.parse(u' '.join(values), *args, **kwargs)
def parse_time(timestamp):
    """Parse a day-first timestamp string and format it as ISO-8601 with a
    literal +00:00 offset (any offset in the input is not converted)."""
    parsed = parser.parse(timestamp, parser.parserinfo(dayfirst=True))
    return parsed.strftime("%Y-%m-%dT%H:%M:%S+00:00")