def admin_app(self, origin, slug):
    """Admin page for a single app: view, edit featured-status, or delete.

    GET renders the admin form; POST either deletes the app (when the
    'delete' param is set) or updates its featured/keyword fields and
    redirects back to this page.

    :param origin: passed through to ``self.get_app`` to locate the app
    :param slug: unused here — presumably consumed by the routing layer;
        verify against the dispatcher
    """
    # Hide the page entirely from non-admins (404, not 403, so the URL
    # does not leak the existence of an admin area).
    if not self.is_admin():
        raise exc.HTTPNotFound
    app = self.get_app(origin, session=self.session)
    if self.req.method == 'POST':
        p = self.req.params
        if p.get('delete'):
            # Deletion commits immediately and short-circuits the rest
            # of the form handling.
            self.session.delete(app)
            self.session.commit()
            return 'Deleted!'
        # Checkbox: absent param means not featured.
        app.featured = bool(p.get('featured'))
        if p.get('featured_sort'):
            app.featured_sort = float(p['featured_sort'])
        else:
            app.featured_sort = None
        # NOTE(review): `dateutil.parse` assumes the module-level import
        # binds dateutil's parser (e.g. `from dateutil import parser as
        # dateutil`) — plain `import dateutil` has no `parse`; confirm.
        if p.get('featured_start'):
            app.featured_start = dateutil.parse(p['featured_start'])
        else:
            app.featured_start = None
        if p.get('featured_end'):
            app.featured_end = dateutil.parse(p['featured_end'])
        else:
            app.featured_end = None
        # Keywords arrive as a comma-separated string; empty entries are
        # dropped after trimming whitespace.
        keywords = p.get('keywords') or ''
        keywords = [k.strip() for k in keywords.split(',') if k.strip()]
        app.keywords = keywords
        model.Keyword.add_words(keywords, session=self.session)
        self.session.add(app)
        self.session.commit()
        # Redirect-after-POST back to this admin page.
        return exc.HTTPFound(app.url + '/admin')
    return self.render('admin_app', app=app)
def _cast_attr(value, default): if isinstance(default, int): return int(value) elif isinstance(default, float): return float(value) elif isinstance(default, datetime.date): return dateutil.parse(value).date() elif isinstance(default, datetime.datetime): return dateutil.parse(value) else: return value
def _cast_attr(value, default):
    """Cast *value* to the type of *default*.

    :param value: raw value (typically a string) to convert
    :param default: sample value whose type drives the conversion
    :return: *value* converted to int, float, datetime.date or
        datetime.datetime to match *default*; returned unchanged for any
        other default type
    """
    env.require(lore.dependencies.DATEUTIL)
    # `import dateutil` alone does not expose parse(); the function lives
    # in the parser submodule, which must be imported explicitly.
    import dateutil.parser
    if isinstance(default, int):
        return int(value)
    elif isinstance(default, float):
        return float(value)
    elif isinstance(default, datetime.datetime):
        # datetime must be tested BEFORE date: datetime.datetime is a
        # subclass of datetime.date, so checking date first made this
        # branch unreachable and silently dropped the time component.
        return dateutil.parser.parse(value)
    elif isinstance(default, datetime.date):
        return dateutil.parser.parse(value).date()
    else:
        return value
def save(self, *args, **kwargs):
    """Post the tweet to Twitter on first save, then persist the model.

    Only posts when the record has tweet text but no tweet_id yet, so
    re-saving an already-posted tweet does not post again.
    """
    if not self.tweet_id and self.tweet:
        # NOTE(review): username/password ("basic auth") Twitter client —
        # presumably an old python-twitter API; verify it still applies.
        api = twitter.Twitter(self.twitter_user.username, self.twitter_user.password)
        tweet = api.statuses.update(self.tweet)
        self.tweet_id = tweet.id
        # Convert Twitter's created_at to local wall-clock time, stored
        # as a naive datetime (tzinfo stripped after the conversion).
        self.posted = dateutil.parse(tweet.created_at).astimezone(tzlocal()).replace(tzinfo=None)
    super(Tweet, self).save(*args, **kwargs)
def draft_notes_since(project, previous_release_date=None, labels=None):
    """Draft release notes containing the merged pull requests.

    Prints the assembled notes to stdout; nothing is returned.

    Arguments
    ---------
    project: str
        Project to draft release notes from. Valid options are
        esmvaltool and esmvalcore.
    previous_release_date: datetime.datetime
        Date of the previous release; defaults to the entry in
        PREVIOUS_RELEASE for the project. A string is accepted and
        parsed with dateutil.
    labels: list
        List of GitHub labels that deserve separate sections; defaults
        to LABELS for the project.
    """
    project = project.lower()
    if previous_release_date is None:
        previous_release_date = PREVIOUS_RELEASE[project]
    else:
        previous_release_date = dateutil.parse(previous_release_date)
    if labels is None:
        labels = LABELS[project]
    pulls = _get_pull_requests(project)
    # Group note lines per label; defaultdict so unseen labels start empty.
    lines = DefaultDict(list)
    labelless_pulls = []
    for pull in pulls:
        print(pull.updated_at, pull.merged_at, pull.number, pull.title)
        # PRs arrive newest-first: once updated_at predates the previous
        # release nothing later in the list can be relevant.
        if pull.updated_at < previous_release_date:
            break
        if not pull.merged or pull.merged_at < previous_release_date:
            continue
        pr_labels = {label.name for label in pull.labels}
        # Pick the first configured label the PR carries; the for/else
        # falls back to 'enhancement' for unlabelled PRs.
        for label in labels:
            if label in pr_labels:
                break
        else:
            labelless_pulls.append(pull)
            label = 'enhancement'
        lines[label].append((pull.closed_at, _compose_note(pull)))
    # Warn about label-less PR:
    _list_labelless_pulls(labelless_pulls)
    # Create sections
    sections = [
        VERSION[project],
        '-' * len(VERSION[project]),
        '',
        "This release includes",
    ]
    for label in labels:
        # NOTE(review): `lines` is a defaultdict, so this KeyError can
        # never fire and empty labels still get a (headed, empty) section.
        try:
            entries = sorted(lines[label])  # sort by merge time
        except KeyError:
            continue
        title = TITLES.get(label, label.title())
        sections.append('\n'.join(['', title, '~' * len(title), '']))
        sections.append('\n'.join(entry for _, entry in entries))
    notes = '\n'.join(sections)
    print(notes)
def _read_last_run():
    """Return the timestamp stored in LAST_RUN_FILENAME.

    Best-effort: any failure to parse the file contents yields None
    rather than an exception.
    """
    with open(LAST_RUN_FILENAME, 'r') as handle:
        contents = handle.read()
    try:
        parsed = dateutil.parse(contents)
    except Exception:
        parsed = None
    return parsed
def get_photo_info(image_path):
    """Build a metadata dict (path, album, taken_by, date_taken) for a photo.

    Album and photographer are derived from the directory components of
    *image_path*; the capture date comes from EXIF data when available,
    falling back to the current time (UTC).
    """
    image_path = image_path.replace("\\", "/")
    album = ""
    taken_by = None
    image_data = image_path.split("/")
    # Default when the photo carries no usable EXIF timestamp.
    date_taken = datetime.datetime.now().replace(tzinfo=utc)
    try:
        # Open and decode the image once; the original opened the file
        # twice to fetch the same EXIF dict.
        exif = Image.open(image_path)._getexif()
        if exif is not None:
            creation_time = get_minimum_creation_time(exif)
            if creation_time is None:
                date_taken = 0
            else:
                # EXIF dates use ':' in the date part; swap the first two
                # for '-' so the string parses as an ISO-like date.
                date_taken = dateutil.parse(creation_time.replace(':', '-', 2)).replace(tzinfo=utc)
    except AttributeError:
        pass  # Do nothing if no date was in the photo property
    image_data = image_data[3:]  # Strip first 3 elements off the array
    response_data = dict()
    response_data['path'] = image_path
    image_data_length = len(image_data)
    if image_data_length >= 2:
        # NOTE(review): index -3 needs at least 3 components; with exactly
        # 2 this raises IndexError — presumably real paths are always deep
        # enough, but confirm against the callers.
        album = image_data[-3]
    if image_data_length >= 3:
        taken_by = image_data[-2]
    if image_data_length >= 4:
        # string.join(words, sep) is Python-2-only (removed in Python 3);
        # sep.join(words) is the equivalent on both versions.
        album = ' / '.join(image_data[:-2])
    response_data['album'] = album
    response_data['taken_by'] = taken_by
    response_data['date_taken'] = str(date_taken)
    return response_data
def parse_dt(dt: str):
    """Parse a datetime string and return the resulting datetime object."""
    parsed = dateutil.parse(dt)
    return parsed
def update_dictionary(global_dictionary, data_dictionary, file_path, csv_file_delimiter, stritr=''):
    """
    Author : Niket Shinde
    Description : update values in global dictionary

    Resolves each value of *global_dictionary* in place: ``ITR...`` values
    are looked up in *data_dictionary*, ``[...]`` values are evaluated as
    small expressions (arithmetic, concat, date/datetime/timestamp), and
    ``$...$`` values invoke generator functions.

    :param global_dictionary: dict whose values are resolved and updated in place
    :param data_dictionary: per-iteration data, indexed by iteration key then field key
    :param file_path: unused here — presumably kept for the commented-out CSV path; verify
    :param csv_file_delimiter: unused here — same remark as file_path
    :param stritr: iteration key used when a value is empty
    :return: dictionary object (the same, mutated, global_dictionary)
    """
    datadict = global_dictionary
    for key in datadict:
        text = datadict[key]
        cell_value = text
        text = str(text)
        logger.debug(key + ' : ' + text)
        #logger.debug(text[0:3].upper())
        # "ITR..." values are indirections into the per-iteration data.
        if text[0:3].upper() == "ITR":
            cell_value = data_dictionary[cell_value][key]
            logger.debug("Updated Test Dictionary Node : " + "[" + key + "] : " + str(cell_value))
        # "[a,b,c]" values are mini-expressions evaluated element-wise.
        if text.startswith(r'[') and text.endswith(r']'):
            text = text[1:-1]
            values = text.split(sep=',')
            print(values)
            # First pass: substitute %placeholders% and ITR references.
            for i, item in enumerate(values):
                # values[i] = item.strip()
                if '%' in values[i]:
                    #values[i] = item[1:-1]
                    #values[i] = datadict[values[i]]
                    key_name = re.findall(r'%.*%', values[i])[0][1:-1]
                    if key_name in datadict.keys():
                        values[i] = re.sub(r'%' + key_name + '%', str(datadict[key_name]), values[i])
                    else:
                        pass
                    #values[i] = updated_value
                elif values[i][0:3].upper() == "ITR":
                    values[i] = data_dictionary[values[i]][key]
                else:
                    pass
            # Second pass: interpret the (substituted) element list.
            if len(values) > 1:
                if values[1].lower() == '+':
                    cell_value = int(values[0]) + int(values[2])
                elif values[1].lower() == '-':
                    cell_value = int(values[0]) - int(values[2])
                elif values[0].lower() == 'concat':
                    #cell_value = values[0] + ' ' + values[2]
                    # [concat, sep, a, b, ...] joins a,b,... with sep.
                    cell_value = str(values[1]).join(values[2:])
                    print("cell_value : ", cell_value)
                elif values[0].lower() == 'date':
                    # [date, base, offset, unit] -> YYYY-MM-DD
                    cell_value = datetime.datetime.today() if values[1].lower(
                    ) == 'today' else dateutil.parse(values[1])
                    cell_value = generic.add_date(cell_value, int(values[2]), values[3])
                    cell_value = cell_value.date().strftime('%Y-%m-%d')
                elif values[0].lower() == 'datetime':
                    # [datetime, base, offset, unit] -> YYYY-MM-DD HH:MM:SS
                    cell_value = datetime.datetime.today() if values[1].lower(
                    ) == 'today' else dateutil.parse(values[1])
                    cell_value = generic.add_date(cell_value, int(values[2]), values[3])
                    cell_value = cell_value.strftime('%Y-%m-%d %H:%M:%S')
                elif values[0].lower() == 'timestamp':
                    # [timestamp, fmt, base, offset, unit] — note the base
                    # moves to index 2 because index 1 is the timestamp arg.
                    cell_value = datetime.datetime.today() if values[2].lower(
                    ) == 'today' else dateutil.parse(values[2])
                    cell_value = generic.add_date(cell_value, int(values[3]), values[4])
                    cell_value = cell_value.strftime('%Y-%m-%d %H:%M:%S')
                    cell_value = generic.get_timestamp(cell_value, int(values[1]))
                else:
                    pass
                print(cell_value)
            else:
                # Single element: use the substituted value as-is.
                cell_value = values[0]
        # "$name$" values call a generator function from functions_list.
        elif text.startswith(r'$') and text.endswith(r'$'):
            if text in functions_list:
                if text == "$SSN$":
                    #cell_value = Generic.pop_csv(file_path+"SSN.csv", datadict["DATASET"], csv_file_delimiter)
                    cell_value = generic.get_SSN('Sweden')
                    # NOTE(review): re.I here is passed as re.sub's *count*
                    # positional argument, not as flags — likely a bug; the
                    # pattern has no letters so flags are moot anyway.
                    cell_value = re.sub(r'\..*', "", str(cell_value), re.I)
                    print("SSN : " + cell_value)
                if text == "$GS1$":
                    cell_value = generic.get_GS1_number('735999', '18')
                    # NOTE(review): same re.I-as-count issue as above.
                    cell_value = re.sub(r'\..*', "", str(cell_value), re.I)
                    print("GS1 : " + cell_value)
                if text == "$date$":
                    cell_value = datetime.datetime.today()
                    cell_value = cell_value.strftime('%Y-%m-%d')
                if text == "$datetime$":
                    cell_value = datetime.datetime.today()
                    cell_value = cell_value.strftime('%Y-%m-%d %H:%M:%S')
        # Empty values fall back to the current iteration's data, if any.
        elif text == '':
            if key in data_dictionary[stritr].keys():
                if data_dictionary[stritr][key] != '':
                    cell_value = data_dictionary[stritr][key]
        else:
            pass
        datadict[key] = cell_value
    logger.debug('<B>After update value in Global Data dictionary : </B>' + str(datadict), html=True)
    return datadict
def _get_time(self): str_value = self._dom_time.firstChild.nodeValue if str_value == '': return None else: return dateutil.parse(str_value)
# Fetch joelgrus's public repos from the GitHub API and summarize when
# they were created and which languages the most recent ones use.
from collections import Counter  # was used below but never imported
import json

import requests
# dateutil exposes parse() in its parser submodule, not at the top level;
# `from dateutil import parse` raises ImportError.
from dateutil.parser import parse

endpoint = "https://api.github.com/users/joelgrus/repos"

repos = json.loads(requests.get(endpoint).text)

# Distribution of creation months (1-12) and weekdays (0=Monday).
dates = [parse(repo["created_at"]) for repo in repos]
month_counts = Counter(date.month for date in dates)
weekday_counts = Counter(date.weekday() for date in dates)

# Languages of the five most recently created repositories
# (ISO-8601 created_at strings sort chronologically as text).
last_5_repositories = sorted(repos, key=lambda r: r["created_at"], reverse=True)[:5]
last_5_languages = [repo["language"] for repo in last_5_repositories]