def _get_row(entry):
    """Build one table row (a list of cell strings) for an active 0ver project.

    ``entry`` is a project dict from projects.json; missing optional fields
    fall back to NA_VAL.
    """
    # TODO: boltons Z handling
    first_rel = isoparse(entry['first_release_date'].replace('Z', ''))
    latest_rel = None
    if entry.get('latest_release_date'):
        latest_rel = isoparse(entry['latest_release_date'].replace('Z', ''))
    # streak runs from first release to "now" (project is still 0ver)
    streak = datetime.datetime.utcnow() - first_rel
    streak_years = round(streak.days / 365.0, 1)

    reason = entry.get('reason')
    stars = ('{:,}'.format(entry['star_count'])
             if entry.get('star_count') else NA_VAL)
    row = [
        tooltipped('<a href="%s">%s</a>' % (entry['url'], entry['name']),
                   reason),
        tooltipped(stars, reason),
        tooltipped(first_rel.year, entry.get('first_release_version')),
        '%s' % entry.get('release_count', NA_VAL),
    ]
    if latest_rel:
        row.append('%s (%s)' % (entry.get('latest_release_version', NA_VAL),
                                latest_rel.year))
    else:
        row.append(NA_VAL)
    row.append('%s' % streak_years)
    return row
def js_isoparse(date_str):
    """Parse an ISO 8601 datetime string, retrying without a trailing 'Z'.

    JavaScript's ``Date.toISOString()`` appends a 'Z' suffix that the parser
    may reject; on a ValueError we strip it and parse again.
    """
    try:
        return isoparse(date_str)
    except ValueError:
        # It may be a javascript Date object printed with toISOString()
        if date_str[-1] == 'Z':
            date_str = date_str[:-1]
        return isoparse(date_str)
def _emeritus_to_htmltable(entries):
    """Render emeritus (formerly-0ver) project entries as an HTML table.

    Returns the table HTML as a string, including the <th> class attributes
    that the sorting js at the bottom of base.html relies on.
    """
    headers = ['Project', 'Stars', 'First Released', '0ver Releases',
               'Last 0ver release', '0ver years']
    rows = []
    for entry in entries:
        irel_dt = isoparse(entry['first_release_date'].replace(
            'Z', ''))  # TODO: boltons Z handling
        lrel_dt, zv_streak = None, None
        if entry.get('first_nonzv_release_date'):
            lrel_dt = isoparse(entry['first_nonzv_release_date'].replace(
                'Z', ''))
            zv_streak = lrel_dt - irel_dt
        # Fix: previously zv_streak.days was computed unconditionally, which
        # raised AttributeError for any entry missing
        # first_nonzv_release_date; fall back to NA_VAL instead.
        zv_streak_years = (round(zv_streak.days / 365.0, 1)
                           if zv_streak is not None else NA_VAL)
        row = [
            tooltipped('<a href="%s">%s</a>' % (entry['url'], entry['name']),
                       entry.get('reason')),
            tooltipped(
                '{:,}'.format(entry['star_count'])
                if entry.get('star_count') else NA_VAL,
                entry.get('reason')),
            tooltipped(irel_dt.year, entry.get('first_release_version')),
            '%s' % entry.get('release_count_zv', NA_VAL),
        ]
        if lrel_dt:
            row.append('%s (%s)' % (entry.get('last_zv_release_version',
                                              NA_VAL), lrel_dt.year))
        else:
            row.append(NA_VAL)
        row.append('%s' % zv_streak_years)
        rows.append(row)

    table = ZVTable.from_data(rows, headers=headers)
    ret = table.to_html()
    # table sorting js at bottom of base.html uses the stars class on
    # the heading to sort properly
    ret = ret.replace('<th>Stars</th>', '<th class="stars">Stars</th>')
    ret = ret.replace('<th>0ver Releases</th>',
                      '<th class="releases">0ver Releases</th>')
    ret += '\n\n'
    return ret
def _date_param(text):
    """Parse a date argument: either an ISO 8601 datetime, or a negative
    timedelta expression applied to the current UTC time."""
    text = text.strip()
    if not text.startswith('-'):
        return isoparse(text)
    delta = parse_timedelta(text)
    return datetime.datetime.utcnow() + delta  # adding a negative
def _get_commit_dt(repo_dir, commit_hash, **kw):
    """Return the committer datetime of *commit_hash* in *repo_dir*.

    TZ=UTC is forced in the subprocess env so --date=format-local emits UTC.
    Extra keyword args are forwarded to run_cap (cwd is always overridden).
    """
    kw.setdefault('env', {})['TZ'] = 'UTC'
    kw['cwd'] = repo_dir
    cmd = ['git', 'show', '-s', '--format=%cd',
           '--date=format-local:%Y-%m-%dT%H:%M:%S', commit_hash]
    proc_res = run_cap(cmd, **kw)
    return isoparse(proc_res.stdout.strip())
def _main():
    """Regenerate projects.json from projects.yaml.

    Skips the (slow) fetch when running on a Travis pull request, or when the
    cached data is fresh (< 1 hour old), the project list is unchanged, and
    ZV_DISABLE_CACHING is unset.
    """
    start_time = time.time()
    with open(PROJ_PATH + '/projects.yaml') as f:
        # safe_load instead of bare yaml.load: the file is plain data, and a
        # loader-less yaml.load is deprecated (PyYAML >= 5.1) and unsafe.
        projects = yaml.safe_load(f)['projects']

    try:
        with open(PROJ_PATH + '/projects.json') as f:
            cur_data = json.load(f)
        cur_projects = cur_data['projects']
        cur_gen_date = isoparse(cur_data['gen_date'])
    except (IOError, KeyError, ValueError):
        # Missing, incomplete, or corrupt cache file: treat as no cached
        # data. (ValueError covers json.JSONDecodeError and a bad gen_date;
        # the original crashed on those.)
        cur_projects = []
        cur_gen_date = None

    if cur_gen_date:
        fetch_outdated = (datetime.datetime.utcnow() -
                          cur_gen_date) > datetime.timedelta(seconds=3600)
    else:
        fetch_outdated = True

    cur_names = sorted([c['name'] for c in cur_projects])
    new_names = sorted([n['name'] for n in projects])

    tpr = os.getenv('TRAVIS_PULL_REQUEST')
    if tpr and tpr != 'false':
        print('Pull request detected. Skipping data update until merged.')
        return
    if fetch_outdated or cur_names != new_names or os.getenv(
            'ZV_DISABLE_CACHING'):
        entries = fetch_entries(projects)
    else:
        print('Current data already up to date, exiting.')
        return

    pprint(entries)
    res = {'projects': entries,
           'gen_date': datetime.datetime.utcnow().isoformat(),
           'gen_duration': time.time() - start_time}
    with atomic_save(PROJ_PATH + '/projects.json') as f:
        f.write(
            json.dumps(res, indent=2, sort_keys=True, default=_json_default))
    return
def _get_project_repo_info_map(project_list, repo_dir):
    """Map each project to (its local repo dir, last-pulled datetime).

    Projects with no cloned repo directory or an unreadable pull-date marker
    are reported via print_err and omitted from the result.
    """
    info_map = OrderedDict()
    for project in project_list:
        proj_dir = os.path.join(repo_dir, project.name_slug)
        if not os.path.isdir(proj_dir):
            print_err('project %s repo directory not found at: %r'
                      % (project.name, proj_dir))
            continue
        marker_path = proj_dir + '/.apatite_last_pulled'
        with open(marker_path, 'r') as f:
            raw_date = f.read()
        try:
            pulled_dt = isoparse(raw_date)
        except (TypeError, ValueError):
            print_err('project %s had unreadable pull date at: %r'
                      % (project.name, marker_path))
            continue
        info_map[project] = (proj_dir, pulled_dt)
    return info_map
def from_json_path(cls, campaign, json_path, full):
    """Load saved state from *json_path* (plain or gzipped JSON).

    When *full* is falsy, per-article results are dropped to save memory.
    """
    if not json_path:
        raise ValueError('missing json_path')
    with open(json_path, 'rb') as f:
        stream = gzip.GzipFile(fileobj=f) if json_path.endswith('.gz') else f
        state_data = json.load(stream)
    campaign_results = state_data.get('campaign_results')
    if not campaign_results:
        print(
            'WARNING: old data, no campaign results present, delete data and reupdate'
        )
    return cls(
        campaign=campaign,
        timestamp=isoparse(state_data['timestamp']),
        campaign_results=campaign_results,
        goal_results=state_data['goal_results'],
        article_results=state_data['article_results'] if full else None,
        # title_list=state_data['title_list'],  # no use for this yet
        state_file_save_date=state_data['save_date'])
def parse_date(date):
    """Parse an ISO 8601 date string into a datetime; None passes through."""
    return None if date is None else isoparse(date)
def _get_commit_dt(text):
    """Parse a commit date string into a datetime truncated to the minute.

    Everything after the last ':' is dropped before parsing (trims the
    trailing piece of the string, e.g. the offset's minutes).
    """
    trimmed, *_rest = text.rsplit(':', 1)
    parsed = isoparse(trimmed)
    return parsed.replace(second=0, microsecond=0)