def get_api_client(token, user=None, return_response_obj=True):
    """Construct an OAuth-authenticated Harvest client bound to *user*.

    The client refreshes tokens via ``store_token`` and carries *user*
    in its request context.
    """
    request_context = {'user': user}
    return harvest.Harvest(
        HARVEST_API_URL,
        client_id=HARVEST_CLIENT_ID,
        client_secret=HARVEST_CLIENT_SECRET,
        token=token,
        token_updater=store_token,
        context=request_context,
        return_response_obj=return_response_obj,
    )
def setUp(self):
    """Create a token-authenticated Harvest client and start httpretty."""
    access = PersonalAccessToken('ACCOUNT_NUMBER', 'PERSONAL_ACCESS_TOKEN')
    self.harvest = harvest.Harvest('https://api.harvestapp.com/api/v2', access)
    # There's a bug in httpretty ATM.
    warnings.filterwarnings(
        "ignore", category=ResourceWarning, message="unclosed.*")
    httpretty.enable()
def main():
    """Mirror each day of the current week from Harvest into Clockify."""
    client = harvest.Harvest(
        "https://" + COMPANY_NAME + ".harvestapp.com", EMAIL, PASSWORD)
    # PrintDateInfo(client, "2019-07-08")
    # PrintDateInfo(client, "2019-07-09")
    for day in GetCurrentWeek():
        Clockify_CreateTimer(client, CLOCKIFY_PROJECT_NAME, str(day))
def harvest_batch(self, batchid, batch):
    """Write every file in *batch* into one harvest archive.

    The archive is created inside ``self.harvestpath`` under the name
    ``nlab_{batchid}.harvest``; each file is handed to
    ``self.harvest_file`` together with its numeric prefix.
    """
    outfile = f'{self.harvestpath}/nlab_{str(batchid)}.harvest'
    with hrvst.Harvest(outfile) as archive:
        for name in batch:
            # The filename's first dot-separated component is its number.
            prefix_number = int(name.split('.')[0])
            self.harvest_file(name, prefix_number, archive)
# NOTE(review): fragment — the opening of the enclosing function and of the
# getopt-style `for o, a in opts:` loop lies above this excerpt.
        url = a
    elif o in ("-e", "--email"):
        email = a
    elif o in ("-p", "--password"):
        password = a
    elif o in ("-d", "--date_key"):
        date_key = a
    elif o in ("-c", "--client_name"):
        client_name = a
# get all the dates in this week, but we really need start/end
end_date = datetime.datetime.strptime(date_key, "%Y-%m-%d").date()
dates = get_dates_for_week_ending(end_date)
start_date = dates[0]
# Authenticate against Harvest and resolve the named client record.
svc = harvest.Harvest(url, email, password)
gavel = get_client_by_name(svc, client_name)
gavel_id = gavel['client']['id']
projects = svc.projects_for_client(gavel_id)
# Codes/IDs for active projects only
pa = [(p['project']['code'], p['project']['id'])
      for p in projects if p['project']['active'] == True]
for project in pa:
    print(project, end_date)
    project_code = project[0]
    # NOTE(review): loop body continues beyond this excerpt.
def connect_to_harvest(self):
    """ :return: this function return a harvest client

    Builds the account-specific base URL and authenticates with the
    stored e-mail/password pair.
    """
    base_url = f"https://{self._account}.harvestapp.com"
    return harvest.Harvest(base_url, self._email, self._password)
#!/usr/local/bin/python # harvest import harvest, sys import creds client = harvest.Harvest(creds.url, creds.username, creds.password) entries = client.today_user(creds.user) day_entries = entries['day_entries'] timerToggle = '' def timer(start): # Check to whether project already started linktree_entries = [] for entry in day_entries: # Linktree Development if (entry['project_id'] == creds.project_id and entry['task_id'] == creds.task_id): linktree_entries.append(entry) selected = [] for entry in linktree_entries: if (timer_started(entry)): selected = entry break else: # get last linktree task selected = linktree_entries[-1]
def setUp(self):
    """Point the fixture at the throwaway Harvest test account."""
    test_account_url = "https://goretoytest.harvestapp.com"
    self.harvest = harvest.Harvest(
        test_account_url, "*****@*****.**", "tester account")
#!/usr/local/bin/python2.7 import sys import json import argparse sys.path.append('./PythonHarvest') sys.path.append('./Requests') import harvest parser = argparse.ArgumentParser() parser.add_argument('-u', help='Harvest username') parser.add_argument('-p', help='Harvest password') args = parser.parse_args() h = harvest.Harvest("https://thirdmind.harvestapp.com", args.u, args.p) today = h.get_today() today_data = today() print json.dumps(today_data['day_entries'])
def main(args):
    """One-way sync of Toggl time entries into Harvest.

    Pulls detailed Toggl reports for the requested date range, pulls the
    matching Harvest timesheets, then — for days that have Toggl entries
    but no Harvest entries — posts the Toggl totals to Harvest after an
    interactive confirmation.

    args: parsed CLI namespace; reads toggl_key, days, daterange,
    harvest_url, harvest_account_id, harvest_key, harvest_email.
    Exits the process: status 1 on user abort, 0 on success.
    """
    pp = pprint.PrettyPrinter(indent=4)
    # create a Toggl object and set our API key
    toggl_account = Toggl()
    toggl_account.setAPIKey(args.toggl_key)
    # The account's timezone drives all date-window comparisons below.
    toggl_tz_str = toggl_account.request(
        "https://www.toggl.com/api/v8/me")['data']['timezone']
    toggl_tz = pytz.timezone(toggl_tz_str)

    # figure out what ranges to sync for
    if args.days:
        # N days back from (exclusive) tomorrow midnight.
        edate = datetime.today().replace(
            hour=0, minute=0, second=0, microsecond=0) + timedelta(days=1)
        sdate = edate - timedelta(days=abs(args.days) + 1)
    elif args.daterange:
        dates = [dateparser.parse(x) for x in args.daterange]
        # A single date means "from that date until today".
        if len(dates) < 2:
            dates.append(datetime.today().replace(hour=0, minute=0, second=0,
                                                  microsecond=0))
        dates = sorted(dates)
        sdate = dates[0]
        edate = dates[1] + timedelta(days=1)
    else:
        # Default window: the past year.
        edate = datetime.today().replace(
            hour=0, minute=0, second=0, microsecond=0) + timedelta(days=1)
        sdate = edate + timedelta(days=-365)
    sdate_aw = toggl_tz.localize(sdate)
    edate_aw = toggl_tz.localize(edate)

    # do some fancy date windowing required for retrieving tasks from toggl
    # (the detailed-report endpoint only accepts windows of limited length,
    # so the range is chopped into 180-day chunks plus a remainder).
    toggl_dateranges = []
    chunks = (edate - sdate).days // 180
    partials = (edate - sdate).days % 180
    for i in range((edate - sdate).days // 180):
        toggl_dateranges.append([
            sdate + timedelta(days=i * 180),
            sdate + timedelta(days=(i + 1) * 180 - 1)
        ])
    if partials:
        toggl_dateranges.append([
            sdate + timedelta(days=chunks * 180),
            sdate + timedelta(days=chunks * 180 + partials)
        ])
    toggl_dateranges = [[toggl_tz.localize(dr[0]), toggl_tz.localize(dr[1])]
                        for dr in toggl_dateranges]

    # collect toggl entries
    toggl_entries = []
    toggl_clients = toggl_account.getClients()
    toggl_workspaces = toggl_account.getWorkspaces()
    toggl_projects = []
    for client in toggl_clients:
        toggl_projects = toggl_projects + toggl_account.getClientProjects(
            client['id'])
    # Page through the detailed report for every workspace and window.
    for wid in [w['id'] for w in toggl_workspaces]:
        for dr in toggl_dateranges:
            entries_left = 1
            page = 1
            while entries_left > 0:
                entries = toggl_account.request(
                    Endpoints.REPORT_DETAILED, {
                        'workspace_id': wid,
                        'user_agent':
                        'https://github.com/a3ng7n/timesheet-sync',
                        'page': page,
                        'since': dr[0],
                        'until': dr[1]
                    })
                toggl_entries = entries['data'] + toggl_entries
                # First page seeds the countdown from total_count; later
                # pages just subtract per_page.
                entries_left = entries['total_count'] - entries[
                    'per_page'] if page == 1 else entries_left - entries[
                        'per_page']
                page += 1

    # Deduplicate Toggl tasks by (pid, description), then renumber ids.
    task_names = [{
        'id': str(x['pid']) + x['description'],
        'pid': x['pid'],
        'description': x['description']
    } for x in toggl_entries]
    toggl_task_names = list({x['id']: x for x in task_names}.values())
    toggl_task_names = sorted(toggl_task_names,
                              key=lambda k: k['pid'] if k['pid'] else 0)
    for i, t in enumerate(toggl_task_names):
        t['id'] = i

    # collect harvest entries
    harvest_account = harvest.Harvest(uri=args.harvest_url,
                                      account_id=args.harvest_account_id,
                                      personal_token=args.harvest_key)
    try:
        harvest_user_id = [
            x['user']['id'] for x in harvest_account.users
            if x['user']['email'] == args.harvest_email
        ].pop()
    except IndexError:
        print("Could not find user with email address: {0}".format(
            args.harvest_email))
        raise
    tasks = []
    harvest_entries = []
    harvest_clients = harvest_account.clients()
    for client in harvest_clients:
        harvest_projects = harvest_account.projects_for_client(
            client['client']['id'])
        for project in harvest_projects:
            some_entries = harvest_account.timesheets_for_project(
                project['project']['id'],
                start_date=sdate_aw.isoformat(),
                end_date=edate_aw.isoformat())
            # Tag each timesheet entry with its client id for later grouping.
            for e in some_entries:
                e['day_entry']['client_id'] = [
                    y['project']['client_id'] for y in harvest_projects
                    if y['project']['id'] == e['day_entry']['project_id']
                ].pop()
            harvest_entries = harvest_entries + some_entries
            tasks = tasks + [{
                **x['task_assignment'],
                'client_id': client['client']['id']
            } for x in harvest_account.get_all_tasks_from_project(
                project['project']['id'])]

    # Deduplicate Harvest tasks by (client, project, task), then renumber.
    task_names = [{**x, 'id': str(x['client_id'])
                   + str(x['project_id'])
                   + str(x['task_id'])} for x in tasks]
    harvest_task_names = list({x['id']: x for x in task_names}.values())
    harvest_task_names = sorted(harvest_task_names,
                                key=lambda k: k['client_id'])
    for i, t in enumerate(harvest_task_names):
        t['id'] = i

    # Map Toggl tasks onto Harvest project/task id pairs.
    task_association = task_association_config(toggl_account, toggl_task_names,
                                               harvest_account,
                                               harvest_task_names)

    # organize toggl entries by dates worked
    delta = edate - sdate
    dates = [sdate + timedelta(days=i) for i in range(delta.days + 1)]
    combined_entries_dict = {}
    for date in dates:
        # collect entries from either platform on the given date
        from_toggl = [
            x for x in toggl_entries
            if ((dateutil.parser.parse(x['start']).astimezone(toggl_tz) >
                 toggl_tz.localize(date)) and (
                     dateutil.parser.parse(x['start']).astimezone(toggl_tz) <=
                     toggl_tz.localize(date) + timedelta(days=1)))
        ]
        from_harvest = [
            x['day_entry'] for x in harvest_entries
            if dateutil.parser.parse(x['day_entry']['spent_at']).astimezone(
                toggl_tz) == toggl_tz.localize(date)
        ]
        if from_toggl or from_harvest:
            combined_entries_dict[date] = {
                'toggl': {
                    'raw': from_toggl,
                    'tasks': {}
                },
                'harvest': {
                    'raw': from_harvest,
                    'tasks': {}
                }
            }
            # organize raw entries into unique tasks, and total time for that day
            for platform in combined_entries_dict[date].keys():
                for entry in combined_entries_dict[date][platform]['raw']:
                    if platform == 'toggl':
                        # Toggl durations are milliseconds; convert to hours.
                        if entry['pid'] not in combined_entries_dict[date][
                                platform]['tasks'].keys():
                            combined_entries_dict[date][platform]['tasks'][
                                entry['pid']] = {}
                        try:
                            combined_entries_dict[date][platform]['tasks'][
                                entry['pid']][entry[
                                    'description']] += entry['dur'] / 3600000
                        except KeyError:
                            combined_entries_dict[date][platform]['tasks'][
                                entry['pid']][entry[
                                    'description']] = entry['dur'] / 3600000
                    else:
                        # Harvest entries already carry hours, keyed by notes.
                        try:
                            combined_entries_dict[date][platform]['tasks'][
                                entry['notes']] += entry['hours']
                        except KeyError:
                            combined_entries_dict[date][platform]['tasks'][
                                entry['notes']] = entry['hours']

    # add data to harvest
    # Only days with Toggl activity and NO Harvest activity are pushed.
    add_to_harvest = []
    for date, entry in combined_entries_dict.items():
        if entry['toggl']['tasks'] and not entry['harvest']['tasks']:
            for pid in entry['toggl']['tasks'].keys():
                for task in entry['toggl']['tasks'][pid].keys():
                    for hidpair in list(
                            zip(
                                task_association[pid][task]
                                ['harvest_project_id'],
                                task_association[pid]
                                [task]['harvest_task_id'])):
                        add_to_harvest.append({
                            'project_id': hidpair[0],
                            'task_id': hidpair[1],
                            'spent_at': date.date().isoformat(),
                            'hours': round(entry['toggl']['tasks'][pid][task],
                                           2),
                            'notes': task
                        })
    print("The following Toggl entries will be added to Harvest:")
    pp.pprint(add_to_harvest)
    # Interactive confirmation before any write to Harvest.
    if input("""Add the entries noted above to harvest? (y/n)""").lower() in (
            'y', 'yes'):
        for entry in add_to_harvest:
            pp.pprint(
                harvest_account.add_for_user(user_id=harvest_user_id,
                                             data=entry))
    else:
        print('aborted')
        exit(1)
    print('done!')
    exit(0)
def get_harvest(with_tracked_time=True, with_assignments=False):
    """ Extract harvest data using the python-harvest package.
    NB: The master branch of python-harvest currently seems to be using the
    v1 version of the api. This version of the API is deprecated. The branch
    "v2_dev" of python-harvest works with harvests v2 API but doesn't seem
    to be fully functioning for all tables, most noticeably the time_entries
    table.

    Returns a dict of pandas DataFrames keyed by table name (clients,
    projects, roles, users, tasks; plus user/task assignments and
    time_entries depending on the flags).
    """
    start = time.time()
    harvest_api_credentials = wimbledon.config.get_harvest_credentials()
    token = harvest.PersonalAccessToken(
        account_id=harvest_api_credentials["harvest_account_id"],
        access_token=harvest_api_credentials["access_token"],
    )
    client = harvest.Harvest("https://api.harvestapp.com/api/v2", token)
    auth_user = client.get_currently_authenticated_user()
    # NOTE(review): the next line is redaction residue — the original print
    # argument and the `def objs_to_df(...)` header were stripped by a
    # credential scrubber. Restore from version control before running.
    print("AUTHENTICATED USER:"******"""Convert the attributes of each object in a list of objects into a pandas dataframe."""
        df = [obj.__dict__ for obj in objs]
        df = pd.DataFrame.from_dict(df)
        # add prefix to columns if given
        if prefix is not None:
            df.columns = prefix + "." + df.columns
        # unpack any harvest objects into normal columns of values
        df = unpack_class_columns(df)
        return df

    def unpack_class_columns(df):
        """python-harvest returns some columns as an instance of another
        harvest data type. This function unpacks the values of those columns,
        creating a new column for each of the unpacked attributes (with name
        <COL_NAME>.<ATTRIBUTE_NAME>)"""
        # all columns which have ambiguous pandas 'object' type
        obj_cols = df.columns[df.dtypes == "object"]
        # most common type in each of these columns, excluding missing values
        col_types = {
            col: df[col].dropna().apply(type).mode()
            for col in obj_cols
        }
        # exclude columns which have no most common type (i.e. empty columns)
        col_types = {
            col: str(mode[0])
            for col, mode in col_types.items() if len(mode) > 0
        }
        # find columns containing some instance from the harvest library
        harvest_cols = [
            col for col, mode in col_types.items() if "harvest" in mode
        ]
        # convert each column of harvest objects into a pandas df
        unpacked_cols = [
            objs_to_df(df[col], prefix=col) for col in harvest_cols
        ]
        # add new columns to data frame
        for new_cols in unpacked_cols:
            df = pd.concat([df, new_cols], axis=1, sort=True)
        # remove original harvest object columns
        df.drop(harvest_cols, axis=1, inplace=True)
        return df

    def get_all_pages(client_function):
        """The harvest API returns max 100 results per query. This function
        calls the API as many times as necessary to extract all the query
        results.

        client_function: a function from an initiated python-harvest client,
        e.g. client.users"""
        result = client_function()
        total_pages = result.total_pages
        # the data to convert is in an attribute of the response, e.g. in a
        # users response the data is in result.users
        df = objs_to_df(getattr(result, client_function.__name__))
        # get the remaining pages, if there are any
        if result.total_pages > 1:
            for i in range(2, total_pages + 1):
                result = client_function(page=i)
                result = objs_to_df(getattr(result, client_function.__name__))
                df = df.append(result, ignore_index=True)
        df.set_index("id", inplace=True)
        return df

    print("CLIENTS")
    clients = get_all_pages(client.clients)
    print("PROJECTS")
    projects = get_all_pages(client.projects)
    print("ROLES")
    roles = get_all_pages(client.roles)
    print("USERS")
    users = get_all_pages(client.users)
    print("TASKS")
    tasks = get_all_pages(client.tasks)
    if with_assignments:
        print("USER ASSIGNMENTS")
        user_assignments = get_all_pages(client.user_assignments)
        print("TASK ASSIGNMENTS")
        task_assignments = get_all_pages(client.task_assignments)
    if with_tracked_time:
        """ Issues with python-harvest module: time_entries: Currently fails
        due to time_entries.cost_rate should be "float" instead of "NoneType"
        error client_contacts, invoices, estimates, expenses: Also fail,
        usually due to some missing field error, but not sure we use any of
        those tables? Below is my own quick function to extract the time
        entries data... it's quite slow requiring 30+ queries, but the API
        returns max 100 results at a time so probably not a lot that can be
        done to improve it. """

        def api_to_df(table, headers):
            """Query all pages of a table in harvest."""
            url = "https://api.harvestapp.com/v2/" + table
            print("Querying", url, "...", end="")
            req_time = time.time()
            response = requests.get(url, headers=headers)
            json_response = response.json()
            df = pd.json_normalize(json_response[table])
            diff = time.time() - req_time
            print("{:.1f} seconds".format(diff))
            # Follow the "next" pagination link until exhausted.
            while json_response["links"]["next"] is not None:
                url = json_response["links"]["next"]
                print("Querying", url, "... ", end="")
                req_time = time.time()
                response = requests.get(url, headers=headers)
                json_response = response.json()
                new_entries = pd.json_normalize(json_response[table])
                df = df.append(new_entries)
                diff = time.time() - req_time
                print("{:.1f} seconds".format(diff))
                # wait a bit to prevent getting throttled (allowed max 100
                # requests per 15 seconds)
                if diff < 0.15:
                    time.sleep(0.15 - diff)
            df.set_index("id", inplace=True)
            return df

        api_headers = {
            "User-Agent": "*****@*****.**",
            "Authorization": "Bearer " + harvest_api_credentials["access_token"],
            "Harvest-Account-ID": harvest_api_credentials["harvest_account_id"],
        }
        print("TIME ENTRIES:")
        time_entries = api_to_df("time_entries", api_headers)
    print("=" * 50)
    print("DONE! ({:.1f}s)".format(time.time() - start))
    harvest_data = {
        "clients": clients,
        "projects": projects,
        "roles": roles,
        "users": users,
        "tasks": tasks,
    }
    if with_assignments:
        harvest_data["user_assignments"] = user_assignments
        harvest_data["task_assignments"] = task_assignments
    if with_tracked_time:
        harvest_data["time_entries"] = time_entries
    return harvest_data
def setup(self):
    """Create the Harvest client and the harvest_act service for the tests."""
    # Bare client: this harvest.Harvest takes no constructor arguments here.
    self.ha = harvest.Harvest()
    self.actsrv = HarvestActSrv("harvest_act")
# from jira_connection import jira from jira import JIRA from dateutil import parser import re import subprocess import threading import time from typing import Dict config_tokens = load(open('oauth_tokens.yml', 'r')) # connection to harvest h = harvest.Harvest( 'https://mev.harvestapp.com', client_id='----', # Add client ID token=config_tokens['access_token'], token_updater=config_tokens['refresh_token'], put_auth_in_header=False) # connection to jira options = {'server': 'https://center.atlassian.net'} jira = JIRA(options=options, basic_auth=('login', 'pass')) #Add login to JIRA def sync_work_log(harvest_project_code, task_jira_mask): # Regex for find task in JIRA task_regx = re.compile(r'{0}\d+'.format(task_jira_mask)) # Regex for find task id from harvest task_harvest_id_regx = re.compile(r'^\d+')