Example No. 1
def handler(event, context):
    # Grab the data passed to the lambda function through the browser URL (API Gateway)
    try:
        project_id = (event.get('pathParameters').get('id'))
    except Exception as e:
        payload = {"message": "Id path parameter not given"}
        return response_formatter(status_code='400', body=payload)

    columns = ["board_name", "board_id", "rolling_time_window_days", "issue_filter", "last_issue_change",
               "include_subtasks", "excluded_issue_types"]

    try:
        redshift = RedshiftConnection()
        results = redshift.getIssueConfiguration(project_id)
    except Exception:
        redshift.closeConnection()
        payload = {"message": "Internal Error"}
        return response_formatter(status_code='500', body=payload)

    if not results:
        payload = {"message": "No resource with project ID {} found".format(project_id)}
        return response_formatter(status_code='404', body=payload)

    project_issue_configs = {}
    for row in results:
        for index, data in enumerate(row):
            project_issue_configs[underscore_to_camelcase(columns[index])] = data

    return response_formatter(status_code='200', body=project_issue_configs)
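
All of these handlers return through a shared `response_formatter` helper that is not shown in these examples. A minimal sketch of what it might look like, assuming it only wraps the payload into an API Gateway proxy response with the same CORS headers that the hand-built responses further down use; the real implementation is not part of the source:

import json

def response_formatter(status_code='400', body=None):
    # Hypothetical sketch: wrap a payload dict into an API Gateway proxy response.
    return {
        "statusCode": int(status_code),
        "headers": {
            "Access-Control-Allow-Origin": "*",          # CORS, mirroring the explicit responses below
            "Access-Control-Allow-Credentials": True
        },
        "body": json.dumps(body if body is not None else {})
    }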
Example No. 2
def handler(event, context):
    # Validate user input
    try: 
        # User input
        data = json.loads(event['body'])
        work_states = data['workStates']
        default_lead_time_start_state = data['defaultLeadTimeStartState']
        default_lead_time_end_state = data['defaultLeadTimeEndState']
    except:
        payload = {'message': 'Invalid user input'}
        return response_formatter(status_code='400', body=payload)
    
    # Parse project_id from the URL
    try:
        project_id = (event.get('pathParameters').get('id'))
    except:
        payload = {"message": "Could not get id path parameter"}
        return response_formatter(status_code='400', body=payload)
    
    # Update work states
    try:
        status_state_values = [] # Values to insert in team_status_states table
        work_state_values = []   # Values to insert in team_work_states table
        seq_number = 0           # Sequence counter for team_work_states table
        
        for work_state in work_states:
            for status in work_state['status']:
                status_state_values.append((int(project_id), str(status), str(work_state['name'])))
            
            work_state_values.append((int(project_id), str(work_state['name']), seq_number))
            seq_number += 1

        redshift = RedshiftConnection()
        redshift.updateTeamStatusStates(project_id, status_state_values)
        redshift.updateTeamWorkStates(project_id, work_state_values)
        redshift.updateDefaultLeadTimeStates(project_id, default_lead_time_start_state, default_lead_time_end_state)
    except:
        payload = {'message': 'Internal error'}
        return response_formatter(status_code='500', body=payload)
    finally:
        redshift.closeConnection()

    return response_formatter(status_code='200', body={})
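
For reference, a request body this handler would accept might look like the following; the values are made up, but the shape follows the keys read above and mirrors the payload returned by the GET handler in Example No. 6:

import json

example_body = json.dumps({
    "defaultLeadTimeStartState": "In Progress",
    "defaultLeadTimeEndState": "Done",
    "workStates": [
        {"name": "In Progress", "status": ["In Progress", "In Review"]},
        {"name": "Done", "status": ["Closed", "Resolved"]}
    ]
})
# The handler would then be invoked with an event such as:
# handler({"body": example_body, "pathParameters": {"id": "42"}}, None)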
Example No. 3
def handler(event, context):
    # Validate user input
    try:
        # User input
        data = json.loads(event['body'])
        work_types = data
    except:
        payload = {'message': 'Invalid user input'}
        return response_formatter(status_code='400', body=payload)

    # Parse project_id from the URL
    try:
        project_id = (event.get('pathParameters').get('id'))
    except:
        payload = {"message": "Could not get id path parameter"}
        return response_formatter(status_code='400', body=payload)

    # Update work types
    try:
        insert_value_list = []
        for key in work_types:
            for issue in work_types[key]:
                insert_value_list.append((project_id, issue, key))
        redshift = RedshiftConnection()
        redshift.updateTeamWorkTypes(project_id, insert_value_list)
    except:
        payload = {'message': 'Internal error'}
        return response_formatter(status_code='500', body=payload)
    finally:
        redshift.closeConnection()

    return response_formatter(status_code='200', body={})
Example No. 4
def handler(event, context):
    # Validate user input
    try:
        # User input
        data = json.loads(event['body'])
        repos = data
    except:
        payload = {'message': 'Invalid user input'}
        return response_formatter(status_code='400', body=payload)

    # Parse project_id from the URL
    try:
        project_id = (event.get('pathParameters').get('id'))
    except:
        payload = {"message": "Could not get id path parameter"}
        return response_formatter(status_code='400', body=payload)

    # Validate repository names
    try:
        for repo in repos:
            GITHUB_API = git_etl_constants.GIT_REPO_URL.format(repo=repo)
            r = requests.get(GITHUB_API,
                             headers={
                                 'Authorization':
                                 'token %s' % git_etl_constants.GIT_API_KEY
                             })
            if r.status_code != 200:
                payload = {
                    'message': 'Invalid repository name: \'{}\''.format(repo)
                }
                return response_formatter(status_code='400', body=payload)
    except:
        # Git API fail
        payload = {'message': 'Service Unavailable'}
        return response_formatter(status_code='503', body=payload)

    try:
        redshift = RedshiftConnection()
        insert_repo_list = []
        for repo in repos:
            insert_repo_list.append((project_id, repo))
        redshift.updateTeamRepos(project_id, insert_repo_list)
    except:
        payload = {'message': 'Internal error'}
        return response_formatter(status_code='500', body=payload)
    finally:
        redshift.closeConnection()

    return response_formatter(status_code='200', body={})
Example No. 5
    pg_tables = pg.all_tables()
    pg_tables = sorted(pg_tables, key=str.lower)
    empty_tables = []
    pg_rows = []
    migrated_tables = []
    for table in pg_tables:
        num_rows = pg.num_rows(table)
        if num_rows == 0:
            empty_tables.append(table)
        else:
            migrated_tables.append(table)
            pg_rows.append(num_rows)
    pg.close()

    # check rows in Redshift
    rs = RedshiftConnection()
    rs_tables = rs.all_tables()
    rs_tables = sorted(rs_tables, key=str.lower)
    rs_rows = []
    for table in rs_tables:
        rs_rows.append(rs.num_rows(table))
    rs.close()

    # output table names and number of rows in Postgres and Redshift
    df = pd.DataFrame(data={
        'Table_Name': migrated_tables,
        "Postgres": pg_rows,
        'Redshift': rs_rows
    })
    columns_titles = ['Table_Name', 'Postgres', 'Redshift']
    df = df.reindex(columns=columns_titles)
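
The resulting DataFrame can then be used to flag tables whose row counts diverge between Postgres and Redshift. A small follow-up sketch, operating on the `df` built above:

# Flag tables whose Postgres and Redshift row counts disagree.
mismatched = df[df['Postgres'] != df['Redshift']]
if mismatched.empty:
    print("All migrated tables have matching row counts.")
else:
    print("Row count mismatches found:")
    print(mismatched.to_string(index=False))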
Example No. 6
def handler(event, context):
    # Grab the data passed to the lambda function through the browser URL (API Gateway)
    try:
        project_id = (event.get('pathParameters').get('id'))
    except Exception as e:
        payload = {"message": "Id path parameter not given"}
        return response_formatter(status_code='400', body=payload)

    redshift = RedshiftConnection()
    try:
        redshift.validateProjectID(project_id)
    except Exception:
        redshift.closeConnection()
        payload = {
            "message":
            "No resource with project ID {} found".format(project_id)
        }
        return response_formatter(status_code='404', body=payload)

    try:
        # Fetch list of state names in sequence
        state_names = redshift.getWorkStates(
            project_id)  # list of (state_name,) tuples
    except Exception:
        redshift.closeConnection()
        payload = {"message": "Internal Error"}
        return response_formatter(status_code='500', body=payload)

    try:
        # Fetch default lead time start/end states
        default_lead_time_start_state = redshift.getLeadTimeStartState(
            project_id)
        default_lead_time_end_state = redshift.getLeadTimeEndState(project_id)
    except Exception:
        redshift.closeConnection()
        payload = {"message": "Internal Error"}
        return response_formatter(status_code='500', body=payload)

    work_states = []
    for state in state_names:
        try:
            # Fetch list of status belonging to the state
            status = redshift.statusListOfState(project_id,
                                                state[0])  # list[str]
            work_states.append({'name': state[0], 'status': status})
        except:
            payload = {"message": "Internal Error"}
            return response_formatter(status_code='500', body=payload)

    payload = {}
    payload['defaultLeadTimeStartState'] = default_lead_time_start_state
    payload['defaultLeadTimeEndState'] = default_lead_time_end_state
    payload['workStates'] = work_states

    redshift.closeConnection()
    return response_formatter(status_code='200', body=payload)
Example No. 7
def handler(event, context):
    # Grab the data passed to the lambda function through the browser URL (API Gateway)
    try:
        projectID = event.get('pathParameters').get('id')
    except Exception as e:
        payload = {"message": "Id path parameter not given: {]".format(e)}
        return response_formatter(status_code=404, body=payload)

    redshift = RedshiftConnection()

    try:
        redshift.validateProjectID(projectID)
    except Exception as e:
        redshift.closeConnection()
        payload = {"message": "No resource with project ID {} found: {}".format(projectID, e)}
        return response_formatter(status_code=404, body=payload)

    try:
        # Grab the query string parameter of offset(days), dateUntil, dateSince, and workTypes
        queryParameters = QueryParameters(event)
        days = queryParameters.getDays()
        dateUntilParameter = queryParameters.getDate('dateUntil')
        dateSinceParameter = queryParameters.getDate('dateSince')
        workTypes = queryParameters.getWorktypes()

        workTypeParser = WorkTypeParser(workTypes, projectID)
        workTypeParser.validateWorkTypes(redshift.getCursor(), redshift.getConn())

        rollingWindowDays = redshift.selectRollingWindow(projectID)
        # Convert rollingWindowDays to rollingWindowWeeks
        rollingWindowWeeks = int(math.floor(rollingWindowDays / 7.0))
        timeInterval = TimeIntervalCalculator(dateUntilParameter, dateSinceParameter, days)
        timeInterval.decrementDateSinceWeeks(rollingWindowWeeks)

    except ValueError as err:
        redshift.closeConnection()
        payload = {"message": "{}".format(err)}
        return response_formatter(status_code=400, body=payload)

    # Get the actual start and end date after adding rolling weeks in epoch format
    dateSince = timeInterval.getDateSinceInt()
    dateUntil = timeInterval.getDateUntilInt()

    # Generate list of weeks
    endDate = dateUntil
    startDate = dateSince
    rollingWeeks = [startDate]
    secsPerWeek = 604800
    # Insert into rollingWeeks all the Mondays until dateUntil for label purposes
    while startDate < endDate:
        startDate += secsPerWeek
        rollingWeeks.append(startDate)

    # Init redshift connection
    connection_detail = {
        'dbname': os.environ['DATABASE_NAME'],
        'host': os.environ["CLUSTER_ENDPOINT"],
        'port': os.environ['REDSHIFT_PORT'],
        'user': os.environ['AWS_RS_USER'],
        'password': os.environ['AWS_RS_PASS']
    }

    conn = psycopg2.connect(**connection_detail)

    # Get the sequence for start and end states for current project
    default_state_query = """
    SELECT seq_number
    FROM   team_project, team_work_states
    WHERE  team_project.id = %s
    AND    team_work_states.team_project_id = %s
    AND    (team_work_states.state_name = team_project.default_lead_time_start_state OR 
            team_work_states.state_name = team_project.default_lead_time_end_state)
    ORDER BY seq_number
    """
    with conn:
        with conn.cursor() as cur:
            cur.execute(default_state_query, (projectID, projectID))
            default_state_results = cur.fetchall()
            start_state_seq = default_state_results[0][0]
            end_state_seq = default_state_results[1][0]

    # Get all work states for current project and generate dict for lead time calculation purposes
    work_state_query = """
    SELECT state_name, seq_number
    FROM team_work_states
    WHERE team_project_id = %s
    ORDER BY seq_number
    """
    with conn:
        with conn.cursor() as cur:
            cur.execute(work_state_query, (projectID,))
            work_states_results = cur.fetchall()
            lead_time_states = [work_state for work_state, work_seq in work_states_results if
                                start_state_seq <= work_seq < end_state_seq]
            work_states_dict = {work_seq: work_state for work_state, work_seq in work_states_results}

    # Filter out invalid issue types and resolutions
    issueTypesList = workTypeParser.issueTypesList
    invalidResolutionsList = workTypeParser.invalidResolutionsList

    # Init rolling interval dict for each week for output purposes
    rolling_intervals = {}
    for index in range(len(rollingWeeks)):
        if index + rollingWindowWeeks >= len(rollingWeeks):
            # Avoids indexing out of range
            break
        week = datetime.datetime.fromtimestamp(rollingWeeks[index + rollingWindowWeeks], tz=pytz.utc).isoformat()
        rolling_intervals[week] = {
            "rolling_interval_start": rollingWeeks[index],
            "rolling_interval_end": rollingWeeks[index+rollingWindowWeeks],
            "leadtime": {
                "Overall": []
            }
        }
        for state in lead_time_states:
            rolling_intervals[week]["leadtime"][state] = []

    # Get all issues within time range and their work state changing history
    issue_query = """
    SELECT issue_key,
           listagg(CASE WHEN s1.seq_number IS NULL THEN -1 ELSE s1.seq_number END,',') within group(ORDER BY issue_change.changed) AS prev_number_seq,
           listagg(CASE WHEN s2.seq_number IS NULL THEN -1 ELSE s2.seq_number END,',') within group(ORDER BY issue_change.changed) AS new_number_seq,
           listagg(issue_change.changed,',') within group(ORDER BY issue_change.changed) AS changed_seq
    FROM issue_change
      LEFT JOIN (SELECT team_status_states.team_project_id,
                        team_status_states.status,
                        team_status_states.state_name,
                        team_work_states.seq_number
                 FROM team_status_states
                   LEFT JOIN team_work_states
                          ON team_status_states.team_project_id = team_work_states.team_project_id
                         AND team_status_states.state_name = team_work_states.state_name) s1
             ON s1.team_project_id = issue_change.team_project_id
            AND s1.status = issue_change.prev_value
      LEFT JOIN (SELECT team_status_states.team_project_id,
                        team_status_states.status,
                        team_status_states.state_name,
                        team_work_states.seq_number
                 FROM team_status_states
                   LEFT JOIN team_work_states
                          ON team_status_states.team_project_id = team_work_states.team_project_id
                         AND team_status_states.state_name = team_work_states.state_name) s2
             ON s2.team_project_id = issue_change.team_project_id
            AND s2.status = issue_change.new_value
    WHERE issue_change.team_project_id = %s
    AND   field_name = 'Status'
    AND (%s = 0 OR issue_type IN %s)
    AND (%s = 0 OR resolution NOT IN %s)
    GROUP BY issue_key
    """
    with conn:
        with conn.cursor() as cur:
            cur.execute(issue_query,
                        (projectID,
                         1 if issueTypesList else 0,
                         tuple(issueTypesList) if issueTypesList else (None,),
                         1 if invalidResolutionsList else 0,
                         tuple(invalidResolutionsList) if invalidResolutionsList else (None,)
                         )
                        )
            results = cur.fetchall()

    # Convert results to dict format
    issues = [{"issue_name": result[0],
               "raw_info": zip(result[1].split(","), result[2].split(","), result[3].split(",")),
               "latest_seq": int(result[2].split(",")[-1])
               } for result in results]

    # If the latest/current status has not reached the lead time end state, the issue is not done and should be filtered out.
    # This keeps only finished issues in the result set, so every remaining issue has passed through all work states.
    issues = [issue for issue in issues if issue["latest_seq"] >= end_state_seq]
    # issues = [issue for issue in issues if issue["leadtime"].get("Overall")]

    # Still need to filter out issues that were closed before dateSince or after dateUntil
    counter = 0
    issuesToDelete = []
    # Since popping shifts the indices, each index to delete must be reduced by the number of pops already queued
    numOfPops = 0

    for issue in issues:
        isIssueDeleted = False
        # Init lead time dictionary
        issue["leadtime"] = {el: 0 for el in [item for item in lead_time_states]}
        # Find the first time to get into leadtime state from pre-leadtime state
        for info in issue["raw_info"]:
            prev_seq_number = int(info[0])
            next_seq_number = int(info[1])
            state_transition_time = int(info[2])
            if prev_seq_number < start_state_seq <= next_seq_number < end_state_seq:
                issue["start_state_time"] = state_transition_time
                break
        # Find the last time to get into post-leadtime state from leadtime state
        for info in reversed(issue["raw_info"]):
            prev_seq_number = int(info[0])
            next_seq_number = int(info[1])
            state_transition_time = int(info[2])
            if start_state_seq <= prev_seq_number < end_state_seq <= next_seq_number:
                issue["end_state_time"] = state_transition_time
                break

        # If the issue was completed outside the requested dateSince/dateUntil window, remove it from issues
        if ("end_state_time" in issue) and (issue["end_state_time"] < int(dateSince) or issue["end_state_time"] > int(dateUntil)) and isIssueDeleted == False:
            issuesToDelete.append(counter-numOfPops)
            numOfPops = numOfPops + 1
            isIssueDeleted = True

        # Calculate overall leadtime
        if issue.get("start_state_time") and issue.get("end_state_time"):
            start_time = datetime.datetime.fromtimestamp(issue["start_state_time"])
            end_time = datetime.datetime.fromtimestamp(issue["end_state_time"])
            issue_working_days = TimeIntervalCalculator.workday_diff(start_time, end_time)             
            issue["leadtime"]["Overall"] = float("{0:.2f}".format(issue_working_days))
        # If the needed start/end state times don't exist, mark the issue for removal
        elif isIssueDeleted == False:
            issuesToDelete.append(counter-numOfPops)
            numOfPops = numOfPops + 1
            isIssueDeleted = True            

        # Remove the issue if its overall lead time is under 15 minutes (0.01 working days) so it is not charted as 0
        if ("Overall" in issue["leadtime"]) and issue["leadtime"]["Overall"] < 0.01 and isIssueDeleted == False:
            issuesToDelete.append(counter-numOfPops)
            numOfPops = numOfPops + 1
            isIssueDeleted = True

        counter = counter + 1

    #issues = [issue for issue in issues if issue["leadtime"].get("Overall")]

    # Drop issues that did not finish during the time period
    for num in issuesToDelete:
        issues.pop(num)

    for issue in issues:
        # Calculate lead time for each work state
        state_transition_time = -1
        # Loop through the state changing history and add up lead time for all states
        for info in issue["raw_info"]:
            prev_work_state = work_states_dict.get(int(info[0]))
            new_state_transition_time = int(info[2])
            if prev_work_state in lead_time_states and state_transition_time > 0:
                start_time = datetime.datetime.fromtimestamp(state_transition_time)
                end_time = datetime.datetime.fromtimestamp(new_state_transition_time)
                issue_working_days = TimeIntervalCalculator.workday_diff(start_time, end_time)
                issue["leadtime"][prev_work_state] += issue_working_days

            # Update for looping purposes
            state_transition_time = new_state_transition_time

        # Insert issue lead time into all intervals in ascending order for the percentile calculation
        for key, value in rolling_intervals.iteritems():
            if (value["rolling_interval_start"] < issue["start_state_time"] < value["rolling_interval_end"] and
                    value["rolling_interval_start"] < issue["end_state_time"] < value["rolling_interval_end"]):
                for state, leadtime in issue["leadtime"].iteritems():
                    insort(value["leadtime"][state], leadtime)

    # Init Output
    payload = {
        "fiftieth": {},
        "eightieth": {},
        "ninetieth": {}
    }
    for percentile, content in payload.iteritems():
        for state in lead_time_states:
            content[state] = []
        content["Overall"] = []

    # Generate Output
    for key, value in rolling_intervals.iteritems():
        for state, leadtime in value["leadtime"].iteritems():
            payload["fiftieth"][state].append((key, percentile_calculation(0.5, leadtime)))
            payload["eightieth"][state].append((key, percentile_calculation(0.8, leadtime)))
            payload["ninetieth"][state].append((key, percentile_calculation(0.9, leadtime)))

    # Rearrange Output
    for percentile_res, content in payload.iteritems():
        for state, leadtimes in content.iteritems():
            leadtimes.sort(key=itemgetter(0))

    return response_formatter(status_code=200, body=payload)
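
The `percentile_calculation` helper used above (and in the throughput handlers further down) is not shown. A minimal sketch, assuming it linearly interpolates between neighbouring values of an ascending-sorted list as the comments in Example No. 14 suggest; the exact estimator used in the original is not confirmed by the source:

import math

def percentile_calculation(fraction, sorted_values):
    # Hypothetical sketch: linear-interpolation percentile over an ascending-sorted list.
    # The callers above pass fraction as 0.1 / 0.2 / 0.5 / 0.8 / 0.9.
    if not sorted_values:
        return 0
    if len(sorted_values) == 1:
        return sorted_values[0]
    position = fraction * (len(sorted_values) - 1)
    lower = int(math.floor(position))
    upper = min(lower + 1, len(sorted_values) - 1)
    weight = position - lower
    return sorted_values[lower] * (1 - weight) + sorted_values[upper] * weight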
Example No. 8
            filename = str(table) + current_time() + '.csv'
            filename = filename.lower()
            files.append(filename)
            local_path = '/home/ubuntu/temp/' + filename

            sql_export_table = "COPY ({}) TO STDOUT WITH CSV HEADER".format(query_all_data)
            with open(local_path, 'w') as f:
                pg_cur.copy_expert(sql_export_table, f)

            s3.upload_file(local_path, bucketname, filename)

    pg.close()
    log.info("Files uploaded to s3 bucket")

    rs = RedshiftConnection()
    prevent_csv_overflow()
    # load tables from S3 into Redshift
    for file_name in files:
        try:
            f = open(PROJ_DIR + "temp/" + file_name, 'r')
            reader = csv.reader(f)
            table_name = file_name[:-24]
            rs.delete_existing_tables(table_name)
            create_table_statement = create_table_in_redshift(reader, table_name)
            copy_table_statement = copy_table(table_name, file_name)
            f.close()
            rs.run_query_commit(create_table_statement)
            rs.run_query_commit(copy_table_statement)
            log.info("Done migrating " + str(table_name))
Example No. 9
def handler(event, context):
    # Validate user input
    try:
        # User input
        data = json.loads(event['body'])
        board_name = data['boardName']
        include_subtasks = data['includeSubtasks']
        excluded_issue_types = data['excludedIssueTypes']
        issue_filter = data['issueFilter']
        project_name = data['projectName']
    except:
        payload = {'message': 'Invalid user input'}
        return response_formatter(status_code='400', body=payload)

    # Parse project_id from the URL
    try:
        project_id = int(event.get('pathParameters').get('id'))
    except:
        payload = {"message": "Could not get id path parameter"}
        return response_formatter(status_code='400', body=payload)

    JH_USER = os.environ['JH_USER']
    JH_PASS = os.environ['JH_PASS']
    JH_JIRAURL = os.environ['JH_JIRAURL']

    # Validate board_name exists
    try:
        encoded_board_name = urllib.quote(board_name, safe='')
        JIRA_BOARD_API = web_api_constants.BOARD_NAME_URL.format(
            JH_JIRAURL, encoded_board_name)
        content = requests.get(JIRA_BOARD_API, auth=(JH_USER, JH_PASS)).json()
        boards = content['values']
        for board in boards:
            if board['name'] == board_name:
                board_id = board['id']
        board_id  # NameError is raised here if no matching board was found
    except:
        payload = {'message': 'Invalid board name: \'{}\''.format(board_name)}
        return response_formatter(status_code='400', body=payload)

    # Validate Project name change
    try:
        redshift = RedshiftConnection()
        project_exists = redshift.validateProjectNameChange(
            project_id, project_name)
        # If project name already exists in other projects:
        if project_exists:
            payload = {
                'message':
                'Project name {} already exists'.format(project_name)
            }
            return response_formatter(status_code='400', body=payload)
    except Exception as e:
        payload = {'message': 'Internal error: {}'.format(e)}
        return response_formatter(status_code='500', body=payload)

    # Validate excluded_issue_type
    try:
        JIRA_ISSUE_TYPE = web_api_constants.JIRA_SEARCH_URL.format(JH_JIRAURL)
        issue_types = requests.get(JIRA_ISSUE_TYPE,
                                   auth=(JH_USER, JH_PASS)).json()
        # Split csv to array
        excluded_issue_types_list = excluded_issue_types.split(
            ',') if excluded_issue_types else []
        if excluded_issue_types_list:
            for excluded_issue_type in excluded_issue_types_list:
                # Raise exception if issue_type is invalid
                if not any(issue_type for issue_type in issue_types
                           if issue_type['name'] == excluded_issue_type):
                    raise Exception(excluded_issue_type)
    except Exception as e:
        payload = {'message': 'Invalid issue type: \'{}\''.format(e)}
        return response_formatter(status_code='400', body=payload)

    # Validate JQL
    queryString = urllib.quote(issue_filter, safe='')
    queryString += '&fields=*none&maxResults=0'
    pageURL = web_api_constants.JQL_SEARCH_URL.format(queryString)
    r = requests.get(pageURL, auth=(JH_USER, JH_PASS))
    if not r.ok:
        payload = {"message": "Error in the JQL Query: {}".format(r.content)}
        return response_formatter(status_code='400', body=payload)

    try:
        redshift = RedshiftConnection()

        # Update issues in database
        try:
            issue_type_list = redshift.get_issue_types(project_id)
            all_issue_types = [{
                "name": issue_type[0],
                "subtask": issue_type[1]
            } for issue_type in issue_type_list]
        except Exception as e:
            payload = {
                'message': 'Invalid project key in db: \'{}\''.format(e)
            }
            return response_formatter(status_code='400', body=payload)

        redshift.updateIssues(project_id, board_name, include_subtasks,
                              issue_filter, all_issue_types,
                              excluded_issue_types_list, project_name)
    except ValueError as e:
        payload = {'message': "Issue Type Error: {}".format(e)}
        return response_formatter(status_code='400', body=payload)
    except Exception as e:
        payload = {'message': 'Internal error: {}'.format(e)}
        return response_formatter(status_code='500', body=payload)
    finally:
        redshift.closeConnection()

    return response_formatter(status_code='200', body={})
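
For reference, a request body this handler would accept might look like the following; the values are invented, but the keys match those read at the top of the handler (excludedIssueTypes is a comma-separated string, issueFilter is a JQL query):

import json

example_body = json.dumps({
    "boardName": "Team Vger Board",
    "includeSubtasks": False,
    "excludedIssueTypes": "Epic,Sub-task",
    "issueFilter": "project = VGER AND issuetype != Epic",
    "projectName": "Vger"
})
# handler({"body": example_body, "pathParameters": {"id": "42"}}, None)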
Example No. 10
def handler(event, context):
    # Grab the data passed to the lambda function through the browser URL (API Gateway)
    try:
        projectID = (event.get('pathParameters').get('id'))
    except Exception as e:
        print(e)
        payload = {"message": "Id path parameter not given"}
        response = {"statusCode": 400, "body": json.dumps(payload)}
        return response

    redshift = RedshiftConnection()
    cur = redshift.getCursor()

    try:
        redshift.validateProjectID(projectID)
    except Exception:
        redshift.closeConnection()
        payload = {
            "message": "No resource with project ID {} found".format(projectID)
        }
        response = {"statusCode": 404, "body": json.dumps(payload)}
        return response

    try:
        # Grab the query string parameter of offset(days), dateUntil, dateSince, and workTypes
        queryParameters = QueryParameters(event)
        days = queryParameters.getDays()
        dateUntil = queryParameters.getDate('dateUntil')
        dateSince = queryParameters.getDate('dateSince')
        workTypes = queryParameters.getWorktypes()

        workTypeParser = WorkTypeParser(workTypes, projectID)
        workTypeParser.validateWorkTypes(redshift.getCursor(),
                                         redshift.getConn())

        timeInterval = TimeIntervalCalculator(dateUntil, dateSince, days)
        timeInterval.decrementDateSinceWeeks(1)

    except ValueError as err:
        redshift.closeConnection()
        payload = {"message": "{}".format(err)}
        response = {"statusCode": 400, "body": json.dumps(payload)}
        return response

    dateSince = timeInterval.getDateSinceInt()
    dateUntil = timeInterval.getDateUntilInt()

    endDate = dateUntil
    startDate = dateSince
    weeks = [startDate]

    secsPerWeek = 604800
    # Insert into weeks all the mondays until dateUntil
    while (startDate < endDate):
        startDate += secsPerWeek
        weeks.append(startDate)

    status_list = get_completion_event_statuses(redshift, projectID)

    issueTypesList = workTypeParser.issueTypesList
    invalidResolutionsList = workTypeParser.invalidResolutionsList

    selectCompletedQuery = """
    SELECT MAX(changed) AS maxdate, issue_key FROM issue_change 
    WHERE team_project_id = %s
    AND changed < %s
    AND changed >= %s
    AND new_value IN %s
    AND prev_value IN %s
    AND (%s = 0 OR issue_type IN %s)
    AND (%s = 0 OR resolution NOT IN %s)
    GROUP BY issue_key ORDER BY maxdate ASC
    """
    selectCompletedQuery = cur.mogrify(
        selectCompletedQuery,
        (projectID, weeks[-1], weeks[0], tuple(status_list["completed"]),
         tuple(status_list["working"]), 1 if issueTypesList else 0,
         tuple(issueTypesList) if issueTypesList else
         (None, ), 1 if invalidResolutionsList else 0,
         tuple(invalidResolutionsList) if invalidResolutionsList else
         (None, )))
    try:
        changes = redshift.executeCommitFetchAll(selectCompletedQuery)
    except Exception:
        print("Could not perform query: {}".format(selectCompletedQuery))
        redshift.closeConnection()
        payload = {"message": "Internal server error"}
        response = {"statusCode": 500, "body": json.dumps(payload)}
        return response

    ticketsByWeekPayload = []
    ticketsByWeekPayload = jira_throughput_tickets.buildTicketPayload(
        changes, weeks)

    changeIndex = 0
    payload = []
    organizedTotals = []

    for week in weeks:
        if week == weeks[0]:
            continue
        completed = 0
        while (changeIndex < len(changes) and changes[changeIndex][0] < week):
            completed += 1
            changeIndex += 1
        # isoformat implicitly assumes utc time without appending trailing 'Z'
        weekStr = datetime.datetime.fromtimestamp(
            week, tz=pytz.utc).isoformat() + "Z"
        payload.append([weekStr, completed])
        organizedTotals.append(completed)

    organizedTotals = sorted(organizedTotals)
    lengthOfDataSet = len(organizedTotals)

    # Calculate straight percentile values using the R7 statistical method
    # https://en.wikipedia.org/wiki/Quantile (find: R-7)
    ninetiethPercentilesStraightPoint = R7PercentileCalculator(
        90.0, organizedTotals, lengthOfDataSet)
    eightiethPercentilesStraightPoint = R7PercentileCalculator(
        80.0, organizedTotals, lengthOfDataSet)
    fiftiethPercentilesStraightPoint = R7PercentileCalculator(
        50.0, organizedTotals, lengthOfDataSet)
    twentiethPercentilesStraightPoint = R7PercentileCalculator(
        20.0, organizedTotals, lengthOfDataSet)
    tenthPercentilesStraightPoint = R7PercentileCalculator(
        10.0, organizedTotals, lengthOfDataSet)

    #make each "straight percentile" an array of values of equal length to
    ninetiethPercentilesStraight = [ninetiethPercentilesStraightPoint
                                    ] * lengthOfDataSet
    eightiethPercentilesStraight = [eightiethPercentilesStraightPoint
                                    ] * lengthOfDataSet
    fiftiethPercentilesStraight = [fiftiethPercentilesStraightPoint
                                   ] * lengthOfDataSet
    twentiethPercentilesStraight = [twentiethPercentilesStraightPoint
                                    ] * lengthOfDataSet
    tenthPercentilesStraight = [tenthPercentilesStraightPoint
                                ] * lengthOfDataSet

    payload.append(["fiftiethStraight", fiftiethPercentilesStraight])
    payload.append(["eightiethStraight", eightiethPercentilesStraight])
    payload.append(["ninetiethStraight", ninetiethPercentilesStraight])
    payload.append(["twentiethStraight", twentiethPercentilesStraight])
    payload.append(["tenthStraight", tenthPercentilesStraight])

    # Since two outputs need to be encoded into the response body, separate them with a marker string
    # so the front end can parse and split them back into two outputs
    newPayload = payload + ["tickets"] + ticketsByWeekPayload

    response = {
        "statusCode": 200,
        "headers": {
            "Access-Control-Allow-Origin":
            "*",  # Required for CORS support to work
            "Access-Control-Allow-Credentials":
            True  # Required for cookies, authorization headers with HTTPS
        },
        "body": json.dumps(newPayload),
    }

    redshift.closeConnection()
    return response
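
`R7PercentileCalculator` is not shown either. A minimal sketch that follows the R-7 quantile method referenced in the comment above (percentile is given on a 0-100 scale, `sortedData` is ascending); treat it as an illustration rather than the original implementation:

import math

def R7PercentileCalculator(percentile, sortedData, lengthOfDataSet):
    # Hypothetical sketch of the R-7 quantile estimate (linear interpolation between order statistics).
    if lengthOfDataSet == 0:
        return 0
    if lengthOfDataSet == 1:
        return sortedData[0]
    h = (lengthOfDataSet - 1) * (percentile / 100.0)
    lower = int(math.floor(h))
    upper = min(lower + 1, lengthOfDataSet - 1)
    return sortedData[lower] + (h - lower) * (sortedData[upper] - sortedData[lower])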
Example No. 11
def handler(event, context):
    # Grab the data passed to the lambda function through the browser URL (API Gateway)
    try:
        projectID = (event.get('pathParameters').get('id'))
    except Exception as e:
        print (e)
        payload = {"message": "Id path parameter not given"}
        response={
            "statusCode": 400,
            "body": json.dumps(payload)
        }
        return response

    redshift = RedshiftConnection()
    cur = redshift.getCursor()

    selectIDQuery = cur.mogrify("SELECT name, id FROM team_project WHERE id = %s", (projectID,))

    try:
        IDResults = redshift.executeCommitFetchAll(selectIDQuery)
    except Exception:
        redshift.closeConnection()
        payload = {"message": "Internal Error"}
        response={
            "statusCode": 500,
            "body": json.dumps(payload)
        }
        return response

    if not IDResults:
        redshift.closeConnection()
        payload = {"message": "No resource with project ID {} found".format(projectID)}
        response={
            "statusCode": 404,
            "body": json.dumps(payload)
        }
        return response

    # Grab the query string parameter of dateUntil, dateSince, and offset
    # If omitted dateUntil will be set to current date and time
    # If omitted dateSince will be 90 days before dateUntil
    # These repetitive try/except blocks are necessary because the lookup throws an exception if the specified
    # parameter is not given.
    try:
        days = int(event.get('queryStringParameters').get('days'))
    except Exception as e:
        # If days not given, set it to default of 90
        days = 90
    try:
        dateUntil = event.get('queryStringParameters').get('dateUntil')
    except Exception as e:
        dateUntil = None
    try:
        dateSince = event.get('queryStringParameters').get('dateSince')
    except Exception as e:
        dateSince = None
    try:
        workTypes = event.get('queryStringParameters').get('workTypes')
    except Exception as e:
        # If workTypes not given, all issue types will be returned
        workTypes = None

    try:
        # Try to decode the given date parameters, if undecodable throw exception
        query_param = QueryParameters()
        dateUntil = query_param.decodeDateParam(dateUntil)
        dateSince = query_param.decodeDateParam(dateSince)
    except ValueError as err:
        redshift.closeConnection()
        payload = {"message": "{}".format(err)}
        response={
            "statusCode": 400,
            "body": json.dumps(payload)
        }
        return response


    workTypeParser = WorkTypeParser(workTypes,projectID)
    try:
        workTypeParser.validateWorkTypes(redshift.getCursor(), redshift.getConn())
    except ValueError as err:
        redshift.closeConnection()
        payload = {"message": "{}".format(err)}
        response={
            "statusCode": 400,
            "body": json.dumps(payload)
        }
        return response

    try:
        timeInterval = TimeIntervalCalculator(dateUntil, dateSince, days)
        timeInterval.decrementDateSinceWeeks(1)
    except ValueError as err:
        payload = {"message": "{}".format(err)}
        response={
            "statusCode": 400,
            "body": json.dumps(payload)
        }
        return response

    dateSince = timeInterval.getDateSinceInt()
    dateUntil = timeInterval.getDateUntilInt()

    endDate = dateUntil
    startDate = dateSince
    weeks = [startDate]

    secsPerWeek = 604800
    # Insert into weeks all the mondays until dateUntil
    while (startDate < endDate):
        startDate += secsPerWeek
        weeks.append(startDate)

    issueTypesList = workTypeParser.issueTypesList

    status_list = get_completion_event_statuses(redshift, projectID)

    try:
        selectOpenClosedQuery = """
        SELECT changed, new_value, issue_key
        FROM  issue_change 
        WHERE team_project_id = %(project_id)s
        AND   prev_value = ''
        AND   changed < %(date_until)s
        AND   field_name = 'Status'
        AND (%(issue_type_flag)s = 0 OR issue_type IN %(issue_type_list)s)
        UNION
        SELECT completed.changed,
               'Completed' AS new_value,
               completed.issue_key
        FROM (SELECT MAX(changed) AS changed,
                     'Completed' AS new_value,
                     issue_key
              FROM  issue_change
              WHERE team_project_id = %(project_id)s
              AND   new_value IN %(completed_status)s
              AND   changed < %(date_until)s
              AND   field_name = 'Status'
              AND (%(issue_type_flag)s = 0 OR issue_type IN %(issue_type_list)s)
              GROUP BY issue_key) completed
          LEFT JOIN issue_change uncompleted
                 ON uncompleted.issue_key = completed.issue_key
                AND uncompleted.changed > completed.changed
                AND uncompleted.new_value NOT IN %(completed_status)s
        WHERE uncompleted.changed IS NULL
        ORDER BY changed
        """
        selectOpenClosedQuery = cur.mogrify(selectOpenClosedQuery, {
            "project_id": projectID,
            "date_until": dateUntil,
            "completed_status": tuple(status_list["completed"]),
            "issue_type_list": tuple(issueTypesList) if issueTypesList else (None,),
            "issue_type_flag": 1 if issueTypesList else 0
        })
        cur.execute(selectOpenClosedQuery)
        changes = cur.fetchall()
    except Exception as e:
        print ("ERROR: {}".format(e))
        redshift.closeConnection()
        payload = {"message": "Internal server error"}
        response={
            "statusCode": 500,
            "body": json.dumps(payload)
        }
        return response

    changeIndex = 0

    payload = {"Created": [],
               "Completed": []}
    completed = 0
    created = 0
    for week in weeks:
        while changeIndex < len(changes) and changes[changeIndex][0] < week:
            newValue = changes[changeIndex][1]
            if newValue == 'Completed':
                completed += 1
            elif newValue == 'Open':
                created += 1
            changeIndex += 1

        # isoformat implicitly assumes utc time without appending trailing 'Z'
        weekStr = datetime.datetime.fromtimestamp(week, tz=pytz.utc).isoformat()
        payload["Created"].append([weekStr, created])
        payload["Completed"].append([weekStr, completed])

    response={
        "statusCode": 200,
        "headers": {
            "Access-Control-Allow-Origin" : "*", # Required for CORS support to work
            "Access-Control-Allow-Credentials" : True # Required for cookies, authorization headers with HTTPS
        },
        "body": json.dumps(payload)
    }

    redshift.closeConnection()
    return response
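
The response body therefore carries two cumulative series keyed by ISO week timestamps. An illustrative shape (timestamps and counts are made up):

example_payload = {
    "Created":   [["2019-01-07T00:00:00+00:00", 12], ["2019-01-14T00:00:00+00:00", 19]],
    "Completed": [["2019-01-07T00:00:00+00:00", 8], ["2019-01-14T00:00:00+00:00", 15]]
}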
Example No. 12
class VgerRedshiftPreprocessor(LambdaPreprocessor):
    def __init__(self, event):
        LambdaPreprocessor.__init__(self, event)
        self.redshift = RedshiftConnection()

    @preprocessor_error_handling
    def verify_project_id(self):
        try:
            self.param["project_id"] = self.event.get('pathParameters').get(
                'id')
        except Exception as e:
            payload = {
                'message': 'Missing Attribute in path parameters: {}'.format(e)
            }
            return response_formatter(body=payload)

    @preprocessor_error_handling
    def validate_project_id(self):
        try:
            self.redshift.validateProjectID(self.param["project_id"])
        except Exception as e:
            payload = {
                'message':
                'Project with id={0} cannot be found: {1}'.format(
                    self.param["project_id"], e)
            }
            return response_formatter(status_code='404', body=payload)

    @preprocessor_error_handling
    def generate_query_parameters(self, category="", time=True):
        try:
            query_param = QueryParameters(self.event)
            if time:
                self.param["days"] = query_param.getDays()
                self.param["query_date_until"] = query_param.getDate(
                    'dateUntil')
                self.param["query_date_since"] = query_param.getDate(
                    'dateSince')
            if category == "repo":
                self.param["repo_list"] = query_param.getRepoName().split(
                    ',') if query_param.getRepoName() else []
        except Exception as e:
            payload = {'message': 'Invalid query parameters: {0}'.format(e)}
            return response_formatter(status_code='404', body=payload)

    @preprocessor_error_handling
    def verify_project_repo(self):
        try:
            if self.param.get("repo_list"):
                db_repo = self.redshift.getRepos(self.param["project_id"])
                invalid_repo = [
                    str(repo) for repo in self.param["repo_list"]
                    if str(repo) not in db_repo
                ]
                if invalid_repo:
                    raise ValueError(invalid_repo)
        except Exception as e:
            payload = {'message': 'Invalid repository request: {}'.format(e)}
            return response_formatter(status_code='404', body=payload)

    @preprocessor_error_handling
    def generate_rolling_window_weeks(self):
        try:
            rolling_window_days = self.redshift.selectRollingWindow(
                self.param["project_id"])
            self.param["rolling_window_weeks"] = rolling_window_days // 7
        except Exception as e:
            payload = {
                'message':
                'Error on calculating rolling window weeks: {}'.format(e)
            }
            return response_formatter(status_code='500', body=payload)

    @preprocessor_error_handling
    def generate_time_interval_date(self, trace_back=False):
        try:
            time_interval_calculator = TimeIntervalCalculator(
                self.param["query_date_until"], self.param["query_date_since"],
                self.param["days"])
            # Shift back one week to count PRs made in the week before the following Monday
            time_interval_calculator.decrementDateSinceWeeks(
                self.param["rolling_window_weeks"] if trace_back else 1)

            self.param["date_since"] = time_interval_calculator.getDateSince()
            self.param["date_until"] = time_interval_calculator.getDateUntil()
        except ValueError as e:
            payload = {'message': 'Invalid date request: {}'.format(e)}
            return response_formatter(status_code='404', body=payload)
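
A handler built on this preprocessor would typically run the checks in order and stop at the first failure. The exact contract of the `preprocessor_error_handling` decorator is not shown, so the sketch below simply assumes each step returns an error response dict on failure and None on success:

def handler(event, context):
    preprocessor = VgerRedshiftPreprocessor(event)
    steps = (
        preprocessor.verify_project_id,
        preprocessor.validate_project_id,
        lambda: preprocessor.generate_query_parameters(category="repo"),
        preprocessor.verify_project_repo,
        preprocessor.generate_rolling_window_weeks,
        preprocessor.generate_time_interval_date,
    )
    for step in steps:
        error = step()
        if error:
            return error
    # Continue with preprocessor.param["date_since"], ["date_until"], ["repo_list"], etc.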
Example No. 13
    def __init__(self, event):
        LambdaPreprocessor.__init__(self, event)
        self.redshift = RedshiftConnection()
Example No. 14
def handler(event, context):
    # Grab the data passed to the lambda function through the browser URL (API Gateway)
    try:
        projectID = (event.get('pathParameters').get('id'))
    except Exception as e:
        print(e)
        payload = {"message": "Id path parameter not given"}
        response = {"statusCode": 400, "body": json.dumps(payload)}
        return response

    redshift = RedshiftConnection()
    cur = redshift.getCursor()

    try:
        redshift.validateProjectID(projectID)
    except Exception:
        redshift.closeConnection()
        payload = {
            "message": "No resource with project ID {} found".format(projectID)
        }
        response = {"statusCode": 404, "body": json.dumps(payload)}
        return response

    try:
        # Grab the query string parameter of offset(days), dateUntil, dateSince, and workTypes
        queryParameters = QueryParameters(event)
        days = queryParameters.getDays()
        dateUntil = queryParameters.getDate('dateUntil')
        dateSince = queryParameters.getDate('dateSince')
        workTypes = queryParameters.getWorktypes()

        workTypeParser = WorkTypeParser(workTypes, projectID)
        workTypeParser.validateWorkTypes(redshift.getCursor(),
                                         redshift.getConn())

        rollingWindowDays = redshift.selectRollingWindow(projectID)
        # Convert rollingWindowDays to rollingWindowWeeks
        rollingWindowWeeks = int(math.floor(rollingWindowDays / 7.0))
        timeInterval = TimeIntervalCalculator(dateUntil, dateSince, days)
        timeInterval.decrementDateSinceWeeks(rollingWindowWeeks)

    except ValueError as err:
        redshift.closeConnection()
        payload = {"message": "{}".format(err)}
        response = {"statusCode": 400, "body": json.dumps(payload)}
        return response

    dateSince = timeInterval.getDateSinceInt()
    dateUntil = timeInterval.getDateUntilInt()

    endDate = dateUntil
    startDate = dateSince

    # Perform rolling average starting from startDate
    rollingWeeks = [startDate]

    secsPerWeek = 604800
    # Insert into rollingWeeks list all the mondays until dateUntil
    while (startDate < endDate):
        startDate += secsPerWeek
        rollingWeeks.append(startDate)

    status_list = get_completion_event_statuses(redshift, projectID)

    issueTypesList = workTypeParser.issueTypesList
    invalidResolutionsList = workTypeParser.invalidResolutionsList

    selectCompletedQuery = """
        SELECT MAX(changed) AS maxdate FROM issue_change 
        WHERE team_project_id = %s
        AND changed < %s
        AND changed >= %s
        AND new_value IN %s
        AND prev_value IN %s
        AND (%s = 0 OR issue_type IN %s)
        AND (%s = 0 OR resolution NOT IN %s)
        GROUP BY issue_key ORDER BY maxdate ASC
        """
    selectCompletedQuery = cur.mogrify(
        selectCompletedQuery,
        (projectID, rollingWeeks[-1], rollingWeeks[0],
         tuple(status_list["completed"]), tuple(
             status_list["working"]), 1 if issueTypesList else 0,
         tuple(issueTypesList) if issueTypesList else
         (None, ), 1 if invalidResolutionsList else 0,
         tuple(invalidResolutionsList) if invalidResolutionsList else
         (None, )))

    try:
        changes = redshift.executeCommitFetchAll(selectCompletedQuery)
    except Exception:
        print("Could not perform query: {}".format(selectCompletedQuery))
        redshift.closeConnection()
        payload = {"message": "Internal error"}
        response = {"statusCode": 500, "body": json.dumps(payload)}
        return response

    changeIndex = 0
    numCompletedIssues = []
    for week in rollingWeeks:
        if week == rollingWeeks[0]:
            continue
        completed = 0
        while (changeIndex < len(changes) and changes[changeIndex][0] < week):
            completed += 1
            changeIndex += 1
        numCompletedIssues.append(completed)

    rollingWeeksUsed = []

    tenthPercentiles = []
    twentiethPercentiles = []
    fiftiethPercentiles = []
    eightiethPercentiles = []
    ninetiethPercentiles = []

    # For all the weeks in rollingWeeks perform the throughput calculations moving the window
    # each time
    for index in range(len(rollingWeeks)):
        if index + rollingWindowWeeks >= len(rollingWeeks):
            # Avoids indexing out of range
            break
        numCompletedIssuesSubset = numCompletedIssues[index:index +
                                                      rollingWindowWeeks]
        sortedWeeks = sorted(numCompletedIssuesSubset)
        # Perform the calculation for each percentile
        # Percentile calculation includes linear interpolation for more accurate values
        # https://commons.apache.org/proper/commons-math/javadocs/api-3.0/org/apache/commons/math3/stat/descriptive/rank/Percentile.html
        ninetiethPercentiles.append(percentile_calculation(0.9, sortedWeeks))
        eightiethPercentiles.append(percentile_calculation(0.8, sortedWeeks))
        fiftiethPercentiles.append(percentile_calculation(0.5, sortedWeeks))
        twentiethPercentiles.append(percentile_calculation(0.2, sortedWeeks))
        tenthPercentiles.append(percentile_calculation(0.1, sortedWeeks))

        week = datetime.datetime.fromtimestamp(
            rollingWeeks[index + rollingWindowWeeks], tz=pytz.utc).isoformat()
        rollingWeeksUsed.append(week)

    # For logging purposes print out the return arrays
    print("Fiftieth Percentiles: {}".format(fiftiethPercentiles))
    print("Eightieth Percentiles: {}".format(eightiethPercentiles))
    print("Ninetieth Percentiles: {}".format(ninetiethPercentiles))

    payload = {
        "fiftieth": zip(rollingWeeksUsed, fiftiethPercentiles),
        "eightieth": zip(rollingWeeksUsed, eightiethPercentiles),
        "ninetieth": zip(rollingWeeksUsed, ninetiethPercentiles),
        "twentieth": zip(rollingWeeksUsed, twentiethPercentiles),
        "tenth": zip(rollingWeeksUsed, tenthPercentiles)
    }

    response = {
        "statusCode": 200,
        "headers": {
            "Access-Control-Allow-Origin":
            "*",  # Required for CORS support to work
            "Access-Control-Allow-Credentials":
            True  # Required for cookies, authorization headers with HTTPS
        },
        "body": json.dumps(payload)
    }

    redshift.closeConnection()
    return response
Example No. 15
def handler(event, context):
    # Grab the data passed to the lambda function through the browser URL (API Gateway)
    try:
        projectID = event.get('pathParameters').get('id')
    except Exception as e:
        payload = {"message": "Id path parameter not given: {]".format(e)}
        return response_formatter(status_code=404, body=payload)

    redshift = RedshiftConnection()

    try:
        redshift.validateProjectID(projectID)
    except Exception as e:
        redshift.closeConnection()
        payload = {
            "message":
            "No resource with project ID {} found: {}".format(projectID, e)
        }
        return response_formatter(status_code=404, body=payload)

    try:
        # Grab the query string parameter of offset(days), dateUntil, dateSince, and workTypes
        queryParameters = QueryParameters(event)
        quarters = queryParameters.getQuarterDates().split(',')
        workTypes = queryParameters.getWorktypes()

        workTypeParser = WorkTypeParser(workTypes, projectID)
        workTypeParser.validateWorkTypes(redshift.getCursor(),
                                         redshift.getConn())

    except ValueError as err:
        redshift.closeConnection()
        payload = {"message": "{}".format(err)}
        return response_formatter(status_code=400, body=payload)

    # Init redshift connection
    connection_detail = {
        'dbname': os.environ['DATABASE_NAME'],
        'host': os.environ["CLUSTER_ENDPOINT"],
        'port': os.environ['REDSHIFT_PORT'],
        'user': os.environ['AWS_RS_USER'],
        'password': os.environ['AWS_RS_PASS']
    }

    conn = psycopg2.connect(**connection_detail)

    # Get the sequence for start and end states for current project
    default_state_query = """
    SELECT seq_number
    FROM   team_project, team_work_states
    WHERE  team_project.id = %s
    AND    team_work_states.team_project_id = %s
    AND    (team_work_states.state_name = team_project.default_lead_time_start_state OR 
            team_work_states.state_name = team_project.default_lead_time_end_state)
    ORDER BY seq_number
    """
    with conn:
        with conn.cursor() as cur:
            cur.execute(default_state_query, (projectID, projectID))
            default_state_results = cur.fetchall()
            start_state_seq = default_state_results[0][0]
            end_state_seq = default_state_results[1][0]

    # Get all work states for current project and generate dict for lead time calculation purposes
    work_state_query = """
    SELECT state_name, seq_number
    FROM team_work_states
    WHERE team_project_id = %s
    ORDER BY seq_number
    """

    with conn:
        with conn.cursor() as cur:
            cur.execute(work_state_query, (projectID, ))
            work_states_results = cur.fetchall()
            lead_time_states = [
                work_state for work_state, work_seq in work_states_results
                if start_state_seq <= work_seq < end_state_seq
            ]
            work_states_dict = {
                work_seq: work_state
                for work_state, work_seq in work_states_results
            }

    # Filter out invalid issue types and resolutions
    issueTypesList = workTypeParser.issueTypesList
    invalidResolutionsList = workTypeParser.invalidResolutionsList

    dateSince = quarters[-1]
    dateUntil = quarters[0]

    # Get all issues within time range and their work state changing history
    issue_query = """
    SELECT issue_key,
           listagg(CASE WHEN s1.seq_number IS NULL THEN -1 ELSE s1.seq_number END,',') within group(ORDER BY issue_change.changed) AS prev_number_seq,
           listagg(CASE WHEN s2.seq_number IS NULL THEN -1 ELSE s2.seq_number END,',') within group(ORDER BY issue_change.changed) AS new_number_seq,
           listagg(issue_change.changed,',') within group(ORDER BY issue_change.changed) AS changed_seq
    FROM issue_change
      LEFT JOIN (SELECT team_status_states.team_project_id,
                        team_status_states.status,
                        team_status_states.state_name,
                        team_work_states.seq_number
                 FROM team_status_states
                   LEFT JOIN team_work_states
                          ON team_status_states.team_project_id = team_work_states.team_project_id
                         AND team_status_states.state_name = team_work_states.state_name) s1
             ON s1.team_project_id = issue_change.team_project_id
            AND s1.status = issue_change.prev_value
      LEFT JOIN (SELECT team_status_states.team_project_id,
                        team_status_states.status,
                        team_status_states.state_name,
                        team_work_states.seq_number
                 FROM team_status_states
                   LEFT JOIN team_work_states
                          ON team_status_states.team_project_id = team_work_states.team_project_id
                         AND team_status_states.state_name = team_work_states.state_name) s2
             ON s2.team_project_id = issue_change.team_project_id
            AND s2.status = issue_change.new_value
    WHERE issue_change.team_project_id = %s
    AND   field_name = 'Status'
    AND (%s = 0 OR issue_type IN %s)
    AND (%s = 0 OR resolution NOT IN %s)
    GROUP BY issue_key
    """
    with conn:
        with conn.cursor() as cur:
            cur.execute(
                issue_query,
                (projectID, 1 if issueTypesList else 0,
                 tuple(issueTypesList) if issueTypesList else
                 (None, ), 1 if invalidResolutionsList else 0,
                 tuple(invalidResolutionsList) if invalidResolutionsList else
                 (None, )))
            results = cur.fetchall()

    # Convert results to dict format
    issues = [{
        "issue_name": result[0],
        # Materialize the zipped history as a list so it can be iterated (and reversed) more than once
        "raw_info": list(zip(result[1].split(","), result[2].split(","), result[3].split(","))),
        "latest_seq": int(result[2].split(",")[-1])
    } for result in results]

    # If the latest/current status has not reached the lead time end state, the issue is not done and is filtered out,
    # leaving only finished issues (each of which has passed through the full set of lead time states)
    issues = [
        issue for issue in issues if issue["latest_seq"] >= end_state_seq
    ]

    # Still need to filter out issues that were closed before dateSince or after dateUntil
    counter = 0
    issuesToDelete = []
    # Popping shifts the remaining indices, so each stored index is offset by the number of pops already queued
    numOfPops = 0

    for issue in issues:
        isIssueDeleted = False
        # Init lead time dictionary
        issue["leadtime"] = {
            el: 0
            for el in [item for item in lead_time_states]
        }
        # Find the first time to get into leadtime state from pre-leadtime state
        for info in issue["raw_info"]:
            prev_seq_number = int(info[0])
            next_seq_number = int(info[1])
            state_transition_time = int(info[2])
            if prev_seq_number < start_state_seq <= next_seq_number < end_state_seq:
                issue["start_state_time"] = state_transition_time
                break
        # Find the last time to get into post-leadtime state from leadtime state
        for info in reversed(issue["raw_info"]):
            prev_seq_number = int(info[0])
            next_seq_number = int(info[1])
            state_transition_time = int(info[2])
            if start_state_seq <= prev_seq_number < end_state_seq <= next_seq_number:
                issue["end_state_time"] = state_transition_time
                break

        # If the issue was completed outside the requested time range, mark it for removal
        if ("end_state_time" in issue
                and (issue["end_state_time"] < int(dateSince)
                     or issue["end_state_time"] > int(dateUntil))
                and not isIssueDeleted):
            issuesToDelete.append(counter - numOfPops)
            numOfPops += 1
            isIssueDeleted = True

        # Calculate overall leadtime
        if issue.get("start_state_time") and issue.get("end_state_time"):
            start_time = datetime.datetime.fromtimestamp(
                issue["start_state_time"])
            end_time = datetime.datetime.fromtimestamp(issue["end_state_time"])
            issue_working_days = TimeIntervalCalculator.workday_diff(
                start_time, end_time)
            issue["leadtime"]["Overall"] = float(
                "{0:.2f}".format(issue_working_days))
        # If the start or end state time is missing, the issue never completed the lead time range; mark it for removal
        elif not isIssueDeleted:
            issuesToDelete.append(counter - numOfPops)
            numOfPops += 1
            isIssueDeleted = True

        # Remove issues with an overall lead time under 0.01 working days (roughly 15 minutes) so they are not charted as 0
        if ("Overall" in issue["leadtime"]
                and issue["leadtime"]["Overall"] < 0.01
                and not isIssueDeleted):
            issuesToDelete.append(counter - numOfPops)
            numOfPops += 1
            isIssueDeleted = True

        counter += 1

    # Remove the issues marked for deletion above (indices already adjusted for earlier pops)
    for num in issuesToDelete:
        issues.pop(num)

    for issue in issues:
        # Calculate lead time for each work state
        state_transition_time = -1
        # Loop through the state changing history and add up lead time for all states
        for info in issue["raw_info"]:
            prev_work_state = work_states_dict.get(int(info[0]))
            new_state_transition_time = int(info[2])
            if prev_work_state in lead_time_states and state_transition_time > 0:
                start_time = datetime.datetime.fromtimestamp(
                    state_transition_time)
                end_time = datetime.datetime.fromtimestamp(
                    new_state_transition_time)
                issue_working_days = TimeIntervalCalculator.workday_diff(
                    start_time, end_time)
                issue["leadtime"][prev_work_state] += issue_working_days

            # Update for looping purposes
            state_transition_time = new_state_transition_time

    payload = []
    #create graph data set from data
    for issue in issues:
        obj = {
            'name': issue['issue_name'],
            'workingDays': issue['leadtime']['Overall'],
            'endTime': issue['end_state_time']
        }
        payload.append(obj)

    return response_formatter(status_code=200, body=payload)
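The transition-history bookkeeping above is easier to see on a toy example. The sketch below applies the same per-state accumulation loop to a hand-built raw_info list; the workflow names, sequence numbers, and timestamps are illustrative only, and the working-day calculation is simplified to calendar days instead of TimeIntervalCalculator.workday_diff.

import datetime

# Hypothetical workflow: 0=Backlog, 1=In Progress, 2=Review, 3=Done
work_states_dict = {0: "Backlog", 1: "In Progress", 2: "Review", 3: "Done"}
lead_time_states = ["In Progress", "Review"]  # start_state_seq=1, end_state_seq=3

# (prev_seq, new_seq, changed_epoch) triples, oldest first, as produced by the listagg query
raw_info = [
    ("0", "1", "1672617600"),  # Backlog -> In Progress (2023-01-02)
    ("1", "2", "1672876800"),  # In Progress -> Review   (2023-01-05)
    ("2", "3", "1673049600"),  # Review -> Done          (2023-01-07)
]

leadtime = {state: 0 for state in lead_time_states}
state_transition_time = -1
for prev_seq, _new_seq, changed in raw_info:
    prev_work_state = work_states_dict.get(int(prev_seq))
    new_state_transition_time = int(changed)
    # Time spent in a lead time state is the gap between entering it and leaving it
    if prev_work_state in lead_time_states and state_transition_time > 0:
        start = datetime.datetime.fromtimestamp(state_transition_time)
        end = datetime.datetime.fromtimestamp(new_state_transition_time)
        leadtime[prev_work_state] += (end - start).days  # simplified; the real code uses workday_diff
    state_transition_time = new_state_transition_time

print(leadtime)  # {'In Progress': 3, 'Review': 2}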
Exemplo n.º 16
0
def handler(event, context):
    # parse project_id from the URL
    try:
        project_id = (event.get('pathParameters').get('id'))
    except Exception as e:
        payload = {"message": "Could not get id path parameter"}
        return response_formatter(status_code='400', body=payload)

    pseudo_end_state_name = 'Pseudo End State'
    work_states = []

    # get board id
    try:
        redshift = RedshiftConnection()
        board_id = redshift.getBoardId(project_id)
        print(board_id)
    except:
        payload = {'message': 'Internal error'}
        return response_formatter(status_code='500', body=payload)

    JH_USER = os.environ['JH_USER']
    JH_PASS = os.environ['JH_PASS']
    JH_JIRAURL = os.environ['JH_JIRAURL']
    # connect to jira api and retrieve board configuration
    try:
        JIRA_BOARD_CONFIG_API = web_api_constants.CONFIG_URL.format(JH_JIRAURL, board_id)
        board_config = requests.get(JIRA_BOARD_CONFIG_API, auth=(JH_USER, JH_PASS)).json()

        # Ignore the first empty backlog column
        first_column = board_config['columnConfig']['columns'][0]
        if first_column.get("name") == "Backlog" and (not first_column.get("statuses")):
            board_config['columnConfig']['columns'].pop(0)

        for column in board_config['columnConfig']['columns']:
            state = {
                "name": str(column["name"]),
                "status": []
            }
            for status in column['statuses']:
                status_object = requests.get(status['self'], auth=(JH_USER, JH_PASS)).json()
                state['status'].append(str(status_object['name']))  # convert unicode string to regular string
            work_states.append(state)
            print(state)

        default_lead_time_start_state = find_default_start_state(work_states)
        default_lead_time_end_state = find_default_end_state(work_states)
    except:
        payload = {'message': 'Service unavailable'}
        return response_formatter(status_code='503', body=payload)

    # Cover edge cases when projects do not use explicitly defined column for closed tickets
    if default_lead_time_end_state == pseudo_end_state_name:
        state = {
            "name": pseudo_end_state_name,
            "status": ["Closed"]
        }
        work_states.append(state)
        print(state)

    payload = {
        "defaultLeadTimeStartState": default_lead_time_start_state,
        "defaultLeadTimeEndState": default_lead_time_end_state,
        "workStates": work_states
    }

    return response_formatter(status_code='200', body=payload)
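find_default_start_state and find_default_end_state are defined elsewhere in the project; the sketch below is only a guess at their intent, assuming the default start state is the first board column and the default end state is the last column with mapped statuses, falling back to the pseudo end state otherwise.

# Hypothetical helpers; the real project defines these elsewhere, so treat this as a sketch of plausible behavior
def find_default_start_state(work_states):
    # Assume lead time starts at the first column left on the board (the empty backlog column was already stripped)
    return work_states[0]['name'] if work_states else None

def find_default_end_state(work_states, pseudo_end_state_name='Pseudo End State'):
    # Assume the last column with mapped statuses marks completion; otherwise fall back to the pseudo end state
    for state in reversed(work_states):
        if state['status']:
            return state['name']
    return pseudo_end_state_name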
Exemplo n.º 17
0
def handler(event, context):
    # Validate user input
    try:
        # User input
        data = json.loads(event['body'])
        team_name = data['name']

    except:
        payload = {'message': 'Invalid user input'}
        return response_formatter(status_code='400', body=payload)

    # Validate Team name
    try:
        redshift = RedshiftConnection()
        team_exists = redshift.validateTeamName(team_name)
        # If team name already exists:
        if team_exists:
            payload = {'message': 'Team name already exists'}
            return response_formatter(status_code='400', body=payload)
    except:
        payload = {'message': 'Internal error'}
        return response_formatter(status_code='500', body=payload)
    finally:
        redshift.closeConnection()

    # Create an entry in team table
    try:
        redshift = RedshiftConnection()
        redshift.insertTeam(team_name)
    except Exception as e:
        payload = {
            'message': 'Failed to insert team {}: {}'.format(team_name, e)
        }
        return response_formatter(status_code='500', body=payload)
    finally:
        redshift.closeConnection()

    # Get id of the newly created team
    try:
        redshift = RedshiftConnection()
        team_id = redshift.getTeamId(team_name)
    except Exception as e:
        payload = {'message': 'Internal error'}
        return response_formatter(status_code='500', body=payload)
    finally:
        redshift.closeConnection()

    payload = {'name': team_name, 'id': team_id}
    return response_formatter(status_code='201', body=payload)
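For a quick local check, this handler can be driven with a minimal API Gateway-style event; the body below matches what the validation block expects, the team name is illustrative, and Redshift connectivity plus the response_formatter helper are assumed to be available.

import json

# Illustrative event; only the 'body' field is read by this handler
event = {'body': json.dumps({'name': 'Platform Team'})}
response = handler(event, context=None)
print(response)  # expected: a 201 response whose body contains the new team's name and id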