def push_days(documents_list):
    """Add or update new hours in day collection.

    POST /data/push_days/
    Expects a list of 'day' elements. Push only one user per day.
    Returns { 'error' : string }
    """
    validate_request('push_days', documents_list)
    validate_json_list('day', documents_list)
    for document in documents_list:
        check_datamine_permissions('push_days', document)

    for day in sanitize_objectify_json(documents_list):
        day_date = day['date']
        date_exists = db.day.find({'date': day_date}).limit(1).count()
        day_users = day.get('users', [])

        # Only a single user may be pushed per call
        if len(day_users) > 1:
            raise TSValidationError("Push only one user per day")

        if not day_users:
            # No user payload for this date: nothing to insert or update
            continue

        if date_exists:
            # Date already stored: replace this user's entry in place
            uid = day_users[0]['user_id']
            db.day.update({'date': day_date},
                          {'$pull': {'users': {'user_id': uid}}})
            db.day.update({'date': day_date},
                          {'$push': {'users': {'user_id': uid,
                                               'hours': day_users[0]['hours']}}})
        else:
            # Brand new date with user data: insert the whole document
            db.day.insert(day)

    return {}
def _migrate_user_to_db(ldap_result_dict, password, group):
    """Migrate LDAP user information to database. Password is not inserted.

    The stored record carries an empty password/salt; a fake password is
    used only to satisfy schema validation.

    :param ldap_result_dict: LDAP attribute dict (values are lists)
    :param password: user's password (intentionally not stored here)
    :param group: group name assigned to the migrated user
    :returns: _id of the inserted user document
    """
    user_dict = {
        'name': ldap_result_dict.get('givenName', [''])[0],
        'surname': ldap_result_dict.get('sn', [''])[0],
        'username': ldap_result_dict.get('uid', [''])[0],
        'email': ldap_result_dict.get('mail', [''])[0],
        'phone': ldap_result_dict.get('phone', [''])[0],
        'mobile': ldap_result_dict.get('mobile', [''])[0],
        'city': ldap_result_dict.get('city', [''])[0],
        'group': group,
        'password': '',
        'salary': [],
        'salt': '',
        'status': 'active'
    }
    # Copy user_dict with a fake password to permit correct validation
    user_dict_copy = user_dict.copy()
    user_dict_copy['password'] = '******'
    validate_json_list('user', [user_dict_copy])
    sanified_user_dict = sanitize_objectify_json(user_dict)
    # BUG FIX: insert the sanitized document; the original inserted the raw
    # user_dict, leaving sanified_user_dict computed but unused.
    return db['user'].insert(sanified_user_dict)
def push_expences(documents_list):
    """Add new expences in projects.

    POST /data/push_expences/
    Expects a list of 'project' elements having the 'project.expences'
    subdocument.
    Returns the { 'error' : string, 'ids' : [] }
    """
    validate_request('push_expences', documents_list)
    validate_json_list('project', documents_list)
    for document in documents_list:
        # Check if current user is an employee of selected project
        # TODO: find a better way using standard check_datamine_permissions validation
        if cherrypy.session['_ts_user']['group'] == 'employee':
            found = db.project.find({
                '_id': ObjectId(document['_id']),
                'employees._id': cherrypy.session['_ts_user']['_id']
            }).limit(1).count()
            if not found:
                raise TSValidationError(
                    "Access to project '%s' is restricted for current user"
                    % (document['_id']))
        check_datamine_permissions('push_expences', document)

    sanified_documents_list = sanitize_objectify_json(documents_list)
    expences_ids = []
    cherrypy.log(str(sanified_documents_list),
                 context='TS.PUSH_EXPENCES',
                 severity=logging.INFO)
    for sanified_document in sanified_documents_list:
        project_id = sanified_document['_id']
        found = db.project.find({'_id': project_id}).limit(1).count()
        # If found
        if found:
            for expence in sanified_document['expences']:
                expence_id = expence.get('_id')
                # Idiomatic None check (was `!= None`)
                if expence_id is not None:
                    # If expence._id is already set, drop old expence data
                    db.project.update(
                        {'_id': project_id},
                        {'$pull': {'expences': {'_id': expence_id}}})
                else:
                    # Else, generate random expence_id
                    expence_id = ObjectId()
                    expence['_id'] = expence_id
                # Push new one, only if other elements than _id are provided
                # (len(expence) replaces the redundant len(expence.keys()))
                if len(expence) > 1:
                    db.project.update(
                        {'_id': project_id},
                        {'$push': {'expences': expence}})
                    expences_ids.append(str(expence_id))
    return {'ids': expences_ids}
def _add_default_admin(db):
    """Insert an initial user into *db* based on command-line switches.

    Two supported modes:
      --ldap <usr> <pwd> --add-user-ldap <group>   migrate a user from LDAP
      --add-user <usr> <pwd> <group>               create a local user
    Otherwise a usage hint is printed and nothing is inserted.
    """
    # LDAP mode: credentials follow --ldap, the group follows --add-user-ldap
    if '--add-user-ldap' in sys.argv and len(sys.argv) > sys.argv.index(
            '--add-user-ldap') + 1 and '--ldap' in sys.argv and len(
                sys.argv) > sys.argv.index('--ldap') + 2:
        arg_index = sys.argv.index('--ldap')
        username = sys.argv[arg_index + 1]
        password = sys.argv[arg_index + 2]
        arg_index = sys.argv.index('--add-user-ldap')
        group = sys.argv[arg_index + 1]
        print 'OK!\n[+] Adding credentials ' + username + ' in group ' + group + ' from LDAP',
        # NOTE(review): auth_err is assigned but never read afterwards
        auth_err = None
        try:
            # migrate=True triggers the LDAP -> db migration path
            auth_err = check_credentials(username, password, migrate=True,
                                         group_on_migration=group)
        except Exception as e:
            auth_err = str(e)
    # Local mode: username, password and group follow --add-user
    elif '--add-user' in sys.argv and len(
            sys.argv) > sys.argv.index('--add-user') + 3:
        arg_index = sys.argv.index('--add-user')
        username = sys.argv[arg_index + 1]
        password = sys.argv[arg_index + 2]
        group = sys.argv[arg_index + 3]
        print 'OK!\n[+] Adding credentials ' + username + ':' + password + ' in group ' + group,
        json_user = {
            'password': password,
            'name': username,
            'surname': 'Default',
            'username': username,
            'email': 'admin@localhost',
            'phone': '',
            'mobile': '',
            'city': '',
            'group': group,
            'salary': [],
            'status': 'active'
        }
        validate_json_list('user', [json_user])
        # Administrators get a fixed, well-known _id
        if group == 'administrator':
            json_user['_id'] = '1' * 24
        sanified_json_user = sanitize_objectify_json(json_user)
        # Presumably replaces the plaintext password with a salted hash
        # in place — confirm against update_password_salt_user_json
        update_password_salt_user_json(sanified_json_user)
        db.user.insert(sanified_json_user)
    else:
        print 'OK!\n[-] Skipping user insert, use \'--add-user <usr> <pwd> <group>\' or \'--ldap <usr> <pwd> --add-user-ldap <group>\''
def update(collection, document):
    """Update an inserted record.

    Called by POST /update/<collection>/
    """
    # Validate the request shape and the caller's permissions
    validate_request('update', document)
    validate_json_list(collection, [document])
    check_upsert_permissions('update', collection, document)

    clean_doc = sanitize_objectify_json(document)
    cherrypy.log('%s' % (clean_doc),
                 context='TS.UPDATE.%s.document' % collection,
                 severity=db_log_severity)

    # Merge the incoming fields over the stored record, then write back
    stored_doc = db[collection].find_one({'_id': clean_doc['_id']})
    db[collection].update({'_id': clean_doc['_id']},
                          recursive_merge(stored_doc, clean_doc))
def get(collection, criteria_projection_order):
    """Get selected records from collection, and return it as json.

    Called by GET /<collection>/
    """
    # Check request format
    validate_request('get', criteria_projection_order)

    # Unpack the [criteria, projection, order] triple for readability
    criteria = criteria_projection_order[0]
    projection = criteria_projection_order[1]
    order = criteria_projection_order[2]

    # Check permissions
    check_get_permissions(collection, criteria, projection, order)

    # Sanify criteria (to match with sanified documents)
    clean_criteria = sanitize_objectify_json(criteria)
    cherrypy.log('%s' % (criteria_projection_order),
                 context='TS.GET.%s.criteria_projection_order' % collection,
                 severity=db_log_severity)

    # Request
    cursor = db[collection].find({'$query': clean_criteria,
                                  '$orderby': order},
                                 projection)
    return stringify_objectid_cursor(cursor)
def remove(collection, criterias=None):
    """Remove selected records from collection.

    Called by POST /remove/<collection>

    :param collection: target collection name
    :param criterias: list of removal criteria dicts (defaults to empty list)
    """
    # FIX: avoid the mutable-default-argument pitfall (was `criterias=[]`).
    # Backward-compatible: an omitted argument still behaves as [].
    if criterias is None:
        criterias = []
    # Check request format
    validate_request('remove', criterias)
    # Check permissions before requests
    for criteria in criterias:
        check_remove_permissions(collection, criteria)
    # Sanify criteria (to match with sanified documents)
    sanified_criterias = sanitize_objectify_json(criterias)
    cherrypy.log('%s' % (criterias),
                 context='TS.REMOVE.%s.criteria' % collection,
                 severity=db_log_severity)
    # Requests
    for criteria in sanified_criterias:
        db[collection].remove(criteria)
def add(collection, documents_list):
    """Insert new record list to collection.

    Called by POST /add/<collection>/
    """
    # Validate request shape and per-document permissions
    validate_request('add', documents_list)
    validate_json_list(collection, documents_list)
    for doc in documents_list:
        check_upsert_permissions('add', collection, doc)

    clean_docs = sanitize_objectify_json(documents_list)
    # Eventually rewrite password and salt
    update_password_salt_user_list(collection, clean_docs)
    cherrypy.log('%s' % (clean_docs),
                 context='TS.ADD.%s.documents' % collection,
                 severity=db_log_severity)

    inserted_ids = db[collection].insert(clean_docs)
    return stringify_objectid_list(inserted_ids)
def search_days(criteria):
    """Get day by collection.

    POST /data/search_days/
    Expects a { 'start' : 'date1', 'end' : 'date2', 'user_id' : 'user_id' }
    Returns { 'error' : string, 'records' : [ { }, { }, .. ] }
    """
    validate_request('search_days', criteria)
    clean = sanitize_objectify_json(criteria)
    uid = clean['user_id']

    # Date-range filter combined with the user id
    query = {
        'date': {'$gte': clean['start'], '$lte': clean['end']},
        'users.user_id': uid,
    }
    check_datamine_permissions('search_days', query)

    # Return only the date and the matching user's entry
    fields = {'date': 1, 'users': {'$elemMatch': {'user_id': uid}}}

    cherrypy.log('%s\n%s' % (query, fields),
                 context='TS.SEARCH_DAYS.criteria_projection',
                 severity=logging.INFO)
    return {'records': stringify_objectid_cursor(db.day.find(query, fields))}
def approval(criteria):
    """Approve or reject an expence.

    POST /data/approval/
    Expects { 'project_id' : string, 'user_id' : string,
              'expence_id|trip_id' : string, 'action': approve|reject,
              'note' : string }
    Returns { 'error' : string, 'status' : integer }
    """
    validate_request('approval', criteria)
    sanified_criteria = sanitize_objectify_json(criteria)
    check_datamine_permissions('approval', sanified_criteria)

    # Current user can approve only approvals with status >= conf_approval_flow.index(group)
    owner_status = get_role_approval_step(
        cherrypy.session['_ts_user']['group'])
    draft_status = get_role_approval_step('draft')

    # Is searching found_expence or trip: expence_id wins when both keys exist
    exp_id = sanified_criteria.get('expence_id')
    trp_id = sanified_criteria.get('trip_id')
    expence_type = 'expences' if exp_id else 'trips'
    expence_id = exp_id if exp_id else trp_id

    # Define match on the project and the embedded expence/trip id
    match_expence = {
        '_id': ObjectId(criteria['project_id']),
        '%s._id' % expence_type: ObjectId(expence_id),
    }

    # Define status limits only if is not administrator (step 0).
    # Status should be the owner's own step or draft.
    if owner_status != 0:
        match_expence['$or'] = [{
            '%s.status' % expence_type: owner_status
        }, {
            '%s.status' % expence_type: draft_status
        }]

    # Limit for user id
    user_id = sanified_criteria.get('user_id')
    if user_id:
        match_expence['%s.user_id' % expence_type] = user_id

    aggregation_pipe = [{
        '$unwind': '$%s' % expence_type
    }, {
        '$match': match_expence
    }]
    cherrypy.log('%s' % (aggregation_pipe),
                 context='TS.APPROVAL.aggregation_pipe',
                 severity=logging.INFO)
    result = list(db.project.aggregate(aggregation_pipe))
    if not result:
        raise TSValidationError("Can't find selected expence")

    found_expence = result[0]
    # NOTE(review): original_found_expence is never read below — kept as-is
    original_found_expence = found_expence.copy()

    # Approved: move one step down the approval flow (never below 0)
    if sanified_criteria['action'] == 'approve':
        if found_expence[expence_type]['status'] > 0:
            found_expence[expence_type][
                'status'] = found_expence[expence_type]['status'] - 1
    # Rejected: negate the current status to mark rejection
    else:
        found_expence[expence_type]['status'] = -abs(
            found_expence[expence_type]['status'])

    # Append an optional reviewer note
    if 'note' in sanified_criteria and sanified_criteria['note']:
        if not 'notes' in found_expence[expence_type]:
            found_expence[expence_type]['notes'] = []
        found_expence[expence_type]['notes'].append(sanified_criteria['note'])

    cherrypy.log('%s' % (found_expence),
                 context='TS.APPROVALS.found_expence',
                 severity=logging.INFO)

    # Pull the original element
    db.project.update({'_id': ObjectId(criteria['project_id'])},
                      {'$pull': {
                          expence_type: {
                              '_id': ObjectId(expence_id)
                          }
                      }})
    # Push the modified element, with an hack to avoid to push the entire array
    db.project.update({'_id': ObjectId(criteria['project_id'])},
                      {'$push': {
                          expence_type: found_expence[expence_type]
                      }})

    approval_result = {}
    # Status
    approval_result['status'] = found_expence[expence_type]['status']
    # Approver data: only a whitelist of session fields is forwarded
    approver_data = dict(
        (key, value)
        for key, value in cherrypy.session['_ts_user'].iteritems()
        if key in ('username', 'name', 'surname', 'email'))
    # Notifications
    notifications_result = notifications.notify_expence(
        found_expence, expence_type, approver_data)
    if notifications_result:
        approval_result['notifications'] = notifications_result
    return approval_result
def search_approvals(criteria):
    """Search expences.

    POST /data/search_approvals/
    Expects { 'projects_id' : [ ], 'user_id': string,
              'type': trips|expences|any,
              'status': toapprove|approved|rejected|any }
    Returns { 'error' : string, 'records' : [] }
    """
    validate_request('search_approvals', criteria)
    sanified_criteria = sanitize_objectify_json(criteria)
    check_datamine_permissions('search_approvals', sanified_criteria)

    # Get flow status number relative to current user
    owner_status = get_role_approval_step(
        cherrypy.session['_ts_user']['group'])

    # Search only expences or trips or both
    type_requested = sanified_criteria.get('type', 'any')
    if type_requested == 'any':
        aggregations_types = ['trips', 'expences']
    else:
        aggregations_types = [type_requested]

    records = {'trips': [], 'expences': []}
    for aggregation_type in aggregations_types:
        # Prepare status filter
        status_requested = sanified_criteria.get('status', 'toapprove')
        # If is administrator (step 0), can see whole ranges:
        # >0 pending, ==0 approved, <0 rejected
        if owner_status == 0:
            if status_requested == 'toapprove':
                match_project_status = {
                    '%s.status' % aggregation_type: {
                        '$gt': 0
                    }
                }
            elif status_requested == 'approved':
                match_project_status = {'%s.status' % aggregation_type: 0}
            elif status_requested == 'rejected':
                match_project_status = {
                    '%s.status' % aggregation_type: {
                        '$lt': 0
                    }
                }
            else:
                match_project_status = {}
        # If it is a permitted user, can see only specific status
        else:
            if status_requested == 'toapprove':
                match_project_status = {
                    '%s.status' % aggregation_type: owner_status
                }
            elif status_requested == 'approved':
                match_project_status = {'%s.status' % aggregation_type: 0}
            elif status_requested == 'rejected':
                match_project_status = {
                    '%s.status' % aggregation_type: {
                        '$lt': 0
                    }
                }
            else:
                # 'any': own pending step, approved, or rejected
                match_project_status = {
                    '$or': [{
                        '%s.status' % aggregation_type: owner_status
                    }, {
                        '%s.status' % aggregation_type: 0
                    }, {
                        '%s.status' % aggregation_type: {
                            '$lt': 0
                        }
                    }]
                }

        # If project_id is not set, allows only managed_projects
        projects_requested = [
            ObjectId(p) for p in sanified_criteria.get(
                'projects_id',
                cherrypy.session['_ts_user']['managed_projects'])
        ]
        if projects_requested:
            match_project_status.update({'_id': {'$in': projects_requested}})

        # Optional filter on the expence/trip owner
        user_id = sanified_criteria.get('user_id')
        if user_id:
            match_project_status.update(
                {'%s.user_id' % aggregation_type: user_id})

        # Project the subdocument fields plus the parent project id
        project_rename = {'%s.project_id' % aggregation_type: '$_id'}
        for key in schema['project']['properties'][aggregation_type]['items'][
                'properties'].keys():
            project_rename['%s.%s' % (aggregation_type, key)] = 1

        aggregation_pipe = [{
            '$unwind': '$%s' % aggregation_type
        }, {
            '$match': match_project_status
        }, {
            '$project': project_rename
        }, {
            '$group': {
                '_id': '$%s' % (aggregation_type)
            }
        }, {
            '$sort': {
                '_id.start': 1,
                '_id.date': 1
            }
        }]
        cherrypy.log('%s' % (aggregation_pipe),
                     context='TS.SEARCH_APPROVALS.aggregation_pipe',
                     severity=logging.INFO)
        aggregation_result = db.project.aggregate(aggregation_pipe)
        records[aggregation_type] = stringify_objectid_cursor(
            [record['_id'] for record in list(aggregation_result)])
    return records
def report_users_hours(criteria):
    """Get report grouped by users.

    POST /data/report_users_hours/
    Expects a { 'start' : '', 'end' : '', 'users' : [], 'projects' : [],
                hours_standard : bool, hours_extra : bool, tasks : [] }
    Returns { 'error' : string, 'records' : [ { }, { }, .. ] }
    """
    validate_request('report_users_hours', criteria)
    clean = sanitize_objectify_json(criteria)

    # Restrict days to the requested date range
    range_match = {'date': {'$lte': clean['end'], '$gte': clean['start']}}

    hour_filters = {}
    # Optional user filter
    if clean['users_ids']:
        hour_filters['users.user_id'] = {'$in': clean['users_ids']}
    # Optional project filter
    if clean['projects']:
        hour_filters['users.hours.project'] = {'$in': clean['projects']}
    # Optional extra-hours filter: applied only when exactly one of the
    # two flags is requested
    if clean['hours_standard'] == True and clean['hours_extra'] == False:
        hour_filters['users.hours.isextra'] = False
    elif clean['hours_standard'] == False and clean['hours_extra'] == True:
        hour_filters['users.hours.isextra'] = True
    # Optional task filter
    if clean['tasks']:
        hour_filters['users.hours.task'] = {'$in': clean['tasks']}

    check_datamine_permissions('report_users_hours', hour_filters)

    # Group the surviving hour blocks per (user, day), sorted for output
    pipeline = [
        {'$match': range_match},
        {'$unwind': '$users'},
        {'$unwind': '$users.hours'},
        {'$match': hour_filters},
        {'$group': {'_id': {'user_id': '$users.user_id', 'date': '$date'},
                    'hours': {'$push': '$users.hours'}}},
        {'$sort': {'_id.user_id': 1, '_id.date': 1}},
    ]
    cherrypy.log(repr(pipeline),
                 context='TS.REPORT_USER_HOURS.aggregation',
                 severity=logging.INFO)
    results = db.day.aggregate(pipeline)
    return {'records': stringify_objectid_cursor(list(results))}
def search_expences(criteria):
    """Get expences.

    POST /data/search_expences/
    Expects { 'start': data, 'end': data, 'status': [ integer, integer, .. ],
              'user_id': string, project_id: string, employee_id: string,
              'responsible_id' : string }
    Returns { 'error' : string, 'records' : [ { }, { }, .. ] }
    """
    validate_request('search_expences', criteria)
    check_datamine_permissions('search_expences', criteria)
    sanified_criteria = sanitize_objectify_json(criteria)

    # Prepare the aggregation pipe
    projects_ids_matches = {}
    employee_id = sanified_criteria.get('employee_id')
    if employee_id is not None:
        projects_ids_matches['employees._id'] = ObjectId(employee_id)
    responsible_id = sanified_criteria.get('responsible_id')
    if responsible_id is not None:
        projects_ids_matches['responsibles._id'] = ObjectId(responsible_id)
    project_id = sanified_criteria.get('project_id')
    if project_id is not None:
        projects_ids_matches['_id'] = ObjectId(project_id)

    trips_matches = {}
    user_id = sanified_criteria.get('user_id')
    if user_id is not None:
        trips_matches['expences.user_id'] = user_id

    # BUG FIX: build the date range incrementally. The original assigned a
    # fresh {'$lte': ...} dict when 'end' was present, silently discarding
    # the '$gte' lower bound whenever both 'start' and 'end' were supplied.
    date_range = {}
    date_start = sanified_criteria.get('start')
    if date_start is not None:
        date_range['$gte'] = date_start
    date_end = sanified_criteria.get('end')
    if date_end is not None:
        date_range['$lte'] = date_end
    if date_range:
        trips_matches['expences.date'] = date_range

    status = sanified_criteria.get('status')
    if status is not None:
        trips_matches['expences.status'] = {'$in': status}

    # Project each expence's schema fields plus its parent project id
    expences_rename = {'expences.project_id': '$_id'}
    for expence_key in schema['project']['properties']['expences']['items'][
            'properties'].keys():
        expences_rename['expences.%s' % expence_key] = 1

    aggregation_pipe = [{'$match': projects_ids_matches},
                        {'$unwind': '$expences'},
                        {'$match': trips_matches},
                        {'$project': expences_rename},
                        {'$group': {'_id': '$expences'}}]
    cherrypy.log('%s' % (aggregation_pipe),
                 context='TS.SEARCH_EXPENCES.aggregation_pipe',
                 severity=logging.INFO)
    aggregation_result = db.project.aggregate(aggregation_pipe)
    return {'records': stringify_objectid_cursor(
        [record['_id'] for record in list(aggregation_result)])}
def report_projects(criteria):
    """Get projects report.

    POST /data/report_projects/
    Expects { 'start' : '', 'end' : '', 'customers' : [], 'projects' : [],
              'tags' : [], 'mode' : 'total|project' }
    Returns with mode total { 'error' : string,
        'records' : [ [ 'YYYY-MM', 2 ], [ 'YYYY-MM', 4 ], .. ] }
    Returns with mode project { 'error' : string,
        'records' : { 'proj1' : [ [ 'YYYY-MM', 2 ], [ 'YYYY-MM', 4 ] ], .. } }
    """

    def _find_project_list_by_customers_types(sanified_criteria):
        # Collect explicitly requested project ids plus those selected by
        # customer or tag filters.
        projects_input = sanified_criteria.get('projects', [])
        # Add projects by customers
        customers_input = sanified_criteria.get('customers')
        if customers_input:
            customer_projects = db.project.find(
                {'customer': {
                    '$in': customers_input
                }}, {'_id': 1})
            for project in customer_projects:
                projects_input.append(str(project['_id']))
        # Add projects by tags
        types_input = sanified_criteria.get('tags')
        if types_input:
            types_projects = db.project.find({'tags': {
                '$in': types_input
            }}, {'_id': 1})
            for project in types_projects:
                projects_input.append(str(project['_id']))
        return projects_input

    def _find_days_by_projects(projects_input, sanified_criteria):
        # Sum worked hours per (user, day, project, extra-flag)
        # Prepare the aggregation pipe
        dates_match = {
            "date": {
                '$lte': sanified_criteria['end'],
                '$gte': sanified_criteria['start']
            }
        }
        match_projects = {}
        # Match optional projects filters
        if projects_input:
            match_projects['users.hours.project'] = {'$in': projects_input}
        check_datamine_permissions('report_projects', match_projects)
        aggregation_pipe = [{
            '$match': dates_match
        }, {
            '$unwind': '$users'
        }, {
            '$unwind': '$users.hours'
        }, {
            '$match': match_projects
        }, {
            '$group': {
                '_id': {
                    'user_id': '$users.user_id',
                    'date': '$date',
                    'project': '$users.hours.project',
                    'isextra': '$users.hours.isextra'
                },
                'hours': {
                    '$sum': '$users.hours.amount'
                }
            }
        }]
        cherrypy.log(aggregation_pipe.__repr__(),
                     context='TS.REPORT_PROJECTS.days_aggregation',
                     severity=logging.INFO)
        return db.day.aggregate(aggregation_pipe)

    def _find_salaries_by_date_users(days_ids_list, end, start):
        # Fetch salary ranges for the given user ids.
        # NOTE(review): the end/start parameters are currently unused here;
        # date filtering happens later in the merge functions.
        # TODO: measure if _id filter now is superfluous due to filter
        # check during merge
        aggregation_pipe = [{
            '$match': {
                'salary.cost': {
                    '$gt': 0
                },
                '_id': {
                    '$in': days_ids_list
                }
            }
        }, {
            '$project': {
                'salary.from': 1,
                'salary.to': 1,
                'salary.cost': 1,
                '_id': 1
            }
        }]
        cherrypy.log(aggregation_pipe.__repr__(),
                     context='TS.REPORT_PROJECTS.salaries_aggregation',
                     severity=logging.INFO)
        return db.user.aggregate(aggregation_pipe)

    def _find_incomes_by_project_date(projects_input, end, start):
        # Collect budget/extra economics entries per project within range
        objectified_projects_input = [ObjectId(p) for p in projects_input]
        if projects_input:
            match_projects = {'_id': {'$in': objectified_projects_input}}
        else:
            match_projects = {}
        aggregation_pipe = [{
            '$match': match_projects
        }, {
            '$unwind': '$economics'
        }, {
            '$match': {
                'economics.period': {
                    '$gte': start,
                    '$lte': end
                }
            }
        }, {
            '$group': {
                '_id': {
                    'project_id': '$_id',
                    'period': '$economics.period',
                    'budget': '$economics.budget',
                    'extra': '$economics.extra'
                }
            }
        }]
        cherrypy.log(aggregation_pipe.__repr__(),
                     context='TS.REPORT_PROJECTS.incomes_aggregation',
                     severity=logging.INFO)
        return db.project.aggregate(aggregation_pipe)

    def _find_costs_by_project_date(projects_input, end, start):
        # Sum approved expence amounts per (project, date) within range
        objectified_projects_input = [ObjectId(p) for p in projects_input]
        if projects_input:
            match_projects = {'_id': {'$in': objectified_projects_input}}
        else:
            match_projects = {}
        aggregation_pipe = [{
            '$match': match_projects
        }, {
            '$unwind': '$expences'
        }, {
            '$match': {
                'expences.status': conf_approved
            }
        }, {
            '$unwind': '$expences.objects'
        }, {
            '$match': {
                'expences.objects.date': {
                    '$gte': start,
                    '$lte': end
                }
            }
        }, {
            '$group': {
                '_id': {
                    'project_id': '$_id',
                    'date': '$expences.objects.date'
                },
                'amount': {
                    '$sum': '$expences.objects.amount'
                }
            }
        }]
        cherrypy.log(aggregation_pipe.__repr__(),
                     context='TS.REPORT_PROJECTS.costs_aggregation',
                     severity=logging.INFO)
        return db.project.aggregate(aggregation_pipe)

    def _merge_total(days_result, salaries_result, budget_result,
                     costs_result):
        # Merge salaries/budgets/costs into one dict keyed by 'YYYY-MM'
        total_costs = {}
        for day in days_result:
            user_hours_record = day.get('_id', {})
            user_id = user_hours_record.get('user_id')
            project = user_hours_record.get('project')
            isextra = user_hours_record.get('isextra')
            user_date = user_hours_record.get('date')
            user_YM = '-'.join(user_date.split('-')[:2])
            user_hours = day.get('hours', 0)
            # NOTE(review): only the FIRST salary entry of each user is
            # considered — confirm whether multiple ranges are expected
            salary = next(
                (sal['salary'][0]['cost'] for sal in salaries_result
                 if sal['_id'] == ObjectId(user_id)
                 and sal['salary'][0]['from'] <= user_date
                 and sal['salary'][0]['to'] >= user_date), 0)
            if salary:
                # If the hour block is extra, add the multiplier_on_extras to itself
                if isextra:
                    salary *= conf_reports['multiplier_on_extras']
                if not total_costs.get(user_YM):
                    total_costs[user_YM] = {
                        'salary': 0,
                        'budget': 0,
                        'extra_budget': 0,
                        'costs': 0
                    }
                total_costs[user_YM]['salary'] += round(salary * user_hours,
                                                        2)
        for budget in budget_result:
            budget_YM = '-'.join(budget['_id']['period'].split('-')[:2])
            if not total_costs.get(budget_YM):
                total_costs[budget_YM] = {
                    'salary': 0,
                    'budget': 0,
                    'extra_budget': 0,
                    'costs': 0
                }
            total_costs[budget_YM]['budget'] += budget['_id']['budget']
            total_costs[budget_YM]['extra_budget'] += budget['_id']['extra']
        for costs in costs_result:
            costs_YM = '-'.join(costs['_id']['date'].split('-')[:2])
            if not total_costs.get(costs_YM):
                total_costs[costs_YM] = {
                    'salary': 0,
                    'budget': 0,
                    'extra_budget': 0,
                    'costs': 0
                }
            total_costs[costs_YM]['costs'] += costs['amount']
        ## ORDER chronologically by month key
        output_costs_list = []
        for ym in sorted(total_costs.keys()):
            output_costs_list.append((ym, total_costs[ym]))
        return output_costs_list

    def _merge_by_project(days_result, salaries_result, budget_result,
                          costs_result):
        ### MERGE: same as _merge_total but keyed by project, then month
        project_costs = {}
        for day in days_result:
            user_hours_record = day.get('_id', {})
            user_id = user_hours_record.get('user_id')
            project = user_hours_record.get('project')
            isextra = user_hours_record.get('isextra')
            user_date = user_hours_record.get('date')
            user_YM = '-'.join(user_date.split('-')[:2])
            user_hours = day.get('hours', 0)
            # NOTE(review): only the FIRST salary entry of each user is
            # considered — confirm whether multiple ranges are expected
            salary = next(
                (sal['salary'][0]['cost'] for sal in salaries_result
                 if sal['_id'] == ObjectId(user_id)
                 and sal['salary'][0]['from'] <= user_date
                 and sal['salary'][0]['to'] >= user_date), 0)
            if salary:
                if not project_costs.get(project):
                    project_costs[project] = {user_YM: {}}
                if not project_costs[project].get(user_YM):
                    project_costs[project][user_YM] = {
                        'salary': 0,
                        'budget': 0,
                        'extra_budget': 0,
                        'costs': 0
                    }
                # If the hour block is extra, add the multiplier_on_extras to itself
                if isextra:
                    salary *= conf_reports['multiplier_on_extras']
                # While can exists multiple costs for a project-month,
                # cannot exists multiple badges or extras
                project_costs[project][user_YM]['salary'] = project_costs[
                    project][user_YM]['salary'] + round(
                        salary * user_hours, 2)
        for budget in budget_result:
            budget_YM = '-'.join(budget['_id']['period'].split('-')[:2])
            project = str(budget['_id']['project_id'])
            if not project_costs.get(project):
                project_costs[project] = {budget_YM: {}}
            if not project_costs[project].get(budget_YM):
                project_costs[project][budget_YM] = {
                    'salary': 0,
                    'budget': 0,
                    'extra_budget': 0,
                    'costs': 0
                }
            project_costs[project][budget_YM]['budget'] = budget['_id'][
                'budget']
            project_costs[project][budget_YM]['extra_budget'] = budget['_id'][
                'extra']
        for costs in costs_result:
            costs_YM = '-'.join(costs['_id']['date'].split('-')[:2])
            project = str(costs['_id']['project_id'])
            if not project_costs.get(project):
                project_costs[project] = {costs_YM: {}}
            if not project_costs[project].get(costs_YM):
                project_costs[project][costs_YM] = {
                    'salary': 0,
                    'budget': 0,
                    'extra_budget': 0,
                    'costs': 0
                }
            project_costs[project][costs_YM]['costs'] += costs['amount']
        ## ORDER each project's months chronologically
        output_costs_dict = {}
        for project in project_costs.keys():
            output_costs_dict[project] = []
            for ym in sorted(project_costs[project].keys()):
                output_costs_dict[project].append(
                    (ym, project_costs[project][ym]))
        return output_costs_dict

    validate_request('report_projects', criteria)
    sanified_criteria = sanitize_objectify_json(criteria)
    aggregation_mode = sanified_criteria.get('mode', 'total')
    # Find project list
    projects_input = _find_project_list_by_customers_types(sanified_criteria)
    # Day mining
    days_result = list(
        _find_days_by_projects(projects_input, sanified_criteria))
    # NOTE(review): despite the name, these are USER ids from the day records
    days_ids_list = [ObjectId(r['_id']['user_id']) for r in days_result]
    # Salary mining
    salaries_result = list(
        _find_salaries_by_date_users(days_ids_list, sanified_criteria['end'],
                                     sanified_criteria['start']))
    # Project budget extra mining
    budget_result = list(
        _find_incomes_by_project_date(projects_input,
                                      sanified_criteria['end'],
                                      sanified_criteria['start']))
    # Cost mining
    costs_result = list(
        _find_costs_by_project_date(projects_input, sanified_criteria['end'],
                                    sanified_criteria['start']))
    if aggregation_mode == 'total':
        return {
            'records':
            _merge_total(days_result, salaries_result, budget_result,
                         costs_result)
        }
    elif aggregation_mode == 'project':
        return {
            'records':
            _merge_by_project(days_result, salaries_result, budget_result,
                              costs_result)
        }