def push_days(documents_list):
    """
    Add or update hours in the day collection
    POST /data/push_days/
    Expects a list of 'day' elements. Push only one user per day.
    Returns { 'error' : string }
    """

    validate_request('push_days', documents_list)
    validate_json_list('day', documents_list)

    for document in documents_list:
        check_datamine_permissions('push_days', document)

    sanified_documents_list = sanitize_objectify_json(documents_list)

    for sanified_document in sanified_documents_list:
        date = sanified_document['date']
        found = db.day.find({'date': date}).limit(1).count()
        users = sanified_document.get('users', [])

        # Validate one user per day insertion
        if len(users) > 1:
            raise TSValidationError("Push only one user per day")

        # If the date is already present and a user is specified
        if found and users:
            user_id = users[0]['user_id']

            # Drop the old user data
            db.day.update({'date': date},
                          {'$pull': {'users': {'user_id': user_id}}})

            # Push the new one
            db.day.update({'date': date},
                          {'$push': {'users': {'user_id': user_id,
                                               'hours': users[0]['hours']}}})

        # If the date does not exist yet and new user data is provided, insert it
        elif not found and users:
            db.day.insert(sanified_document)

        # If the date already exists but no new user data is provided, skip it
        else:
            pass

    return {}
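# Illustrative only: a minimal push_days request body, matching the docstring
# above and the fields read by push_days ('date', a single 'users' element with
# 'user_id' and 'hours'). The hour fields shown are the ones used elsewhere in
# this module; all values are placeholders, not real data.
_EXAMPLE_PUSH_DAYS_BODY = [
    {
        'date': '2014-03-01',
        'users': [
            {
                'user_id': 'user-id-placeholder',
                'hours': [
                    {'project': 'project-name', 'amount': 8, 'isextra': False}
                ]
            }
        ]
    }
]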
def remove(upload_ids):

    check_datamine_permissions('file_remove', cherrypy.request.params)

    # Sanify upload_id (to match with sanified documents)
    # TODO: check why lists are not objectified
    #upload_ids = sanitize_objectify_json(upload_ids)

    cherrypy.log('%s' % (str(upload_ids)),
                 context='TS.FILE.REMOVE.upload_ids',
                 severity=db_log_severity)

    # Requests
    for upload_id in upload_ids:
        db.upload.remove({'_id': ObjectId(upload_id)})

        file_path = os.path.abspath(
            os.path.join(conf_uploads['folder'], str(upload_id)))

        # If the file on disk is removed successfully, move on to the next id;
        # otherwise fall through and log the error
        if os.path.isfile(file_path):
            try:
                os.remove(file_path)
            except:
                pass
            else:
                continue

        cherrypy.log('Error removing file \'%s\'' % (file_path),
                     context='TS.FILE.REMOVE.file_path',
                     severity=db_log_severity)
def push_expences(documents_list):
    """
    Add new expences to projects
    POST /data/push_expences/
    Expects a list of 'project' elements having the 'project.expences' subdocument.
    Returns { 'error' : string, 'ids' : [] }
    """

    validate_request('push_expences', documents_list)
    validate_json_list('project', documents_list)

    for document in documents_list:
        # Check if the current user is an employee of the selected project
        # TODO: find a better way using the standard check_datamine_permissions validation
        if cherrypy.session['_ts_user']['group'] == 'employee':
            found = db.project.find({
                '_id': ObjectId(document['_id']),
                'employees._id': cherrypy.session['_ts_user']['_id']
            }).limit(1).count()
            if not found:
                raise TSValidationError(
                    "Access to project '%s' is restricted for current user"
                    % (document['_id']))

        check_datamine_permissions('push_expences', document)

    sanified_documents_list = sanitize_objectify_json(documents_list)
    expences_ids = []

    cherrypy.log(str(sanified_documents_list),
                 context='TS.PUSH_EXPENCES',
                 severity=logging.INFO)

    for sanified_document in sanified_documents_list:
        project_id = sanified_document['_id']
        found = db.project.find({'_id': project_id}).limit(1).count()

        # If found
        if found:
            for expence in sanified_document['expences']:
                expence_id = expence.get('_id')
                if expence_id is not None:
                    # If expence._id is already set, drop the old expence data
                    db.project.update({'_id': project_id},
                                      {'$pull': {'expences': {'_id': expence_id}}})
                else:
                    # Otherwise, generate a random expence_id
                    expence_id = ObjectId()
                    expence['_id'] = expence_id

                # Push the new one, only if elements other than _id are provided
                if len(expence.keys()) > 1:
                    db.project.update({'_id': project_id},
                                      {'$push': {'expences': expence}})
                    expences_ids.append(str(expence_id))

    return {'ids': expences_ids}
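# Illustrative only: a minimal push_expences request body. '_id' is the target
# project; the expence fields shown ('date', 'user_id', 'status') are the ones
# queried elsewhere in this module -- the full set is defined by the
# 'project.expences' schema, which is not shown here. Values are placeholders.
_EXAMPLE_PUSH_EXPENCES_BODY = [
    {
        '_id': '5331a1b2c3d4e5f6a7b8c9d0',  # placeholder project ObjectId string
        'expences': [
            {'date': '2014-03-01', 'user_id': 'user-id-placeholder', 'status': 2}
        ]
    }
]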
def upload():

    # Check permissions
    check_datamine_permissions('file_upload', cherrypy.request.params)

    file_uploading = cherrypy.request.params['data']

    uploaded_temp = tempfile.NamedTemporaryFile(mode='w+b',
                                                dir=conf_uploads['folder'],
                                                delete=False)

    try:
        # Stream the uploaded file to the temporary file in 8 KB chunks
        while True:
            data = file_uploading.file.read(8192)
            if not data:
                break
            else:
                uploaded_temp.write(data)
        uploaded_temp.close()

        upload_id = str(
            db.upload.insert({
                'name': str(file_uploading.filename),
                'content_type': cgi.escape(str(file_uploading.content_type)),
                'owner': cherrypy.session['_ts_user']['username'],
                'date': time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
            }))

        cherrypy.log('%s %s' % (str(file_uploading.filename),
                                str(file_uploading.content_type)),
                     context='TS.FILE.UPLOAD',
                     severity=db_log_severity)

        # Rename the temporary file to the upload's ObjectId
        uploaded_temp_path = os.path.join(conf_uploads['folder'],
                                          uploaded_temp.name)
        uploaded_path = os.path.join(conf_uploads['folder'], upload_id)
        os.rename(uploaded_temp_path, uploaded_path)

    except Exception as e:
        raise

    return upload_id
def _find_days_by_projects(projects_input, sanified_criteria):

    # Prepare the aggregation pipe
    dates_match = {
        "date": {
            '$lte': sanified_criteria['end'],
            '$gte': sanified_criteria['start']
        }
    }
    match_projects = {}

    # Match optional projects filters
    if projects_input:
        match_projects['users.hours.project'] = {'$in': projects_input}

    check_datamine_permissions('report_projects', match_projects)

    aggregation_pipe = [
        {'$match': dates_match},
        {'$unwind': '$users'},
        {'$unwind': '$users.hours'},
        {'$match': match_projects},
        {'$group': {
            '_id': {
                'user_id': '$users.user_id',
                'date': '$date',
                'project': '$users.hours.project',
                'isextra': '$users.hours.isextra'
            },
            'hours': {'$sum': '$users.hours.amount'}
        }}
    ]

    cherrypy.log(aggregation_pipe.__repr__(),
                 context='TS.REPORT_PROJECTS.days_aggregation',
                 severity=logging.INFO)

    return db.day.aggregate(aggregation_pipe)
def download(upload_id):

    # Check permissions
    check_datamine_permissions('file_download', cherrypy.request.params)

    upload_id = ObjectId(upload_id)
    json_found = db.upload.find_one({'_id': upload_id}, {'content_type': 1})

    file_path = os.path.abspath(
        os.path.join(conf_uploads['folder'], str(upload_id)))

    cherrypy.log('%s' % (upload_id),
                 context='TS.FILE.DOWNLOAD.upload_id',
                 severity=db_log_severity)

    if not json_found:
        raise TSValidationError("File id not found")

    return file_path, json_found['content_type']
def search_days(criteria):
    """
    Get days from the day collection
    POST /data/search_days/
    Expects { 'start' : 'date1', 'end' : 'date2', 'user_id' : 'user_id' }
    Returns { 'error' : string, 'records' : [ { }, { }, .. ] }
    """

    validate_request('search_days', criteria)
    sanified_criteria = sanitize_objectify_json(criteria)

    user_id = sanified_criteria['user_id']

    # Prepare the criteria with the date range and the user_id
    prepared_criteria = {
        "date": {
            "$gte": sanified_criteria['start'],
            "$lte": sanified_criteria['end']
        },
        "users.user_id": user_id
    }

    check_datamine_permissions('search_days', prepared_criteria)

    # Prepare the projection to return only the date and the matching users element
    projection = {'date': 1, 'users': {'$elemMatch': {'user_id': user_id}}}

    cherrypy.log('%s\n%s' % (prepared_criteria, projection),
                 context='TS.SEARCH_DAYS.criteria_projection',
                 severity=logging.INFO)

    return {
        'records': stringify_objectid_cursor(
            db.day.find(prepared_criteria, projection))
    }
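# Illustrative only: a search_days criteria payload with placeholder values,
# matching the shape described in the docstring above.
_EXAMPLE_SEARCH_DAYS_CRITERIA = {
    'start': '2014-03-01',
    'end': '2014-03-31',
    'user_id': 'user-id-placeholder'
}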
def search_tags(criteria):
    """
    Collect tags
    POST /data/search_tags/
    Expects { 'count' : int }
    Returns { 'error' : string, 'records' : [ 'tag1', 'tag2', ... ] }
    """

    validate_request('search_tags', criteria)
    check_datamine_permissions('search_tags', criteria)

    aggregation_pipe = [
        {'$unwind': '$tags'},
        {'$group': {'_id': '$tags', 'count': {'$sum': 1}}},
        {'$sort': {'count': -1}},
        {'$limit': criteria['count']}
    ]

    cherrypy.log('%s' % (aggregation_pipe),
                 context='TS.SEARCH_TAGS.aggregation_pipe',
                 severity=logging.INFO)

    aggregation_result = db.project.aggregate(aggregation_pipe)

    return {'records': [record['_id'] for record in list(aggregation_result)]}
def approval(criteria):
    """
    Approve or reject an expence
    POST /data/approval/
    Expects { 'project_id' : string, 'user_id' : string, 'expence_id|trip_id' : string,
              'action': approve|reject, 'note' : string }
    Returns { 'error' : string, 'status' : integer }
    """

    validate_request('approval', criteria)
    sanified_criteria = sanitize_objectify_json(criteria)
    check_datamine_permissions('approval', sanified_criteria)

    # The current user can approve only approvals with status >= conf_approval_flow.index(group)
    owner_status = get_role_approval_step(cherrypy.session['_ts_user']['group'])
    draft_status = get_role_approval_step('draft')

    # Determine whether an expence or a trip is being approved
    exp_id = sanified_criteria.get('expence_id')
    trp_id = sanified_criteria.get('trip_id')
    expence_type = 'expences' if exp_id else 'trips'
    expence_id = exp_id if exp_id else trp_id

    # Define the match
    match_expence = {
        '_id': ObjectId(criteria['project_id']),
        '%s._id' % expence_type: ObjectId(expence_id),
    }

    # Define status limits only if the user is not an administrator:
    # the status must be the owner's step or draft
    if owner_status != 0:
        match_expence['$or'] = [
            {'%s.status' % expence_type: owner_status},
            {'%s.status' % expence_type: draft_status}
        ]

    # Limit by user id
    user_id = sanified_criteria.get('user_id')
    if user_id:
        match_expence['%s.user_id' % expence_type] = user_id

    aggregation_pipe = [
        {'$unwind': '$%s' % expence_type},
        {'$match': match_expence}
    ]

    cherrypy.log('%s' % (aggregation_pipe),
                 context='TS.APPROVAL.aggregation_pipe',
                 severity=logging.INFO)

    result = list(db.project.aggregate(aggregation_pipe))
    if not result:
        raise TSValidationError("Can't find selected expence")

    found_expence = result[0]
    original_found_expence = found_expence.copy()

    # Approved
    if sanified_criteria['action'] == 'approve':
        if found_expence[expence_type]['status'] > 0:
            found_expence[expence_type]['status'] = \
                found_expence[expence_type]['status'] - 1
    # Rejected
    else:
        found_expence[expence_type]['status'] = -abs(
            found_expence[expence_type]['status'])

    if 'note' in sanified_criteria and sanified_criteria['note']:
        if 'notes' not in found_expence[expence_type]:
            found_expence[expence_type]['notes'] = []
        found_expence[expence_type]['notes'].append(sanified_criteria['note'])

    cherrypy.log('%s' % (found_expence),
                 context='TS.APPROVALS.found_expence',
                 severity=logging.INFO)

    # Pull the original element
    db.project.update({'_id': ObjectId(criteria['project_id'])},
                      {'$pull': {expence_type: {'_id': ObjectId(expence_id)}}})

    # Push the modified element (a hack to avoid pushing the entire array)
    db.project.update({'_id': ObjectId(criteria['project_id'])},
                      {'$push': {expence_type: found_expence[expence_type]}})

    approval_result = {}

    # Status
    approval_result['status'] = found_expence[expence_type]['status']

    # Approver data
    approver_data = dict(
        (key, value)
        for key, value in cherrypy.session['_ts_user'].iteritems()
        if key in ('username', 'name', 'surname', 'email'))

    # Notifications
    notifications_result = notifications.notify_expence(
        found_expence, expence_type, approver_data)
    if notifications_result:
        approval_result['notifications'] = notifications_result

    return approval_result
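# Illustrative only: an approval criteria payload with placeholder values.
# Exactly one of 'expence_id' or 'trip_id' is expected; 'action' is either
# 'approve' or 'reject', and 'note' is optional.
_EXAMPLE_APPROVAL_CRITERIA = {
    'project_id': '5331a1b2c3d4e5f6a7b8c9d0',  # placeholder ObjectId string
    'user_id': 'user-id-placeholder',
    'expence_id': '5331a1b2c3d4e5f6a7b8c9d1',  # placeholder ObjectId string
    'action': 'approve',
    'note': 'placeholder approval note'
}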
def search_approvals(criteria):
    """
    Search expences
    POST /data/search_approvals/
    Expects { 'projects_id' : [ ], 'user_id': string, 'type': trips|expences|any,
              'status': toapprove|approved|rejected|any }
    Returns { 'error' : string, 'records' : [] }
    """

    validate_request('search_approvals', criteria)
    sanified_criteria = sanitize_objectify_json(criteria)
    check_datamine_permissions('search_approvals', sanified_criteria)

    # Get the flow status number relative to the current user
    owner_status = get_role_approval_step(cherrypy.session['_ts_user']['group'])

    # Search only expences, only trips, or both
    type_requested = sanified_criteria.get('type', 'any')
    if type_requested == 'any':
        aggregations_types = ['trips', 'expences']
    else:
        aggregations_types = [type_requested]

    records = {'trips': [], 'expences': []}

    for aggregation_type in aggregations_types:

        # Prepare the status filter
        status_requested = sanified_criteria.get('status', 'toapprove')

        # An administrator can see the whole status range
        if owner_status == 0:
            if status_requested == 'toapprove':
                match_project_status = {'%s.status' % aggregation_type: {'$gt': 0}}
            elif status_requested == 'approved':
                match_project_status = {'%s.status' % aggregation_type: 0}
            elif status_requested == 'rejected':
                match_project_status = {'%s.status' % aggregation_type: {'$lt': 0}}
            else:
                match_project_status = {}
        # A permitted user can see only specific statuses
        else:
            if status_requested == 'toapprove':
                match_project_status = {'%s.status' % aggregation_type: owner_status}
            elif status_requested == 'approved':
                match_project_status = {'%s.status' % aggregation_type: 0}
            elif status_requested == 'rejected':
                match_project_status = {'%s.status' % aggregation_type: {'$lt': 0}}
            else:
                match_project_status = {
                    '$or': [
                        {'%s.status' % aggregation_type: owner_status},
                        {'%s.status' % aggregation_type: 0},
                        {'%s.status' % aggregation_type: {'$lt': 0}}
                    ]
                }

        # If projects_id is not set, allow only the user's managed_projects
        projects_requested = [
            ObjectId(p) for p in sanified_criteria.get(
                'projects_id',
                cherrypy.session['_ts_user']['managed_projects'])
        ]
        if projects_requested:
            match_project_status.update({'_id': {'$in': projects_requested}})

        user_id = sanified_criteria.get('user_id')
        if user_id:
            match_project_status.update(
                {'%s.user_id' % aggregation_type: user_id})

        project_rename = {'%s.project_id' % aggregation_type: '$_id'}
        for key in schema['project']['properties'][aggregation_type]['items'][
                'properties'].keys():
            project_rename['%s.%s' % (aggregation_type, key)] = 1

        aggregation_pipe = [
            {'$unwind': '$%s' % aggregation_type},
            {'$match': match_project_status},
            {'$project': project_rename},
            {'$group': {'_id': '$%s' % (aggregation_type)}},
            {'$sort': {'_id.start': 1, '_id.date': 1}}
        ]

        cherrypy.log('%s' % (aggregation_pipe),
                     context='TS.SEARCH_APPROVALS.aggregation_pipe',
                     severity=logging.INFO)

        aggregation_result = db.project.aggregate(aggregation_pipe)

        records[aggregation_type] = stringify_objectid_cursor(
            [record['_id'] for record in list(aggregation_result)])

    return records
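# Illustrative only: a search_approvals criteria payload with placeholder
# values. 'projects_id' is optional and defaults to the current user's
# managed_projects; 'type' and 'status' accept the values listed in the
# docstring above.
_EXAMPLE_SEARCH_APPROVALS_CRITERIA = {
    'projects_id': ['5331a1b2c3d4e5f6a7b8c9d0'],  # placeholder ObjectId strings
    'user_id': 'user-id-placeholder',
    'type': 'expences',
    'status': 'toapprove'
}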
def report_users_hours(criteria):
    """
    Get a report grouped by users
    POST /data/report_users_hours/
    Expects { 'start' : '', 'end' : '', 'users_ids' : [], 'projects' : [],
              'hours_standard' : bool, 'hours_extra' : bool, 'tasks' : [] }
    Returns { 'error' : string, 'records' : [ { }, { }, .. ] }
    """

    validate_request('report_users_hours', criteria)
    sanified_criteria = sanitize_objectify_json(criteria)

    # Prepare the aggregation pipe
    dates_match = {
        "date": {
            '$lte': sanified_criteria['end'],
            '$gte': sanified_criteria['start']
        }
    }
    match_users_projects_extras_tasks = {}

    # Match optional users
    if sanified_criteria['users_ids']:
        match_users_projects_extras_tasks['users.user_id'] = {
            '$in': sanified_criteria['users_ids']
        }

    # Match optional projects filters
    if sanified_criteria['projects']:
        match_users_projects_extras_tasks['users.hours.project'] = {
            '$in': sanified_criteria['projects']
        }

    # Match the optional extra hours filter
    if sanified_criteria['hours_standard'] == True and sanified_criteria['hours_extra'] == False:
        match_users_projects_extras_tasks['users.hours.isextra'] = False
    elif sanified_criteria['hours_standard'] == False and sanified_criteria['hours_extra'] == True:
        match_users_projects_extras_tasks['users.hours.isextra'] = True

    # Match the optional task filter
    if sanified_criteria['tasks']:
        match_users_projects_extras_tasks['users.hours.task'] = {
            '$in': sanified_criteria['tasks']
        }

    check_datamine_permissions('report_users_hours',
                               match_users_projects_extras_tasks)

    aggregation_pipe = [
        {'$match': dates_match},
        {'$unwind': '$users'},
        {'$unwind': '$users.hours'},
        {'$match': match_users_projects_extras_tasks},
        {'$group': {
            '_id': {'user_id': '$users.user_id', 'date': '$date'},
            'hours': {'$push': '$users.hours'}
        }},
        {'$sort': {'_id.user_id': 1, '_id.date': 1}}
    ]

    cherrypy.log(aggregation_pipe.__repr__(),
                 context='TS.REPORT_USER_HOURS.aggregation',
                 severity=logging.INFO)

    aggregation_result = db.day.aggregate(aggregation_pipe)

    return {'records': stringify_objectid_cursor(list(aggregation_result))}
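# Illustrative only: a report_users_hours criteria payload with placeholder
# values. Empty lists leave the corresponding filter unset; setting exactly one
# of 'hours_standard'/'hours_extra' to True restricts the report to that kind
# of hours.
_EXAMPLE_REPORT_USERS_HOURS_CRITERIA = {
    'start': '2014-03-01',
    'end': '2014-03-31',
    'users_ids': ['user-id-placeholder'],
    'projects': [],
    'hours_standard': True,
    'hours_extra': False,
    'tasks': []
}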
def search_expences(criteria):
    """
    Get expences
    POST /data/search_expences/
    Expects { 'start': date, 'end': date, 'status': [ integer, integer, .. ],
              'user_id': string, 'project_id': string, 'employee_id': string,
              'responsible_id' : string }
    Returns { 'error' : string, 'records' : [ { }, { }, .. ] }
    """

    validate_request('search_expences', criteria)
    check_datamine_permissions('search_expences', criteria)
    sanified_criteria = sanitize_objectify_json(criteria)

    # Prepare the aggregation pipe
    projects_ids_matches = {}

    employee_id = sanified_criteria.get('employee_id')
    if employee_id is not None:
        projects_ids_matches['employees._id'] = ObjectId(employee_id)

    responsible_id = sanified_criteria.get('responsible_id')
    if responsible_id is not None:
        projects_ids_matches['responsibles._id'] = ObjectId(responsible_id)

    project_id = sanified_criteria.get('project_id')
    if project_id is not None:
        projects_ids_matches['_id'] = ObjectId(project_id)

    expences_matches = {}

    user_id = sanified_criteria.get('user_id')
    if user_id is not None:
        expences_matches['expences.user_id'] = user_id

    # Build the expence date range filter, combining both bounds when present
    # (assigning 'expences.date' twice would overwrite the start bound with the end bound)
    date_filter = {}
    date_start = sanified_criteria.get('start')
    if date_start is not None:
        date_filter['$gte'] = date_start
    date_end = sanified_criteria.get('end')
    if date_end is not None:
        date_filter['$lte'] = date_end
    if date_filter:
        expences_matches['expences.date'] = date_filter

    status = sanified_criteria.get('status')
    if status is not None:
        expences_matches['expences.status'] = {'$in': status}

    expences_rename = {'expences.project_id': '$_id'}
    for expence_key in schema['project']['properties']['expences']['items'][
            'properties'].keys():
        expences_rename['expences.%s' % expence_key] = 1

    aggregation_pipe = [
        {'$match': projects_ids_matches},
        {'$unwind': '$expences'},
        {'$match': expences_matches},
        {'$project': expences_rename},
        {'$group': {'_id': '$expences'}}
    ]

    cherrypy.log('%s' % (aggregation_pipe),
                 context='TS.SEARCH_EXPENCES.aggregation_pipe',
                 severity=logging.INFO)

    aggregation_result = db.project.aggregate(aggregation_pipe)

    return {
        'records': stringify_objectid_cursor(
            [record['_id'] for record in list(aggregation_result)])
    }
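# Illustrative only: a search_expences criteria payload with placeholder
# values. Every key is optional; only the keys that are present become filters
# in the aggregation above.
_EXAMPLE_SEARCH_EXPENCES_CRITERIA = {
    'start': '2014-03-01',
    'end': '2014-03-31',
    'status': [0],
    'user_id': 'user-id-placeholder',
    'project_id': '5331a1b2c3d4e5f6a7b8c9d0'  # placeholder ObjectId string
}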