def on_message(client, userdata, msg):  # pylint: disable=unused-argument
    """MQTT message callback: parse the JSON payload and dispatch on its 'type' field.

    Handles sequence updates, controller watchdog pings, and email/SMS send
    requests. Assumes messages are published on controller channels, i.e. the
    MQTT topic is the controller's resource path (without leading slash).
    """
    # print('MQTT: %s %s' % (msg.topic, msg.payload.decode()))
    message_struct = json.loads(msg.payload.decode())
    message_type = message_struct['type']

    # store a new value in a sequence resource
    if message_type == 'update_sequence':
        controller = find_resource('/' + msg.topic)  # for now we assume these messages are published on controller channels
        if controller and controller.type == Resource.CONTROLLER_FOLDER:
            parameters = message_struct['parameters']
            seq_name = parameters['sequence']
            if not seq_name.startswith('/'):  # handle relative sequence names
                resource = Resource.query.filter(Resource.id == controller.id).one()
                # this is ok for now since .. doesn't have special meaning in resource path
                # (no way to escape controller folder)
                seq_name = resource.path() + '/' + seq_name
            timestamp = parameters.get('timestamp', '')  # fix(soon): need to convert to datetime
            if not timestamp:
                timestamp = datetime.datetime.utcnow()
            value = parameters['value']
            if 'encoded' in parameters:
                value = base64.b64decode(value)  # remove this; require clients to use REST POST for images
            resource = find_resource(seq_name)
            if not resource:
                return
            system_attributes = json.loads(resource.system_attributes) if resource.system_attributes else None
            # NOTE(review): an 'encoded' value on an image sequence gets base64-decoded
            # twice (here and above) — confirm that is intended
            if system_attributes and system_attributes['data_type'] == Resource.IMAGE_SEQUENCE:
                value = base64.b64decode(value)
            else:
                value = str(value)
            # don't emit message since the message is already in the system
            update_sequence_value(resource, seq_name, timestamp, value, emit_message=False)
            db.session.commit()

    # update controller watchdog status
    elif message_type == 'watchdog':
        controller = find_resource('/' + msg.topic)  # for now we assume these messages are published on controller channels
        if controller and controller.type == Resource.CONTROLLER_FOLDER:
            controller_status = ControllerStatus.query.filter(ControllerStatus.id == controller.id).one()
            controller_status.last_watchdog_timestamp = datetime.datetime.utcnow()
            db.session.commit()

    # send emails
    elif message_type == 'send_email':
        controller = find_resource('/' + msg.topic)  # for now we assume these messages are published on controller channels
        if controller and controller.type == Resource.CONTROLLER_FOLDER:
            print('sending email')
            handle_send_email(controller.id, message_struct['parameters'])

    # send SMS messages
    elif message_type == 'send_sms' or message_type == 'send_text_message':
        controller = find_resource('/' + msg.topic)  # for now we assume these messages are published on controller channels
        if controller and controller.type == Resource.CONTROLLER_FOLDER:
            handle_send_text_message(controller.id, message_struct['parameters'])
def put(self, pin):
    """Associate a human-entered PIN with a controller on behalf of the current user."""
    # we assume PIN entered by human user
    if current_user.is_anonymous:
        abort(403)
    # look up the target controller and confirm the caller may write to it
    controller = find_resource(request.values['controller'])
    if not controller:
        abort(400)
    if access_level(controller.query_permissions()) < ACCESS_LEVEL_WRITE:
        abort(403)
    # fetch the PIN record
    # fix(soon): how do we avoid abuse of this? (we do know user and org)
    try:
        record = Pin.query.filter(Pin.pin == pin).one()
    except NoResultFound:
        abort(404)
    # reject PINs created more than 30 minutes ago
    expiry_cutoff = datetime.datetime.utcnow() - datetime.timedelta(minutes=30)
    if record.creation_timestamp < expiry_cutoff:
        return {'status': 'error', 'message': 'PIN has expired.'}
    # associate pin with controller (and update other fields)
    record.enter_timestamp = datetime.datetime.utcnow()
    record.user_id = current_user.id
    record.controller_id = controller.id
    db.session.commit()
    return {'status': 'ok'}
def post(self, resource_path):
    """Create a file resource at resource_path from base64-encoded request data."""
    # note: should check parent permissions, not org permissions, but no need to fix since we'll delete this code
    org_resource = find_resource('/' + resource_path.split('/')[0])
    if not org_resource:
        abort(403)
    if access_level(org_resource.query_permissions()) < ACCESS_LEVEL_WRITE:
        abort(403)
    # decode the file contents; 'contents' is a legacy alias for 'data'
    args = request.values
    encoded = args['contents'] if 'contents' in args else args.get('data', '')  # fix(clean): remove 'contents' case
    data = base64.b64decode(encoded)
    # determine timestamps, defaulting creation to now and modification to creation
    if 'creation_timestamp' in args:
        creation_timestamp = parse_json_datetime(args['creation_timestamp'])
    else:
        creation_timestamp = datetime.datetime.utcnow()
    if 'modification_timestamp' in args:
        modification_timestamp = parse_json_datetime(args['modification_timestamp'])
    else:
        modification_timestamp = creation_timestamp
    # create the file or folder
    r = _create_file(resource_path, creation_timestamp, modification_timestamp, data)
    return {'status': 'ok', 'id': r.id}
def put(self):
    """Store a batch of sequence values supplied in the 'values' request field.

    If the client supplies a timestamp that drifts more than 30 seconds from
    server time, a per-controller correction (stored in the controller status
    attributes) is applied. Values are grouped by folder; the folder lookup and
    write-permission check are reused across consecutive items in one folder.
    """
    values = json.loads(request.values['values'])
    if 'timestamp' in request.values:
        timestamp = parse_json_datetime(request.values['timestamp'])
        # check for drift
        delta = datetime.datetime.utcnow() - timestamp
        drift = delta.total_seconds()
        # print 'drift', drift
        if abs(drift) > 30:
            # get current controller correction
            # fix(later): support user updates as well?
            auth = request.authorization
            key = find_key_fast(auth.password)  # key is provided as HTTP basic auth password
            if key and key.access_as_controller_id:
                controller_id = key.access_as_controller_id
                controller_status = ControllerStatus.query.filter(ControllerStatus.id == controller_id).one()
                attributes = json.loads(controller_status.attributes)
                correction = attributes.get('timestamp_correction', 0)
                # if stored correction is reasonable, use it; otherwise store new correction
                if abs(correction - drift) > 100:
                    correction = drift
                    attributes['timestamp_correction'] = drift
                    controller_status.attributes = json.dumps(attributes)
                    db.session.commit()
                    # print 'storing new correction (%.2f)' % correction
                else:
                    pass
                    # print 'applying previous correction (%.2f)' % correction
                timestamp += datetime.timedelta(seconds=correction)
    else:
        timestamp = datetime.datetime.utcnow()
    # for now, assume all sequences in same folder
    items = list(values.items())
    if items:
        # sort by keys so we can re-use folder lookup and permission check
        # between items in same folder
        items = sorted(items)
        folder_resource = None
        folder_name = None
        for (full_name, value) in items:
            item_folder_name = full_name.rsplit('/', 1)[0]
            if item_folder_name != folder_name:  # if this folder doesn't match the folder resource record we have
                folder_name = item_folder_name
                folder_resource = find_resource(folder_name)
                if folder_resource and access_level(folder_resource.query_permissions()) < ACCESS_LEVEL_WRITE:
                    folder_resource = None  # don't have write access
            if folder_resource:
                seq_name = full_name.rsplit('/', 1)[1]
                try:
                    resource = (
                        Resource.query
                        .filter(Resource.parent_id == folder_resource.id, Resource.name == seq_name, not_(Resource.deleted))
                        .one()
                    )
                    update_sequence_value(resource, full_name, timestamp, str(value), emit_message=True)  # fix(later): revisit emit_message
                except NoResultFound:
                    pass
        db.session.commit()
def post(self):
    """Queue a message on a folder channel on behalf of the authenticated key holder."""
    folder_path = request.values.get('folderPath', request.values.get('folder_path', ''))
    # folder path is required and must be absolute
    if not folder_path or not folder_path.startswith('/'):
        abort(400)
    folder = find_resource(folder_path)
    if not folder:
        abort(404)
    if access_level(folder.query_permissions()) < ACCESS_LEVEL_WRITE:
        abort(403)
    # require a valid key supplied via HTTP basic auth password
    if not request.authorization:
        abort(403)
    key = find_key(request.authorization.password)
    if not key:
        abort(403)
    message_type = request.values['type']
    parameters = json.loads(request.values['parameters'])
    # exactly one sender id is set, depending on whether the key acts as a controller or a user
    message_queue.add(folder.id, None, message_type, parameters,
                      sender_controller_id=key.access_as_controller_id,
                      sender_user_id=key.access_as_user_id)
    return {'status': 'ok'}
def post(self):
    """Queue a message on a folder channel (legacy endpoint that also accepts auth codes)."""
    folder_path = request.values.get('folderPath', request.values.get('folder_path', ''))
    if not folder_path:
        abort(400)
    folder = find_resource(folder_path)
    if not folder:
        abort(404)
    if access_level(folder.query_permissions()) < ACCESS_LEVEL_WRITE:
        abort(403)
    message_type = request.values['type']  # renamed from `type` to avoid shadowing the builtin
    parameters = json.loads(request.values['parameters'])
    auth_code = request.values.get('authCode', '')  # fix(soon): migrate away from this
    if auth_code:
        key = find_key_by_code(auth_code)  # fix(faster): we already looked up key in access_level function
    elif request.authorization:
        key = find_key(request.authorization.password)
    else:
        key = None  # bug fix: `key` was previously unbound here, raising NameError (500) instead of 403
    if not key:
        abort(403)
    sender_controller_id = key.access_as_controller_id  # None if access as user
    sender_user_id = key.access_as_user_id  # None if access as controller
    message_queue.add(folder.id, message_type, parameters,
                      sender_controller_id=sender_controller_id,
                      sender_user_id=sender_user_id)
    return {'status': 'ok'}
def send_process_status(self):
    """Publish a status message describing this process and its websocket connections."""
    from main.app import db  # import here to avoid import loop
    from main.app import message_queue  # import here to avoid import loop
    from main.resources.resource_util import find_resource  # import here to avoid import loop
    process_id = os.getpid()
    # summarize each open websocket connection
    connections = [
        {
            'connected': conn.connected(),
            'controller_id': conn.controller_id,
            'user_id': conn.user_id,
            'auth_method': conn.auth_method,
            'process_id': process_id,
            'subscriptions': [sub.as_dict() for sub in conn.subscriptions],
        }
        for conn in self.connections
    ]
    parameters = {
        'process_id': process_id,
        'clients': connections,  # fix(later): rename to connections?
        'db_pool': db.engine.pool.size(),
        'db_conn': db.engine.pool.checkedout(),
    }
    system_folder_id = find_resource('/system').id
    message_queue.add(system_folder_id, '/system', 'processStatus', parameters)
def create_flow_user(email, username, password, fullname, is_sso, is_admin):
    """Create a flow user, add them to the testing organization, and build their student folders.

    Returns the new User record, or None if the username already exists.
    """
    # check if user exists
    user = User.query.filter(User.user_name == username).first()
    if user is not None:
        print("User %s exists." % (username))
        return
    user_type = User.SYSTEM_ADMIN if is_admin else User.STANDARD_USER
    # create user
    print("Creating user %s" % (username))
    user_id = create_user(email, username, password, fullname, user_type)
    # add user to flow organization
    print("Creating organization user.")
    org_user = OrganizationUser()
    org_user.organization_id = find_resource('/testing').id
    org_user.user_id = user_id
    org_user.is_admin = is_admin
    db.session.add(org_user)
    db.session.commit()
    # create a folder for this user to store their programs
    # and a folder for recorded datasets (sequences)
    folders = [
        'testing/student-folders/%s/programs' % (username),
        'testing/student-folders/%s/datasets' % (username),
    ]
    for folder in folders:
        print("Creating student folder %s." % (folder))
        _create_folders(folder)
    # add some user metadata
    path = '%s/%s/%s/userinfo' % ('testing', 'student-folders', username)
    content = json.dumps({'is_sso': is_sso})
    now = datetime.datetime.now()  # NOTE(review): rest of the file uses utcnow(); confirm local time is intended here
    _create_file(path, now, now, content)  # fix: result was previously bound to an unused local
    # print('Created flow user: %s' % (email))
    return User.query.filter(User.id == user_id).first()
def _list():
    """Return a JSON listing of the children of `path`, attaching parsed metadata where available."""
    parent = find_resource(path)
    children = Resource.query.filter(Resource.parent_id == parent.id, Resource.deleted == False)
    items = []
    for child in children:
        child_path = path + "/" + child.name
        metadata = None
        if type == 'datasets':
            # dataset folders carry a 'metadata' file; tag it with its location
            meta_resource = find_resource(child_path + "/metadata")
            if meta_resource is not None:
                raw = read_resource(meta_resource)
                if raw is not None:
                    metadata = json.loads(raw)
                    metadata['recording_location'] = child_path
        elif type == 'sequences':
            # only the child literally named 'metadata' is parsed for sequences
            if child.name == 'metadata':
                meta_resource = find_resource(child_path)
                if meta_resource is not None:
                    raw = read_resource(meta_resource)
                    if raw is not None:
                        metadata = json.loads(raw)
        elif type == 'programs':
            meta_resource = find_resource(child_path + "/metadata")
            if meta_resource is not None:
                raw = read_resource(meta_resource)
                if raw is not None:
                    metadata = json.loads(raw)
        items.append({'name': child.name, 'metadata': metadata})
    return json.dumps({
        'success': True,
        'message': 'Listed %s' % (path),
        'items': items
    })
def worker_log(worker_name, message):
    """Print a worker log line and append it to the /system/worker/log sequence."""
    entry = worker_name + ': ' + message
    print(entry)
    name = '/system/worker/log'
    log_resource = find_resource(name)
    if not log_resource:
        print('worker log (%s) missing' % name)
        return
    update_sequence_value(log_resource, name, datetime.datetime.utcnow(), str(entry))
def delete_flow_user(username):
    """Remove a flow user along with their student folder and organization membership."""
    # look up the user record by username
    user = User.query.filter(User.user_name == username).first()
    if user is None:
        print("No such user %s." % (username))
        return
    # delete the user's student folder, if present
    student_folder = find_resource('/testing/student-folders/%s' % (username))
    if student_folder is None:
        print("No student folder to delete.")
    else:
        print("Deleting student folder %s." % (student_folder.name))
        db.session.delete(student_folder)
        db.session.commit()
    # delete the organization membership, if present
    org_id = find_resource('/testing').id
    membership = OrganizationUser.query.filter(
        OrganizationUser.organization_id == org_id,
        OrganizationUser.user_id == user.id).first()
    if membership is None:
        print("No organization user to delete.")
    else:
        print("Deleting organization user.")
        db.session.delete(membership)
        db.session.commit()
    # finally remove the user record itself
    db.session.delete(user)
    db.session.commit()
    print('Deleted flow user: %s.' % (username))
def create_admin_user(email_address, password):
    """Create a new system administrator user and add them to the system organization."""
    assert '.' in email_address and '@' in email_address
    admin_id = create_user(email_address, '', password, 'System Admin', User.SYSTEM_ADMIN)
    membership = OrganizationUser()  # add to system organization
    membership.organization_id = find_resource('/system').id
    membership.user_id = admin_id
    membership.is_admin = True
    db.session.add(membership)
    db.session.commit()
def _load():
    """Load the resource at `path` and return its contents as a JSON response."""
    resource = find_resource(path)
    if resource is None:
        # bug fix: previously fell through and crashed on read_resource(None) /
        # resource.name; mirror the error shape used by the sibling _delete()
        return json.dumps({
            'success': False,
            'message': 'Cannot find file %s.' % (path)
        })
    data = read_resource(resource)
    if data is not None:
        data = data.decode('utf-8')  # stored bytes -> text for the JSON payload
    return json.dumps({
        'success': True,
        'message': 'Loaded file %s.' % (resource.name),
        'content': data
    }, ensure_ascii=False)
def get_flow_userinfo(username):
    """Return the parsed userinfo metadata dict for a flow student, or {} if unavailable."""
    path = '%s/%s/%s/userinfo' % ('testing', 'student-folders', username)
    resource = find_resource(path)
    if resource is None:
        return {}
    data = read_resource(resource)
    if data is None:
        # bug fix: read_resource can return None (see _load), and
        # json.loads(None) raises TypeError
        return {}
    return json.loads(data)
def put(self):
    """Store a batch of sequence values (all assumed to live in one folder).

    Applies a per-controller timestamp correction when the client-reported
    timestamp drifts more than 30 seconds from server time.
    """
    values = json.loads(request.values['values'])
    if 'timestamp' in request.values:
        timestamp = parse_json_datetime(request.values['timestamp'])
        # check for drift between client-reported time and server time
        drift = (datetime.datetime.utcnow() - timestamp).total_seconds()
        if abs(drift) > 30:
            # get current controller correction
            # fix(later): support user updates as well?
            auth = request.authorization
            key = find_key_fast(auth.password)  # key is provided as HTTP basic auth password
            if key and key.access_as_controller_id:
                controller_id = key.access_as_controller_id
                controller_status = ControllerStatus.query.filter(ControllerStatus.id == controller_id).one()
                attributes = json.loads(controller_status.attributes)
                correction = attributes.get('timestamp_correction', 0)
                # if stored correction is reasonable, use it; otherwise store new correction
                if abs(correction - drift) > 100:
                    correction = drift
                    attributes['timestamp_correction'] = drift
                    controller_status.attributes = json.dumps(attributes)
                    db.session.commit()
                timestamp += datetime.timedelta(seconds=correction)
    else:
        timestamp = datetime.datetime.utcnow()
    if not values:
        return  # robustness fix: an empty batch previously raised StopIteration below
    # for now, assume all sequences are in the same folder
    first_name = next(iter(values))  # bug fix: was py2-only values.iterkeys().next()
    folder_name = first_name.rsplit('/', 1)[0]
    folder_resource = find_resource(folder_name)
    if folder_resource:  # and access_level(folder_resource.query_permissions()) >= ACCESS_LEVEL_WRITE:
        for (full_name, value) in values.items():  # bug fix: was py2-only values.iteritems()
            seq_name = full_name.rsplit('/', 1)[1]
            try:
                resource = Resource.query.filter(Resource.parent_id == folder_resource.id,
                                                 Resource.name == seq_name,
                                                 Resource.deleted == False).one()
                update_sequence_value(resource, full_name, timestamp, str(value), emit_message=False)  # fix(later): revisit emit_message
            except NoResultFound:
                pass
        db.session.commit()
def delete(self, resource_path):
    """Delete a resource, or only its stored revisions when data_only is set."""
    target = find_resource('/' + resource_path)
    if not target:
        abort(404)  # fix(later): revisit to avoid leaking file existance
    if access_level(target.query_permissions()) < ACCESS_LEVEL_WRITE:
        abort(403)
    if request.values.get('data_only', False):
        # fix(later): support delete_min_timestamp and delete_max_timestamp to delete subsets
        ResourceRevision.query.filter(ResourceRevision.resource_id == target.id).delete()
    else:
        target.deleted = True  # soft delete; the record itself is kept
    db.session.commit()
    return {'status': 'ok', 'id': target.id}
def _delete():
    """Delete the resource at `path`, returning a JSON status payload."""
    resource = find_resource(path)
    if resource is None:
        return json.dumps({
            'success': False,
            'message': 'Cannot find file %s.' % (filename)
        })
    # bug fix: capture the name before delete+commit — accessing attributes of a
    # deleted instance after commit can trigger a refresh of the deleted row and raise
    name = resource.name
    db.session.delete(resource)
    db.session.commit()
    return json.dumps({
        'success': True,
        'message': 'Deleted file %s.' % (name)
    })
(options, args) = parser.parse_args() # DB operations if options.init_db: print('creating/updating database') db.create_all() create_system_resources() elif options.create_admin: parts = options.create_admin.split(':') email_address = parts[0] password = parts[1] assert '.' in email_address and '@' in email_address user_id = create_user(email_address, '', password, 'System Admin', User.SYSTEM_ADMIN) org_user = OrganizationUser() # add to system organization org_user.organization_id = find_resource('/system').id org_user.user_id = user_id org_user.is_admin = True db.session.add(org_user) db.session.commit() print('created system admin: %s' % email_address) elif options.migrate_db: migrate_keys() # start the debug server else: if options.enable_web_sockets: print('running with websockets') run_with_web_sockets() else: app.run(debug=True)
def process_web_socket_message(message_struct, ws_conn):
    """Dispatch a parsed websocket message on its 'type' field.

    Handles watchdog/ping, folder subscriptions, legacy sequence updates,
    resource writes, email/SMS requests, and (by default) queues any other
    message type on a folder channel.
    """
    message_type = message_struct['type']
    message_debug = False

    # handle new connection; no longer used; websocket should be authenticated using HTTP basic auth
    if message_type == 'connect':
        pass

    # handle watchdog message (updates controller status record)
    elif message_type == 'watchdog':
        if ws_conn.controller_id:
            controller_status = ControllerStatus.query.filter(
                ControllerStatus.id == ws_conn.controller_id).one()
            controller_status.last_watchdog_timestamp = datetime.datetime.utcnow()
            db.session.commit()

    # handle ping (does nothing; used to keep connection active)
    elif message_type == 'ping':
        pass

    # handle subscription (used to subscribe to messages from one or more folders)
    elif message_type == 'subscribe':
        # process the subscription
        parameters = message_struct['parameters']
        subscriptions = parameters.get('subscriptions', [])
        for subscription in subscriptions:
            # fix(clean): remove support for folder IDs and old message args
            folder_path = subscription.get('folder', subscription.get('folderId', None))
            # NOTE(review): this reassigns the outer dispatch variable `message_type`
            # inside the loop — confirm the shadowing is intentional
            message_type = subscription.get(
                'message_type', subscription.get('messageType', None))
            include_children = subscription.get(
                'include_children', subscription.get('includeChildren', False))
            # fix(clean): remove "[self]" option
            if folder_path == 'self' or folder_path == '[self]':
                folder_id = ws_conn.controller_id
            elif hasattr(folder_path, 'strip'):  # i.e. folder_path is a string
                resource = find_resource(folder_path)
                if not resource:
                    print('unable to find subscription folder: %s' % folder_path)
                    return
                folder_id = resource.id
            else:
                folder_id = folder_path
            # if subscription is allowed, store it
            # fix(later): send a message back if not allowed
            if ws_conn.access_level(folder_id) >= ACCESS_LEVEL_READ:
                if message_debug:
                    print('subscribe folder: %s (%d), message type: %s' % (folder_path, folder_id, message_type))
                ws_conn.subscriptions.append(
                    MessageSubscription(folder_id, message_type, include_children=include_children))

    # fix(soon): remove this case after clients are updated
    elif message_type == 'setNode' or message_type == 'updateSequence' or message_type == 'update_sequence':
        if ws_conn.controller_id:
            parameters = message_struct['parameters']
            if message_type == 'setNode':  # fix(soon): remove this case
                seq_name = parameters['node']
            else:
                seq_name = parameters['sequence']
            if not seq_name.startswith('/'):  # handle relative sequence names
                resource = Resource.query.filter(
                    Resource.id == ws_conn.controller_id).one()
                # this is ok for now since .. doesn't have special meaning in resource path
                # (no way to escape controller folder)
                seq_name = resource.path() + '/' + seq_name
            timestamp = parameters.get(
                'timestamp', '')  # fix(soon): need to convert to datetime
            if not timestamp:
                timestamp = datetime.datetime.utcnow()
            value = parameters['value']
            if 'encoded' in parameters:
                value = base64.b64decode(value)  # remove this; require clients to use REST POST for images
            resource = find_resource(seq_name)
            if not resource:
                return
            system_attributes = json.loads(
                resource.system_attributes
            ) if resource.system_attributes else None
            if system_attributes and system_attributes[
                    'data_type'] == Resource.IMAGE_SEQUENCE:
                value = base64.b64decode(value)
            else:
                value = str(value)
            update_sequence_value(resource, seq_name, timestamp, value)
            db.session.commit()

    # update a resource
    elif message_type == 'write_resource':
        parameters = message_struct['parameters']
        # NOTE(review): `'path' and 'data' in parameters` only checks 'data'
        # ('path' is a constant truthy string) — likely meant
        # `'path' in parameters and 'data' in parameters`; confirm before changing
        if 'path' and 'data' in parameters:
            path = parameters['path']
            if not path.startswith('/'):  # fix(soon): remove this after clients updated
                path = '/' + path
            data = parameters['data']
            resource = find_resource(path)
            if resource:
                if ws_conn.access_level(resource.id) >= ACCESS_LEVEL_WRITE:
                    timestamp = datetime.datetime.utcnow()
                    update_sequence_value(resource, path, timestamp, data)
                    db.session.commit()
                else:
                    socket_sender.send_error(ws_conn, 'permission error: %s' % path)
            else:
                socket_sender.send_error(ws_conn, 'resource not found: %s' % path)
        else:
            socket_sender.send_error(
                ws_conn, 'expected data and path parameters for write_resource message')

    # handle request for detailed message logging
    elif message_type == 'debug_messaging':
        parameters = message_struct['parameters']
        enable = bool(parameters['enable'])
        level = logging.DEBUG if enable else logging.INFO
        logging.getLogger().setLevel(level)

    # handle other action messages
    elif message_type in ('sendEmail', 'sendTextMessage', 'send_email', 'send_text_message'):
        if ws_conn.controller_id:  # only support these messages from controllers, not browsers
            if message_type == 'sendEmail' or message_type == 'send_email':
                handle_send_email(ws_conn.controller_id, message_struct['parameters'])
            elif message_type == 'sendTextMessage' or message_type == 'send_text_message':
                handle_send_text_message(ws_conn.controller_id, message_struct['parameters'])

    # for other types, assume that we want to create a message record
    else:
        # figure out target folder
        if 'folder' in message_struct:
            folder_name = message_struct['folder']
            if message_debug:
                print('message to folder: %s' % folder_name)
            if hasattr(folder_name, 'startswith') and folder_name.startswith('/'):
                if message_debug:
                    print('message to folder name: %s' % folder_name)
                folder = find_resource(folder_name)  # assumes leading slash
                if folder:
                    folder_id = folder.id
                    if message_debug:
                        print('message to folder id: %d' % folder_id)
                else:
                    print('message to unknown folder (%s)' % folder_name)
                    return
            else:
                folder_id = folder_name  # fix(soon): remove this case
        elif ws_conn.controller_id:
            folder_id = ws_conn.controller_id
        else:
            print('message (%s) without folder or controller; discarding' % message_type)
            return
        # if allowed, create a message for the folder
        if ws_conn.access_level(folder_id) >= ACCESS_LEVEL_WRITE:
            parameters = message_struct['parameters']
            # fix(soon): can we move this spawn above access level check (might require request context)
            Thread(target=message_queue.add, daemon=True,
                   args=[folder_id, None, message_type, parameters],
                   kwargs={
                       'sender_controller_id': ws_conn.controller_id,
                       'sender_user_id': ws_conn.user_id
                   }).start()
def on_message(client, userdata, msg):  # pylint: disable=unused-argument
    """MQTT message callback supporting both JSON and short comma-separated payloads.

    JSON payloads are dicts of {message_type: parameters} handled per type;
    non-JSON payloads starting with 's,' store a single sequence value.
    Assumes messages are published on controller channels (topic == path).
    """
    payload = msg.payload.decode()

    # handle full (JSON) messages
    if payload.startswith('{'):
        message_struct = json.loads(payload)
        for message_type, parameters in message_struct.items():

            # update sequence values; doesn't support image sequence;
            # should use REST API for image sequences
            if message_type == 'update':
                folder = find_resource('/' + msg.topic)  # for now we assume these messages are published on controller channels
                if folder and folder.type in (Resource.BASIC_FOLDER, Resource.ORGANIZATION_FOLDER, Resource.CONTROLLER_FOLDER):
                    # '$t' is a shared timestamp for all values in this message
                    timestamp = parameters.get('$t', '')
                    if timestamp:
                        timestamp = parse_json_datetime(timestamp)  # fix(soon): handle conversion errors
                    else:
                        timestamp = datetime.datetime.utcnow()
                    for name, value in parameters.items():
                        if name != '$t':
                            seq_name = '/' + msg.topic + '/' + name
                            resource = find_resource(seq_name)
                            if resource:
                                # don't emit new message since UI will receive this message
                                update_sequence_value(resource, seq_name, timestamp, value, emit_message=False)
                    db.session.commit()

            # update controller watchdog status
            elif message_type == 'watchdog':
                controller = find_resource('/' + msg.topic)  # for now we assume these messages are published on controller channels
                if controller and controller.type == Resource.CONTROLLER_FOLDER:
                    controller_status = ControllerStatus.query.filter(
                        ControllerStatus.id == controller.id).one()
                    controller_status.last_watchdog_timestamp = datetime.datetime.utcnow()
                    db.session.commit()

            # send emails
            elif message_type == 'send_email':
                controller = find_resource('/' + msg.topic)  # for now we assume these messages are published on controller channels
                if controller and controller.type == Resource.CONTROLLER_FOLDER:
                    print('sending email')
                    handle_send_email(controller.id, parameters)

            # send SMS messages
            elif message_type == 'send_sms' or message_type == 'send_text_message':
                controller = find_resource('/' + msg.topic)  # for now we assume these messages are published on controller channels
                if controller and controller.type == Resource.CONTROLLER_FOLDER:
                    handle_send_text_message(controller.id, parameters)

    # handle short (non-JSON) messages
    else:
        # print('MQTT: %s %s' % (msg.topic, payload))
        if payload.startswith('s,'):  # type 's' is "store and display new sequence value"
            # format: s,<sequence name>,<timestamp>,<value>
            parts = payload.split(',', 3)
            if len(parts) == 4:
                seq_name = '/' + msg.topic + '/' + parts[1]
                timestamp = parse_json_datetime(parts[2])  # fix(soon): handle conversion errors
                value = parts[3]
                resource = find_resource(seq_name)
                if resource and resource.type == Resource.SEQUENCE:
                    # don't emit new message since UI will receive this message
                    update_sequence_value(resource, seq_name, timestamp, value, emit_message=False)
                    db.session.commit()
def view_home():
    """Render the home page from /system/home.md (falling back to the legacy /system/home)."""
    resource = find_resource('/system/home.md') or find_resource('/system/home')  # fix(soon): remove legacy fallback
    return file_viewer(resource, is_home_page=True)
def put(self, resource_path):
    """Update a resource's name/location, view, metadata, and/or contents.

    All updates are driven by which keys are present in the request values;
    a single commit is made at the end.
    """
    r = find_resource('/' + resource_path)
    if not r:
        abort(404)  # fix(later): revisit to avoid leaking file existance
    if access_level(r.query_permissions()) < ACCESS_LEVEL_WRITE:
        abort(403)
    args = request.values

    # update resource name/location
    if 'name' in args:
        new_name = args['name']
        if new_name != r.name:
            try:
                Resource.query.filter(Resource.parent_id == r.parent_id, Resource.name == new_name,
                                      Resource.deleted == False).one()
                abort(400)  # a resource already exists with this name
            except NoResultFound:
                pass
            r.name = new_name
    if 'parent' in args:
        parent_resource = find_resource(args['parent'])  # expects leading slash
        if not parent_resource:
            abort(400)
        if access_level(parent_resource.query_permissions()) < ACCESS_LEVEL_WRITE:
            abort(403)
        try:
            Resource.query.filter(Resource.parent_id == parent_resource.id, Resource.name == r.name,
                                  Resource.deleted == False).one()
            abort(400)  # a resource already exists with this name
        except NoResultFound:
            pass
        r.parent_id = parent_resource.id

    # update view (per-user display settings)
    if 'view' in args and current_user.is_authenticated:
        try:
            resource_view = ResourceView.query.filter(ResourceView.resource_id == r.id,
                                                      ResourceView.user_id == current_user.id).one()
            resource_view.view = args['view']
        except NoResultFound:
            resource_view = ResourceView()
            resource_view.resource_id = r.id
            resource_view.user_id = current_user.id
            resource_view.view = args['view']
            db.session.add(resource_view)

    # update other resource metadata
    if 'user_attributes' in args:
        r.user_attributes = args['user_attributes']
    if r.type == Resource.SEQUENCE:
        if 'data_type' in args or 'decimal_places' in args or 'max_history' in args or 'min_storage_interval' in args or 'units' in args:
            system_attributes = json.loads(r.system_attributes)
            if 'data_type' in args:
                system_attributes['data_type'] = args['data_type']
            if args.get('decimal_places', '') != '':
                system_attributes['decimal_places'] = int(args['decimal_places'])  # fix(later): safe convert
            if args.get('max_history', '') != '':
                system_attributes['max_history'] = int(args['max_history'])  # fix(later): safe convert
            if args.get('units', '') != '':
                system_attributes['units'] = args['units']
            if args.get('min_storage_interval', '') != '':
                system_attributes['min_storage_interval'] = int(args['min_storage_interval'])  # fix(later): safe convert
            r.system_attributes = json.dumps(system_attributes)
    elif r.type == Resource.REMOTE_FOLDER:
        system_attributes = json.loads(r.system_attributes) if r.system_attributes else {}
        if 'remote_path' in args:
            system_attributes['remote_path'] = args['remote_path']
        if 'controller_id' in args:
            system_attributes['controller_id'] = args['controller_id']
        r.system_attributes = json.dumps(system_attributes)
    elif r.type == Resource.ORGANIZATION_FOLDER:
        system_attributes = json.loads(r.system_attributes) if r.system_attributes else {}
        if 'full_name' in args and args['full_name']:
            system_attributes['full_name'] = args['full_name']
        r.system_attributes = json.dumps(system_attributes)
    elif r.type == Resource.CONTROLLER_FOLDER:
        if 'status' in args:
            try:
                controller_status = ControllerStatus.query.filter(ControllerStatus.id == r.id).one()
                status = json.loads(controller_status.attributes)
                # add/update status (don't provide way to remove status fields; maybe should overwrite instead)
                status.update(json.loads(args['status']))
                controller_status.attributes = json.dumps(status)
            except NoResultFound:
                pass
    else:  # fix(soon): remove this case
        if 'system_attributes' in args:
            # note that this will overwrite any existing system attributes;
            # client must preserve any that aren't modified
            r.system_attributes = args['system_attributes']

    # update resource contents/value
    if 'contents' in args or 'data' in args:  # fix(later): remove contents option
        if 'contents' in args:
            data = args['contents']
        else:
            data = str(args['data'])  # convert unicode to regular string / fix(soon): revisit this
        timestamp = datetime.datetime.utcnow()
        if r.type == Resource.SEQUENCE:
            # bug fix: previously referenced an undefined name `resource` here
            seq_path = r.path()  # fix(faster): don't need to compute this if we were given the path as an arg
            update_sequence_value(r, seq_path, timestamp, data)
        else:
            add_resource_revision(r, timestamp, data)
            r.modification_timestamp = timestamp
    db.session.commit()
    return {'status': 'ok', 'id': r.id}
def get(self, resource_path):
    """Handle a GET request for a resource.

    Depending on the resource type and query args, returns: resource
    meta-data (`meta` arg), a folder contents listing or batch download,
    sequence values (filtered/summarized/CSV), or raw file contents.
    `resource_path` is the slash-separated path of the resource; the special
    value 'self' resolves to the controller associated with the request's key.
    Aborts with 403/404 on permission/lookup failures.
    """
    args = request.values
    result = {}

    # handle case of controller requesting info about itself
    if resource_path == 'self':
        if 'authCode' in request.values:
            auth_code = request.values.get('authCode', '')  # fix(soon): remove auth codes
            key = find_key_by_code(auth_code)
        elif request.authorization:
            key = find_key(request.authorization.password)
        else:
            key = None
        if key and key.access_as_controller_id:
            try:
                r = Resource.query.filter(Resource.id == key.access_as_controller_id).one()
            except NoResultFound:
                abort(404)
        else:
            abort(403)

    # look up the resource record
    else:
        r = find_resource('/' + resource_path)
        if not r:
            abort(404)  # fix(later): revisit to avoid leaking file existence
        if access_level(r.query_permissions()) < ACCESS_LEVEL_READ:
            abort(403)

    # if meta-data is requested, return it
    if request.values.get('meta', False):
        result = r.as_dict(extended=True)
        if request.values.get('include_path', False):
            result['path'] = r.path()

    # otherwise return the resource data
    else:

        # if folder, return contents list or zip of collection of files
        # (types 10-19 are folder types — presumably; confirm against Resource type constants)
        if r.type >= 10 and r.type < 20:

            # multi-file download
            if 'ids' in args and args.get('download', False):
                ids = args['ids'].split(',')
                return batch_download(r, ids)

            # contents list
            else:
                recursive = request.values.get('recursive', False)
                type_name = request.values.get('type', None)
                if type_name:
                    # renamed from `type` to avoid shadowing the builtin
                    type_number = resource_type_number(type_name)
                else:
                    type_number = None
                filter_str = request.values.get('filter', None)  # renamed from `filter` (builtin)
                extended = request.values.get('extended', False)
                result = resource_list(r.id, recursive, type_number, filter_str, extended)

        # if sequence, return value(s)  # fix(later): merge with file case?
        elif r.type == Resource.SEQUENCE:

            # get parameters
            text = request.values.get('text', '')
            download = request.values.get('download', False)
            count = int(request.values.get('count', 1))
            start_timestamp = request.values.get('start_timestamp', '')
            end_timestamp = request.values.get('end_timestamp', '')
            if start_timestamp:
                try:
                    start_timestamp = parse_json_datetime(start_timestamp)
                except Exception:  # was a bare except; narrowed to Exception so system exits propagate
                    abort(400, 'Invalid date/time.')
            if end_timestamp:
                try:
                    end_timestamp = parse_json_datetime(end_timestamp)
                except Exception:
                    abort(400, 'Invalid date/time.')

            # if filters specified, assume we want a sequence of values
            if text or start_timestamp or end_timestamp or count > 1:

                # get summary of values
                if int(request.values.get('summary', False)):
                    return sequence_value_summary(r.id)

                # get preliminary set of values
                resource_revisions = ResourceRevision.query.filter(ResourceRevision.resource_id == r.id)

                # apply filters (if any)
                if text:
                    # fix: was `filter(text in ResourceRevision.data)`, which evaluated the
                    # Python `in` operator instead of building a SQL LIKE filter
                    resource_revisions = resource_revisions.filter(ResourceRevision.data.contains(text))
                if start_timestamp:
                    resource_revisions = resource_revisions.filter(ResourceRevision.timestamp >= start_timestamp)
                if end_timestamp:
                    resource_revisions = resource_revisions.filter(ResourceRevision.timestamp <= end_timestamp)
                resource_revisions = resource_revisions.order_by('timestamp')
                if resource_revisions.count() > count:
                    resource_revisions = resource_revisions[-count:]  # fix(later): is there a better/faster way to do this?

                # return data as CSV download
                if download:
                    # timezone = r.root().system_attributes['timezone']  # fix(soon): use this instead of UTC
                    lines = ['utc_timestamp,value\n']
                    for rr in resource_revisions:
                        lines.append('%s,%s\n' % (rr.timestamp.strftime('%Y-%m-%d %H:%M:%S.%f'), rr.data))
                    result = make_response(''.join(lines))
                    result.headers['Content-Type'] = 'application/octet-stream'
                    result.headers['Content-Disposition'] = 'attachment; filename=' + r.name + '.csv'
                    return result

                # return data as JSON
                else:
                    epoch = datetime.datetime.utcfromtimestamp(0)  # fix(clean): merge with similar code for sequence viewer
                    timestamps = [(rr.timestamp.replace(tzinfo=None) - epoch).total_seconds() for rr in resource_revisions]  # fix(clean): use some sort of unzip function
                    values = [rr.data for rr in resource_revisions]
                    units = json.loads(r.system_attributes).get('units', None)
                    return {'name': r.name, 'units': units, 'timestamps': timestamps, 'values': values}

            # if no filter assume just want current value
            # fix(later): should instead provide all values and have a separate way to get more recent value?
            else:
                rev = request.values.get('rev')
                if rev:
                    rev = int(rev)  # fix(soon): safe int conversion
                result = make_response(read_resource(r, revision_id=rev))
                data_type = json.loads(r.system_attributes)['data_type']
                if data_type == Resource.IMAGE_SEQUENCE:
                    result.headers['Content-Type'] = 'image/jpeg'
                else:
                    result.headers['Content-Type'] = 'text/plain'

        # if file, return file data/contents
        else:
            data = read_resource(r)
            if not data:
                abort(404)
            name = r.name
            if request.values.get('convert_to', request.values.get('convertTo', '')) == 'xls' and r.name.endswith('csv'):
                data = convert_csv_to_xls(data)
                # fix: replace only the suffix; `name.replace('csv', 'xls')` corrupted names
                # containing 'csv' elsewhere (e.g. 'csv_export.csv')
                name = name[:-len('csv')] + 'xls'
            result = make_response(data)
            result.headers['Content-Type'] = 'application/octet-stream'
            if request.values.get('download', False):
                result.headers['Content-Disposition'] = 'attachment; filename=' + name
    return result
def process_web_socket_message(message_struct, ws_conn):
    """Dispatch a single web socket message.

    `message_struct` is the decoded JSON message (expects a 'type' key and,
    for most types, a 'parameters' dict); `ws_conn` is the connection object
    holding auth state (`controller_id`, `user_id`, `subscriptions`, the
    underlying `ws`). Handles connection/auth, watchdog, subscriptions,
    sequence updates, resource writes, and email/SMS actions; any other
    message type is queued as a folder message record.
    """
    msg_type = message_struct['type']  # renamed from `type` to avoid shadowing the builtin
    message_debug = False

    # handle new connection (updates controller status record)
    if msg_type == 'connect':  # fix(soon): remove this
        parameters = message_struct['parameters']
        print('connect message')  # fix: was a Python 2 print statement; file otherwise uses print()

        # clients/controllers should send authCode in connect message
        if 'authCode' in parameters:
            auth_code = parameters['authCode']
            key = find_key_by_code(auth_code)
            if key and key.access_as_controller_id:
                controller_resource = Resource.query.filter(
                    Resource.id == key.access_as_controller_id).one()

                # handle child controller: look for a resource with the given name
                # that is a descendent of the controller referenced by the key
                if 'name' in parameters:
                    key_resource = controller_resource
                    controller_resource = None
                    candidate_resources = Resource.query.filter(
                        Resource.name == parameters['name'], Resource.deleted == False)
                    for resource in candidate_resources:
                        if resource.is_descendent_of(key_resource.id):
                            controller_resource = resource
                            break
                    if not controller_resource:
                        ws_conn.ws.close()
                        print('unable to find child controller: %s' % parameters['name'])  # fix(soon): what should we do in this case?
                        return
                ws_conn.controller_id = controller_resource.id
                ws_conn.auth_method = 'authCode'
                try:
                    controller_status = ControllerStatus.query.filter(
                        ControllerStatus.id == ws_conn.controller_id).one()
                    controller_status.last_connect_timestamp = datetime.datetime.utcnow()
                    controller_status.client_version = parameters.get('version', None)
                    db.session.commit()
                except NoResultFound:
                    pass
            else:
                ws_conn.ws.close()
                print('invalid auth code')  # fix(soon): what should we do in this case?

    # handle watchdog message (updates controller status record)
    elif msg_type == 'watchdog':
        if ws_conn.controller_id:
            controller_status = ControllerStatus.query.filter(
                ControllerStatus.id == ws_conn.controller_id).one()
            controller_status.last_watchdog_timestamp = datetime.datetime.utcnow()
            db.session.commit()

    # handle ping (does nothing; used to keep connection active)
    elif msg_type == 'ping':
        pass

    # handle subscription (used to subscribe to messages from one or more folders)
    elif msg_type == 'subscribe':
        parameters = message_struct['parameters']
        subscriptions = parameters.get('subscriptions', [])
        for subscription in subscriptions:
            # fix(clean): remove support for folder IDs and old message args
            folder_path = subscription.get('folder', subscription.get('folderId', None))
            message_type = subscription.get('message_type', subscription.get('messageType', None))
            include_children = subscription.get('include_children', subscription.get('includeChildren', False))

            # fix(clean): remove "[self]" option
            if folder_path == 'self' or folder_path == '[self]':
                folder_id = ws_conn.controller_id
            elif hasattr(folder_path, 'strip'):  # a string: treat as a path
                resource = find_resource(folder_path)
                if not resource:
                    print('unable to find subscription folder: %s' % folder_path)
                    return
                folder_id = resource.id
            else:  # otherwise assume it is already a numeric folder ID
                folder_id = folder_path

            # if subscription is allowed, store it
            # fix(later): send a message back if not allowed
            if ws_conn.access_level(folder_id) >= ACCESS_LEVEL_READ:
                if message_debug:
                    print('subscribe folder: %s (%d), message type: %s' % (folder_path, folder_id, message_type))
                ws_conn.subscriptions.append(
                    MessageSubscription(folder_id, message_type, include_children=include_children))

    # update a sequence value
    # fix(soon): remove setNode/updateSequence cases after clients are updated
    elif msg_type == 'setNode' or msg_type == 'updateSequence' or msg_type == 'update_sequence':
        if ws_conn.controller_id:
            parameters = message_struct['parameters']
            if msg_type == 'setNode':  # fix(soon): remove this case
                seq_name = parameters['node']
            else:
                seq_name = parameters['sequence']
            if not seq_name.startswith('/'):  # handle relative sequence names
                resource = Resource.query.filter(Resource.id == ws_conn.controller_id).one()
                # this is ok for now since .. doesn't have special meaning in resource path
                # (no way to escape controller folder)
                seq_name = resource.path() + '/' + seq_name
            timestamp = parameters.get('timestamp', '')  # fix(soon): need to convert to datetime
            if not timestamp:
                timestamp = datetime.datetime.utcnow()
            value = parameters['value']
            if 'encoded' in parameters:
                value = base64.b64decode(value)
            # remove this; require clients to use REST POST for images
            resource = find_resource(seq_name)
            if not resource:
                return
            system_attributes = json.loads(resource.system_attributes) if resource.system_attributes else None
            if system_attributes and system_attributes['data_type'] == Resource.IMAGE_SEQUENCE:
                value = base64.b64decode(value)
            else:
                value = str(value)
            update_sequence_value(resource, seq_name, timestamp, value)
            db.session.commit()

    # update a resource
    elif msg_type == 'write_resource':
        # fix: `parameters` was never assigned in this branch (NameError at runtime)
        parameters = message_struct['parameters']
        # fix: condition was `'path' and 'data' in parameters`, which only tested 'data'
        if 'path' in parameters and 'data' in parameters:
            path = parameters['path']
            if not path.startswith('/'):  # fix(soon): remove this after clients updated
                path = '/' + path
            data = parameters['data']
            resource = find_resource(path)
            if resource:
                if ws_conn.access_level(resource.id) >= ACCESS_LEVEL_WRITE:
                    timestamp = datetime.datetime.utcnow()
                    update_sequence_value(resource, path, timestamp, data)
                    db.session.commit()
                else:
                    socket_sender.send_error(ws_conn, 'permission error: %s' % path)
            else:
                socket_sender.send_error(ws_conn, 'resource not found: %s' % path)
        else:
            socket_sender.send_error(ws_conn, 'expected data and path parameters for write_resource message')

    # handle other action messages
    elif msg_type in ('sendEmail', 'sendTextMessage', 'send_email', 'send_text_message'):
        if ws_conn.controller_id:  # only support these messages from controllers, not browsers
            if msg_type == 'sendEmail' or msg_type == 'send_email':
                handle_send_email(ws_conn.controller_id, message_struct['parameters'])
            elif msg_type == 'sendTextMessage' or msg_type == 'send_text_message':
                handle_send_text_message(ws_conn.controller_id, message_struct['parameters'])

    # for other types, assume that we want to create a message record
    else:

        # figure out target folder
        if 'folder' in message_struct:
            folder_name = message_struct['folder']
            if message_debug:
                print('message to folder: %s' % folder_name)
            if hasattr(folder_name, 'startswith') and folder_name.startswith('/'):
                if message_debug:
                    print('message to folder name: %s' % folder_name)
                folder = find_resource(folder_name)  # assumes leading slash
                if folder:
                    folder_id = folder.id
                    if message_debug:
                        print('message to folder id: %d' % folder_id)
                else:
                    print('message to unknown folder (%s)' % folder_name)
                    return
            else:
                folder_id = folder_name  # fix(soon): remove this case
        elif ws_conn.controller_id:
            folder_id = ws_conn.controller_id
        else:
            print('message (%s) without folder or controller; discarding' % msg_type)
            return

        # if allowed, create a message for the folder
        if ws_conn.access_level(folder_id) >= ACCESS_LEVEL_WRITE:
            parameters = message_struct['parameters']
            # fix(soon): can we move this spawn above access level check (might require request context)
            gevent.spawn(message_queue.add, folder_id, msg_type, parameters,
                         sender_controller_id=ws_conn.controller_id, sender_user_id=ws_conn.user_id)
def post(self):
    """Create a new resource under a parent folder.

    Expects a 'path' (or 'parent') arg naming the parent folder, plus 'name'
    (or an uploaded file) and a numeric 'type'. Creates the resource record
    and any type-specific records: file contents/revision and thumbnail for
    FILE, system attributes for SEQUENCE/REMOTE_FOLDER, and status records
    plus standard sequences for CONTROLLER_FOLDER. Returns {'status': 'ok',
    'id': ...} on success; aborts with 400/403 on bad input or permissions.
    """
    args = request.values

    # get parent
    path = args.get('path', args.get('parent'))  # fix(soon): decide whether to use path or parent
    if not path:
        abort(400)
    parent_resource = find_resource(path)  # expects leading slash
    if not parent_resource:
        try:
            # fix(soon): need to traverse up tree to check permissions, not just check org permissions
            org_name = path.split('/')[1]
            org_resource = Resource.query.filter(
                Resource.name == org_name, Resource.parent_id == None, Resource.deleted == False).one()
            if access_level(org_resource.query_permissions()) < ACCESS_LEVEL_WRITE:
                abort(403)
        except NoResultFound:
            abort(403)
        _create_folders(path.strip('/'))
        parent_resource = find_resource(path)
        if not parent_resource:
            abort(400)

    # make sure we have write access to parent
    if access_level(parent_resource.query_permissions()) < ACCESS_LEVEL_WRITE:
        abort(403)

    # get main parameters; renamed from `file`/`type` to avoid shadowing builtins
    upload_file = request.files.get('file', None)
    name = upload_file.filename if upload_file else args['name']
    resource_type = int(args['type'])  # fix(soon): safe int conversion

    # get timestamps
    if 'creation_timestamp' in args:
        creation_timestamp = parse_json_datetime(args['creation_timestamp'])
    elif 'creationTimestamp' in args:
        creation_timestamp = parse_json_datetime(args['creationTimestamp'])
    else:
        creation_timestamp = datetime.datetime.utcnow()
    if 'modification_timestamp' in args:
        modification_timestamp = parse_json_datetime(args['modification_timestamp'])
    elif 'modificationTimestamp' in args:
        modification_timestamp = parse_json_datetime(args['modificationTimestamp'])
    else:
        modification_timestamp = creation_timestamp

    # check for existing resource
    try:
        Resource.query.filter(
            Resource.parent_id == parent_resource.id, Resource.name == name, Resource.deleted == False).one()
        return {'message': 'Resource already exists.', 'status': 'error'}  # fix(soon): return 400 status code
    except NoResultFound:
        pass

    # create resource
    r = Resource()
    r.parent_id = parent_resource.id
    r.organization_id = parent_resource.organization_id
    r.name = name
    r.type = resource_type
    r.creation_timestamp = creation_timestamp
    r.modification_timestamp = modification_timestamp
    if resource_type == Resource.FILE:
        # temporarily mark resource as deleted in case we fail to create resource revision record
        r.deleted = True
    else:
        r.deleted = False
    if 'user_attributes' in args:
        r.user_attributes = args['user_attributes']  # we assume that the attributes are already a JSON string

    # handle sub-types
    if resource_type == Resource.FILE:

        # get file contents (if any) from request
        if upload_file:
            stream = cStringIO.StringIO()
            upload_file.save(stream)
            data = stream.getvalue()
        else:
            data = base64.b64decode(args.get('contents', args.get('data', '')))  # fix(clean): remove contents version

        # convert files to standard types/formats
        # fix(soon): should give the user a warning or ask for confirmation
        if name.endswith('xls') or name.endswith('xlsx'):
            data = convert_xls_to_csv(data)
            # fix: was rsplit('.')[0], which dropped everything after the FIRST dot
            # (e.g. 'data.v2.xls' became 'data.csv'); maxsplit=1 strips only the extension
            name = name.rsplit('.', 1)[0] + '.csv'
            r.name = name
        if name.endswith('csv') or name.endswith('txt'):
            data = convert_new_lines(data)

        # compute other file attributes
        system_attributes = {
            'hash': hashlib.sha1(data).hexdigest(),
            'size': len(data),
        }
        if 'file_type' in args:  # fix(soon): can we remove this? currently just using for markdown files
            system_attributes['file_type'] = args['file_type']
        r.system_attributes = json.dumps(system_attributes)

    elif resource_type == Resource.SEQUENCE:
        data_type = int(args['data_type'])  # fix(soon): safe convert to int
        system_attributes = {
            'max_history': 10000,
            'data_type': data_type,
        }
        if args.get('decimal_places', '') != '':
            system_attributes['decimal_places'] = int(args['decimal_places'])  # fix(soon): safe convert to int
        if args.get('min_storage_interval', '') != '':
            min_storage_interval = int(args['min_storage_interval'])  # fix(soon): safe convert to int
        else:
            if data_type == Resource.TEXT_SEQUENCE:
                min_storage_interval = 0  # default to 0 seconds for text sequences (want to record all log entries)
            else:
                min_storage_interval = 50  # default to 50 seconds for numeric and image sequences
        if args.get('units'):
            system_attributes['units'] = args['units']
        system_attributes['min_storage_interval'] = min_storage_interval
        r.system_attributes = json.dumps(system_attributes)

    elif resource_type == Resource.REMOTE_FOLDER:
        r.system_attributes = json.dumps({
            'remote_path': args['remote_path'],
        })

    # save resource record
    db.session.add(r)
    db.session.commit()

    # save file contents (after we have resource ID) and compute thumbnail if needed
    if resource_type == Resource.FILE:
        add_resource_revision(r, r.creation_timestamp, data)
        r.deleted = False  # now that we have successfully created the revision, we can make the resource live
        db.session.commit()

        # compute thumbnail
        # fix(soon): recompute thumbnail on resource update
        if name.endswith('.png') or name.endswith('.jpg'):  # fix(later): handle more types, capitalizations
            for width in [120]:  # fix(later): what will be our standard sizes?
                # fix(later): if this returns something other than requested width, we'll keep missing the cache
                (thumbnail_contents, thumbnail_width, thumbnail_height) = compute_thumbnail(data, width)
                thumbnail = Thumbnail()
                thumbnail.resource_id = r.id
                thumbnail.width = thumbnail_width
                thumbnail.height = thumbnail_height
                thumbnail.format = 'jpg'
                thumbnail.data = thumbnail_contents
                db.session.add(thumbnail)
            # fix: thumbnails were added to the session but never committed before returning
            db.session.commit()

    # handle the case of creating a controller; requires creating some additional records
    elif resource_type == Resource.CONTROLLER_FOLDER:

        # create controller status record
        controller_status = ControllerStatus()
        controller_status.id = r.id
        controller_status.client_version = ''
        controller_status.web_socket_connected = False
        controller_status.watchdog_notification_sent = False
        controller_status.attributes = '{}'
        db.session.add(controller_status)
        db.session.commit()

        # create log sequence
        create_sequence(r, 'log', Resource.TEXT_SEQUENCE, max_history=10000)

        # create a folder for status sequences
        status_folder = Resource()
        status_folder.parent_id = r.id
        status_folder.organization_id = r.organization_id
        status_folder.name = 'status'
        status_folder.type = Resource.BASIC_FOLDER
        status_folder.creation_timestamp = datetime.datetime.utcnow()
        status_folder.modification_timestamp = status_folder.creation_timestamp
        db.session.add(status_folder)
        db.session.commit()

        # create status sequences
        create_sequence(status_folder, 'free_disk_space', Resource.NUMERIC_SEQUENCE, max_history=10000, units='bytes')
        create_sequence(status_folder, 'processor_usage', Resource.NUMERIC_SEQUENCE, max_history=10000, units='percent')
        create_sequence(status_folder, 'messages_sent', Resource.NUMERIC_SEQUENCE, max_history=10000)
        create_sequence(status_folder, 'messages_received', Resource.NUMERIC_SEQUENCE, max_history=10000)
        create_sequence(status_folder, 'serial_errors', Resource.NUMERIC_SEQUENCE, max_history=10000)

    return {'status': 'ok', 'id': r.id}