def remove_jupyter_mount(jupyter_mount_path, configuration):
    """
    :param jupyter_mount_path: path to a jupyter mount pickle state file
    :param configuration: the MiG configuration object
    :return: void
    """

    filename = os.path.basename(jupyter_mount_path)
    link_home = configuration.sessid_to_jupyter_mount_link_home
    # Remove jupyter mount session symlinks for the default sftp service
    for link in os.listdir(link_home):
        if link in filename:
            delete_symlink(os.path.join(link_home, link),
                           configuration.logger)

    # Remove subsys sftp files
    if configuration.site_enable_sftp_subsys:
        auth_dir = os.path.join(configuration.mig_system_files,
                                'jupyter_mount')
        for auth_file in os.listdir(auth_dir):
            if auth_file.split('.authorized_keys')[0] in filename:
                delete_file(os.path.join(auth_dir, auth_file),
                            configuration.logger)

    # Remove old pickle state file
    delete_file(jupyter_mount_path, configuration.logger)
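
# Hedged usage sketch (illustrative, not from the original module): prune
# jupyter mount state files older than a day. The jupyter_mount_files_dir
# attribute on configuration is an assumption about where pickle state lives.
def _cleanup_stale_jupyter_mounts_example(configuration, max_age=86400):
    """Remove jupyter mount state files older than max_age seconds."""
    cutoff = time.time() - max_age
    state_dir = configuration.jupyter_mount_files_dir
    for name in os.listdir(state_dir):
        state_path = os.path.join(state_dir, name)
        if os.path.getmtime(state_path) < cutoff:
            remove_jupyter_mount(state_path, configuration)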
def cleanup_notify_home(configuration, notified_users=[], timestamp=None):
    """Delete notification files for all *notified_users* and/or all
    notification files older than *timestamp* seconds."""
    logger = configuration.logger
    # logger.debug("cleanup_notify_home: %s, %s"
    #              % (notified_users, timestamp))

    # Remove notification files for notified users

    for client_id in notified_users:
        cleanup_files = received_notifications.get(client_id, {}).get(
            'files', [])
        if not cleanup_files:
            logger.error(
                "Expected _NON_ empty files list for client_id: '%s'"
                % client_id)
        for filepath in cleanup_files:
            # logger.debug("Removing notification file: '%s'" % filepath)
            delete_file(filepath, logger)

    # Remove notification files based on timestamp

    if timestamp is not None:
        notify_home = configuration.notify_home
        now_timestamp = time.time()
        cleanuptime = now_timestamp - timestamp
        for direntry in os.listdir(notify_home):
            filepath = os.path.join(notify_home, direntry)
            ctime = os.path.getctime(filepath)
            # NOTE: files created before the cutoff are the old ones
            if ctime < cleanuptime:
                # logger.debug("Removing OLD notification file: '%s'"
                #              ", ctime: %s, cleanuptime: %s"
                #              % (filepath, ctime, cleanuptime))
                delete_file(filepath, logger)
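
# Hedged usage sketch (illustrative only): a periodic cleanup pass removing
# files for users already notified plus anything older than one hour. The
# handled_users argument is an assumed list of client ids.
def _notify_home_cleanup_example(configuration, handled_users):
    one_hour = 3600
    cleanup_notify_home(configuration, notified_users=handled_users,
                        timestamp=one_hour)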
def save_twofactor_session(configuration, client_id, session_key, user_addr,
                           user_agent, session_start, session_end=-1):
    """Save twofactor session dict for client_id"""
    _logger = configuration.logger
    if configuration.site_enable_gdp:
        client_id = get_base_client_id(configuration, client_id,
                                       expand_oid_alias=False)
    session_path = os.path.join(configuration.twofactor_home, session_key)
    if session_end < 0:
        session_end = session_start + twofactor_cookie_ttl
    session_data = {'client_id': client_id, 'session_key': session_key,
                    'user_addr': user_addr, 'user_agent': user_agent,
                    'session_start': session_start,
                    'session_end': session_end}
    status = pickle(session_data, session_path, _logger)
    if status and configuration.site_twofactor_strict_address:
        session_path_link = os.path.join(configuration.twofactor_home,
                                         "%s_%s" % (user_addr, session_key))
        status = make_symlink(session_key, session_path_link, _logger,
                              force=False)
        if not status:
            delete_file(session_path, _logger)
    return status
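
# Hedged usage sketch (illustrative only): persist a fresh 2FA session at
# login, letting session_end default to session_start plus the module TTL.
# The hex session key generation is an assumption about the key format.
def _save_session_example(configuration, client_id, environ):
    session_key = os.urandom(32).encode('hex')
    return save_twofactor_session(configuration, client_id, session_key,
                                  environ.get('REMOTE_ADDR', ''),
                                  environ.get('HTTP_USER_AGENT', ''),
                                  time.time())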
def delete_access_request(configuration, request_dir, req_name):
    """Delete the request file matching req_name with predefined request
    file extension in request_dir.
    """
    if not req_name.startswith(request_prefix) or \
            not req_name.endswith(request_ext):
        raise ValueError("invalid request name: %s" % req_name)
    req_path = os.path.join(request_dir, req_name)
    return delete_file(req_path, configuration.logger)
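
# Hedged usage sketch (illustrative only): the ValueError guard means callers
# should catch when req_name comes from user input rather than let the error
# propagate to the request handler.
def _delete_request_example(configuration, request_dir, req_name):
    try:
        return delete_access_request(configuration, request_dir, req_name)
    except ValueError, verr:
        configuration.logger.warning("refused request deletion: %s" % verr)
        return False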
def delete_user_key(configuration, client_id, key_filename):
    """Delete the user key key_filename in settings dir"""
    key_dir = os.path.join(configuration.user_settings,
                           client_id_dir(client_id), user_keys_dir)
    pub_filename = "%s.pub" % key_filename
    status, msg = True, ""
    for filename in (key_filename, pub_filename):
        path = os.path.join(key_dir, filename)
        if not delete_file(path, configuration.logger):
            msg += "removal of user key '%s' failed!\n" % filename
            status = False
    return (status, msg)
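
# Hedged usage sketch (illustrative only): remove a key pair and surface any
# partial failure to the caller. The key filename is an example value.
def _drop_key_example(configuration, client_id):
    (ok, errors) = delete_user_key(configuration, client_id, 'id_rsa_mig')
    if not ok:
        configuration.logger.error("key removal incomplete: %s" % errors)
    return ok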
def reset_twofactor_key(client_id, configuration, seed=None, interval=None):
    """Reset 2FA secret key and write to user settings file in scrambled
    form. Return the new secret key on unscrambled base32 form.
    """
    _logger = configuration.logger
    if configuration.site_enable_gdp:
        client_id = get_base_client_id(configuration, client_id,
                                       expand_oid_alias=False)
    client_dir = client_id_dir(client_id)
    key_path = os.path.join(configuration.user_settings, client_dir,
                            twofactor_key_name)
    try:
        if pyotp is None:
            raise Exception(
                "The pyotp module is missing and required for 2FA")
        if not seed:
            b32_key = pyotp.random_base32(length=twofactor_key_bytes)
        else:
            b32_key = seed
        # NOTE: pyotp.random_base32 returns unicode
        #       which causes trouble with WSGI
        b32_key = force_utf8(b32_key)
        scrambled = scramble_password(configuration.site_password_salt,
                                      b32_key)
        key_fd = open(key_path, 'w')
        key_fd.write(scrambled)
        key_fd.close()

        # Reset interval

        interval_path = os.path.join(configuration.user_settings, client_dir,
                                     twofactor_interval_name)
        delete_file(interval_path, _logger, allow_missing=True)
        if interval:
            i_fd = open(interval_path, 'w')
            i_fd.write("%d" % interval)
            i_fd.close()
    except Exception, exc:
        _logger.error("failed to reset 2FA key: %s" % exc)
        return False
    # Return the fresh key on unscrambled base32 form as per the docstring
    return b32_key
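
# Hedged usage sketch (illustrative only): reset the key and build an otpauth
# provisioning URI for an authenticator app with the standard pyotp TOTP API.
# The issuer name is an assumption.
def _provision_totp_example(client_id, configuration):
    b32_key = reset_twofactor_key(client_id, configuration)
    if not b32_key:
        return None
    return pyotp.TOTP(b32_key).provisioning_uri(client_id, issuer_name="MiG")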
def delete_cert_req(req_id, configuration):
    """Helper to delete a pending certificate request"""
    req_path = os.path.join(configuration.user_pending, req_id)
    return delete_file(req_path, configuration.logger)
    logger.info('Looping through files: %s'
                % ' '.join([i[0] for i in upload_files]))

    del_url = "uploadchunked.py?output_format=%s;action=delete;%s=%s;%s=%s"
    move_url = "uploadchunked.py?output_format=%s;action=move;%s=%s;%s=%s;%s=%s"

    # Please refer to https://github.com/blueimp/jQuery-File-Upload/wiki/Setup
    # for details about the status reply format in the uploadfile output object

    # All actions automatically take place relative to cache_dir. We only use
    # current_dir in move operation where it is the destination.

    if action == 'delete':
        for (rel_path, chunk_tuple) in upload_files:
            real_path = os.path.abspath(os.path.join(base_dir, rel_path))
            deleted = delete_file(real_path, logger)
            uploaded.append({'object_type': 'uploadfile', rel_path: deleted})
        logger.info('delete done: %s'
                    % ' '.join([i[0] for i in upload_files]))
        return (output_objects, status)
    elif action == 'status':
        # Status automatically takes place relative to upload tmp dir
        for (rel_path, chunk_tuple) in upload_files:
            real_path = os.path.abspath(os.path.join(base_dir, rel_path))
            file_entry = {'object_type': 'uploadfile', 'name': rel_path}
            file_entry['size'] = get_file_size(real_path, logger)
            file_entry['url'] = os.path.join("/cert_redirect", rel_path)
            if current_dir == upload_tmp_dir:
                file_entry["deleteType"] = "POST"
                # NOTE: the tail of this call was truncated in the excerpt;
                #       the files_field/"dummy" pair filling the remaining
                #       del_url placeholders is an assumption
                file_entry["deleteUrl"] = del_url % \
                    (output_format, filename_field,
                     os.path.basename(rel_path), files_field, "dummy")
    form_method = 'post'
    csrf_limit = get_csrf_limit(configuration)
    target_op = op_name
    csrf_token = make_csrf_token(configuration, form_method, target_op,
                                 client_id, csrf_limit)

    # Please refer to https://github.com/blueimp/jQuery-File-Upload/wiki/Setup
    # for details about the status reply format in the uploadfile output object

    # All actions automatically take place relative to dst_dir. We only use
    # current_dir in move operation where it is the destination.

    if action == 'delete':
        for (rel_path, chunk_tuple) in upload_files:
            abs_path = os.path.abspath(os.path.join(base_dir, rel_path))
            deleted = delete_file(abs_path, logger)
            # Caller looks just for filename here since it is always relative
            uploaded.append({'object_type': 'uploadfile',
                             os.path.basename(rel_path): deleted})
        logger.info('delete done: %s'
                    % ' '.join([i[0] for i in upload_files]))
        return (output_objects, status)
    elif action == 'status':
        # Status automatically takes place relative to dst_dir
        for (rel_path, chunk_tuple) in upload_files:
            abs_path = os.path.abspath(os.path.join(base_dir, rel_path))
            file_entry = {'object_type': 'uploadfile', 'name': rel_path}
            file_entry['size'] = get_file_size(abs_path, logger)
            # NOTE: normpath+lstrip to avoid leading // and thus no base URL
def expire_twofactor_session(configuration, client_id, environ,
                             allow_missing=False, user_addr=None,
                             not_user_addr=None):
    """Expire active twofactor session for user with client_id. Looks up any
    corresponding session cookie and extracts the session_id. In case a
    matching session_id state file exists it is deleted after checking that
    it does indeed originate from the client_id.
    The optional user_addr argument is used to only expire the active
    session from a particular source address for client_id. Left to None in
    gdp mode to expire all sessions and make sure only one session is ever
    active at a time.
    The optional not_user_addr argument is used to expire all sessions NOT
    from a particular source address for client_id.
    """
    _logger = configuration.logger
    if configuration.site_enable_gdp:
        client_id = get_base_client_id(configuration, client_id,
                                       expand_oid_alias=False)
    session_id = client_twofactor_session(configuration, client_id, environ)
    if not session_id:
        _logger.warning("no valid 2FA session found for %s" % client_id)
        if allow_missing:
            return True
        return False

    # Expire all client_id session files matching user_addr

    sessions = list_twofactor_sessions(configuration, client_id, user_addr)
    if not sessions:
        if allow_missing:
            _logger.info("No active 2FA session for %s (%s)"
                         % (client_id, user_addr))
            return True
        else:
            _logger.error("no 2FA session to expire for %s (%s)"
                          % (client_id, user_addr))
            return False
    expired = True
    for (session_key, session_data) in sessions.items():
        if not_user_addr and \
                session_data.get('user_addr', '') == not_user_addr:
            continue
        session_path = os.path.join(configuration.twofactor_home,
                                    session_key)
        # Already checked client_id and optionally user_addr match
        delete_status = True
        if configuration.site_twofactor_strict_address:
            session_user_addr = session_data.get('user_addr', None)
            if session_user_addr is None:
                delete_status = False
            else:
                session_link_path = \
                    os.path.join(configuration.twofactor_home,
                                 "%s_%s" % (session_user_addr, session_key))
                delete_status = delete_symlink(session_link_path, _logger,
                                               allow_missing=allow_missing)
        if delete_status:
            delete_status = delete_file(session_path, _logger,
                                        allow_missing=allow_missing)
        if delete_status:
            _logger.info("expired 2FA session %s for %s in %s"
                         % (session_data, client_id, session_path))
        else:
            _logger.error("failed to delete 2FA session file for %s in %s"
                          % (client_id, session_path))
            expired = False
    return expired
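
# Hedged usage sketch (illustrative only, not part of the original module):
# rotate to a single fresh session by expiring all existing sessions for the
# user before saving a new one with save_twofactor_session above.
def _rotate_session_example(configuration, client_id, environ, session_key,
                            user_addr, user_agent):
    if not expire_twofactor_session(configuration, client_id, environ,
                                    allow_missing=True):
        return False
    return save_twofactor_session(configuration, client_id, session_key,
                                  user_addr, user_agent, time.time())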
def requeue_job(
    job_dict,
    failed_msg,
    job_queue,
    executing_queue,
    configuration,
    logger,
):
    """Requeue a failed job by moving it from executing_queue to job_queue"""
    if not job_dict:
        print failed_msg
        logger.info(failed_msg)
    else:
        executing_queue.dequeue_job_by_id(job_dict['JOB_ID'])
        failed_timestamp = time.gmtime()

        # Clean up the server for files associated with the executing job

        if not job_dict.has_key('SESSIONID') \
                or not job_dict.has_key('IOSESSIONID') \
                or not server_cleanup(
                    job_dict['SESSIONID'],
                    job_dict['IOSESSIONID'],
                    job_dict['LOCALJOBNAME'],
                    job_dict['JOB_ID'],
                    configuration,
                    logger,
                ):
            logger.error('could not clean up MiG server')
            print 'CLEAN UP FAILED'

        client_dir = client_id_dir(job_dict['USER_CERT'])

        # Remove job result files, if they have arrived, as the result is
        # not valid. This can happen with sandboxes as they can't be
        # stopped serverside

        status_prefix = os.path.join(configuration.user_home, client_dir,
                                     job_dict['JOB_ID'])
        io.delete_file(status_prefix + '.status', logger)
        io.delete_file(status_prefix + '.stdout', logger)
        io.delete_file(status_prefix + '.stderr', logger)

        # Generate execution history

        if not job_dict.has_key('EXECUTION_HISTORY'):
            job_dict['EXECUTION_HISTORY'] = []

        history_dict = {
            'QUEUED_TIMESTAMP': job_dict['QUEUED_TIMESTAMP'],
            'EXECUTING_TIMESTAMP': job_dict['EXECUTING_TIMESTAMP'],
            'FAILED_TIMESTAMP': failed_timestamp,
            'FAILED_MESSAGE': failed_msg,
            'UNIQUE_RESOURCE_NAME': job_dict['UNIQUE_RESOURCE_NAME'],
            'RESOURCE_VGRID': job_dict.get('RESOURCE_VGRID', ''),
            'PUBLICNAME': job_dict.get('PUBLICNAME', 'HIDDEN'),
        }

        job_dict['EXECUTION_HISTORY'].append(history_dict)

        # Retry if retries left

        job_dict['RETRY_COUNT'] = job_dict.get('RETRY_COUNT', 0) + 1

        unique_resource_name = job_dict['UNIQUE_RESOURCE_NAME']

        mrsl_file = os.path.join(configuration.mrsl_files_dir, client_dir,
                                 job_dict['JOB_ID'] + '.mRSL')
        job_retries = job_dict.get('RETRIES', configuration.job_retries)
        if job_dict['RETRY_COUNT'] <= job_retries:
            job_dict['STATUS'] = 'QUEUED'
            job_dict['QUEUED_TIMESTAMP'] = time.gmtime()
            del job_dict['EXECUTING_TIMESTAMP']
            del job_dict['UNIQUE_RESOURCE_NAME']
            del job_dict['EXE']
            del job_dict['RESOURCE_CONFIG']
            del job_dict['LOCALJOBNAME']
            if job_dict.has_key('SESSIONID'):
                del job_dict['SESSIONID']
            if job_dict.has_key('IOSESSIONID'):
                del job_dict['IOSESSIONID']
            if job_dict.has_key('PUBLICNAME'):
                del job_dict['PUBLICNAME']
            if job_dict.has_key('RESOURCE_VGRID'):
                del job_dict['RESOURCE_VGRID']
            io.pickle(job_dict, mrsl_file, logger)

            # Requeue job last in queue for retry later

            job_queue.enqueue_job(job_dict, job_queue.queue_length())
            msg = '%s failed to execute job %s - requeue for retry %d of %d' \
                % (unique_resource_name, job_dict['JOB_ID'],
                   job_dict['RETRY_COUNT'], job_retries)
            print msg
            logger.info(msg)
        else:
            job_dict['STATUS'] = 'FAILED'
            job_dict['FAILED_TIMESTAMP'] = failed_timestamp
            io.pickle(job_dict, mrsl_file, logger)

            # tell the user the sad news

            msg = 'Gave up on executing job %s after %d retries' \
                % (job_dict['JOB_ID'], job_retries)
            logger.error(msg)
            print msg
            notify_user_thread(
                job_dict,
                configuration.myfiles_py_location,
                'FAILED',
                logger,
                False,
                configuration,
            )
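
# Hedged usage sketch (illustrative only): how a scheduler loop might hand a
# timed-out job to requeue_job. The get_job_by_id lookup on the executing
# queue is an assumption about the queue API.
def _requeue_timed_out_example(job_id, job_queue, executing_queue,
                               configuration, logger):
    job_dict = executing_queue.get_job_by_id(job_id)
    failed_msg = 'job %s timed out on resource' % job_id
    requeue_job(job_dict, failed_msg, job_queue, executing_queue,
                configuration, logger)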
def delete_account_req(req_id, configuration):
    """Helper to delete a pending account request"""
    req_path = os.path.join(configuration.user_pending, req_id)
    return delete_file(req_path, configuration.logger)
def main(client_id, user_arguments_dict):
    """Main function used by front end"""

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False)
    client_dir = client_id_dir(client_id)
    defaults = signature()[1]
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
    )
    if not validate_status:
        WARNING_MSG = str(accepted)
        output_objects.append({'object_type': 'warning',
                               'text': WARNING_MSG})
        return (accepted, returnvalues.CLIENT_ERROR)

    action = ''.join(accepted['action'])
    flags = ''.join(accepted['flags'])
    path = ''.join(accepted['path'])
    extension = ''.join(accepted['extension'])

    # Please note that base_dir must end in slash to avoid access to other
    # user dirs when own name is a prefix of another user name

    base_dir = os.path.abspath(os.path.join(configuration.user_home,
                                            client_dir)) + os.sep
    abs_path = os.path.join(base_dir, path)

    settings_dict = load_settings(client_id, configuration)
    javascript = None

    title_entry = find_entry(output_objects, 'title')
    title_entry['text'] = 'FILEMETAIO Management'
    title_entry['javascript'] = javascript
    output_objects.append({'object_type': 'header',
                           'text': 'FILEMETAIO Management'})
    status = returnvalues.ERROR

    if flags == 'i':
        if action == 'list':
            image_meta = get_image_file_settings(logger, abs_path)
            if image_meta is not None:
                extension_list = []
                settings_status_list = []
                settings_progress_list = []
                image_count_list = []
                for entry in image_meta:
                    extension_list.append(entry['extension'])
                    settings_status_list.append(entry['settings_status'])
                    settings_progress_list.append(
                        entry['settings_update_progress'])
                    image_count_list.append(get_image_file_count(
                        logger, abs_path, entry['extension']))
                output_objects.append({
                    'object_type': 'image_settings_list',
                    'extension_list': extension_list,
                    'settings_status_list': settings_status_list,
                    'settings_progress_list': settings_progress_list,
                    'image_count_list': image_count_list,
                })
                status = returnvalues.OK
            else:
                status = returnvalues.ERROR
                ERROR_MSG = "No image settings found for path: '%s'" % path
                output_objects.append({'object_type': 'text',
                                       'text': ERROR_MSG})
                logger.error('filemetaio.py: %s -> %s' % (action, ERROR_MSG))
        elif action == 'remove_dir':
            remove_ext = None
            vgrid_name = path.split('/')[0]
            if extension != '':
                remove_ext = extension
            # NOTE: preset result and removed_ext_list so a raising call
            #       below is handled as an error instead of a NameError
            result = None
            removed_ext_list = []
            try:
                (result, removed_ext_list) = \
                    remove_image_file_settings(logger, abs_path, remove_ext)
            except Exception, ex:
                logger.debug(str(traceback.format_exc()))

            if result is not None:
                result = returnvalues.OK
            else:
                result = returnvalues.ERROR
                ERROR_MSG = 'Unable to remove image settings for path: %s' \
                    % path
                output_objects.append({'object_type': 'text',
                                       'text': ERROR_MSG})
                logger.error('filemetaio.py: %s -> %s' % (action, ERROR_MSG))

            for removed_ext in removed_ext_list:
                abs_last_modified_filepath = \
                    __get_image_settings_trigger_last_modified_filepath(
                        logger, abs_path, removed_ext)

                # Remove trigger

                if delete_file(abs_last_modified_filepath, logger):
                    # FYSIKER HACK: Sleep 1 to prevent trigger rule/event race
                    # TODO: Modify events handler to accept trigger
                    #       action + delete
                    time.sleep(1)

                    # Remove old vgrid submit trigger for files

                    rule_id = __get_image_file_trigger_rule_id(
                        logger, path, removed_ext)
                    status = __remove_image_file_trigger(
                        configuration,
                        vgrid_name,
                        path,
                        extension,
                        rule_id,
                        output_objects,
                    )
                    if status != returnvalues.OK:
                        result = status

                    # Remove old vgrid submit trigger for settings

                    rule_id = __get_image_settings_trigger_rule_id(
                        logger, path, removed_ext)
                    status = __remove_image_settings_trigger(
                        configuration,
                        vgrid_name,
                        path,
                        extension,
                        rule_id,
                        output_objects,
                    )
                    if status != returnvalues.OK:
                        result = status
                else:
                    result = returnvalues.ERROR
                    ERROR_MSG = 'Unable to remove file: %s' \
                        % abs_last_modified_filepath
                    output_objects.append({'object_type': 'text',
                                           'text': ERROR_MSG})
                    logger.error('filemetaio.py: %s -> %s'
                                 % (action, ERROR_MSG))
        elif action == 'get_dir':
            image_count = get_image_file_count(logger, abs_path, extension)
            image_meta = get_image_file_setting(logger, abs_path, extension)
            if image_meta is not None:
                extension = str(image_meta['extension'])
                settings_status = str(image_meta['settings_status'])
                settings_update_progress = \
                    str(image_meta['settings_update_progress'])
                settings_recursive = str(image_meta['settings_recursive'])
                image_count = str(image_count)
                image_type = str(image_meta['image_type'])
                offset = str(image_meta['offset'])
                x_dimension = str(image_meta['x_dimension'])
                y_dimension = str(image_meta['y_dimension'])
                preview_image_extension = \
                    str(image_meta['preview_image_extension'])
                preview_x_dimension = str(image_meta['preview_x_dimension'])
                preview_y_dimension = str(image_meta['preview_y_dimension'])
                preview_cutoff_min = str(image_meta['preview_cutoff_min'])
                preview_cutoff_max = str(image_meta['preview_cutoff_max'])
                data_type = str(image_meta['data_type'])
                output_objects.append({
                    'object_type': 'image_setting',
                    'path': path,
                    'extension': extension,
                    'settings_status': settings_status,
                    'settings_update_progress': settings_update_progress,
                    'settings_recursive': settings_recursive,
                    'image_count': image_count,
                    'image_type': image_type,
                    'offset': offset,
                    'x_dimension': x_dimension,
                    'y_dimension': y_dimension,
                    'preview_image_extension': preview_image_extension,
                    'preview_x_dimension': preview_x_dimension,
                    'preview_y_dimension': preview_y_dimension,
                    'preview_cutoff_min': preview_cutoff_min,
                    'preview_cutoff_max': preview_cutoff_max,
                    'data_type': data_type,
                })
                status = returnvalues.OK
            else:
                status = returnvalues.ERROR
                ERROR_MSG = "No image setting information for path: '%s'," \
                    " extension: '%s'" % (path, extension)
                output_objects.append({'object_type': 'text',
                                       'text': ERROR_MSG})
                logger.error('filemetaio.py: %s -> %s' % (action, ERROR_MSG))
        elif action == 'put_dir':
            settings_status = ''.join(accepted['settings_status'])
            if ''.join(accepted['settings_recursive']) == 'True':
                settings_recursive = True
            else:
                settings_recursive = False
            image_type = ''.join(accepted['image_type'])
            data_type = ''.join(accepted['data_type'])
            offset = int(''.join(accepted['offset']))
            x_dimension = int(''.join(accepted['x_dimension']))
            y_dimension = int(''.join(accepted['y_dimension']))
            preview_image_extension = \
                ''.join(accepted['preview_image_extension'])
            preview_x_dimension = \
                int(''.join(accepted['preview_x_dimension']))
            preview_y_dimension = \
                int(''.join(accepted['preview_y_dimension']))
            preview_cutoff_min = \
                float(''.join(accepted['preview_cutoff_min']))
            preview_cutoff_max = \
                float(''.join(accepted['preview_cutoff_max']))

            path_array = path.split('/')
            vgrid_name = path_array[0]
            vgrid_data_path = '/'.join(path_array[1:])
            vgrid_meta_path = os.path.join(vgrid_data_path, __metapath)
            vgrid_image_meta_path = os.path.join(vgrid_data_path,
                                                 __image_metapath)

            OK_MSG = "Created/updated settings for image extension: '%s'" \
                " for path '%s'" % (extension, path)
            ERROR_MSG = "Failed to change settings for image extension:" \
                " '%s' for path: '%s'" % (extension, path)

            (is_valid, is_valid_msg) = __is_valid_image_settings_update(
                configuration,
                base_dir,
                vgrid_name,
                vgrid_data_path,
                extension,
                settings_recursive,
            )
            if is_valid:
                status = returnvalues.OK
            else:
                status = returnvalues.ERROR
                output_objects.append({'object_type': 'error_text',
                                       'text': ERROR_MSG})
                output_objects.append({'object_type': 'error_text',
                                       'text': is_valid_msg})
                logger.error('filemetaio.py: %s -> %s' % (action, ERROR_MSG))
                logger.error('filemetaio.py: %s -> %s'
                             % (action, is_valid_msg))

            # Ensure meta path existence

            if status == returnvalues.OK:
                makedirs_rec(os.path.join(base_dir,
                                          os.path.join(vgrid_name,
                                                       vgrid_meta_path)),
                             configuration)

                # Ensure image meta path existence

                makedirs_rec(os.path.join(
                    base_dir, os.path.join(vgrid_name,
                                           vgrid_image_meta_path)),
                    configuration)

                try:
                    add_status = add_image_file_setting(
                        logger,
                        abs_path,
                        extension,
                        settings_status,
                        None,
                        settings_recursive,
                        image_type,
                        data_type,
                        offset,
                        x_dimension,
                        y_dimension,
                        preview_image_extension,
                        preview_x_dimension,
                        preview_y_dimension,
                        preview_cutoff_min,
                        preview_cutoff_max,
                        overwrite=True,
                    )
                except Exception, ex:
                    add_status = False
                    logger.debug(str(traceback.format_exc()))

                if add_status:
                    status = returnvalues.OK
                    output_objects.append({'object_type': 'text',
                                           'text': OK_MSG})
                else:
                    status = returnvalues.ERROR
                    output_objects.append({'object_type': 'error_text',
                                           'text': ERROR_MSG})
                    logger.error('filemetaio.py: %s -> %s'
                                 % (action, ERROR_MSG))

            if status == returnvalues.OK:

                # Generate vgrid trigger for files

                if settings_recursive:
                    vgrid_trigger_path = os.path.join(vgrid_data_path,
                                                      '*/*.%s' % extension)
                else:
                    vgrid_trigger_path = os.path.join(vgrid_data_path,
                                                      '*.%s' % extension)
                rule_id = __get_image_file_trigger_rule_id(logger, path,
                                                           extension)
                rule_dict = {
                    'rule_id': rule_id,
                    'vgrid_name': vgrid_name,
                    'path': vgrid_trigger_path,
                    'changes': ['created', 'modified', 'deleted', 'moved'],
                    'run_as': client_id,
                    'action': 'submit',
                    'arguments': 'template_from_filemetaio.py',
                    'templates': [
                        __get_image_update_preview_mrsl_template(path)],
                    'settle_time': '60s',
                    'rate_limit': '',
                }

                # Remove old vgrid submit trigger for files

                status = __remove_image_file_trigger(
                    configuration,
                    vgrid_name,
                    path,
                    extension,
                    rule_id,
                    output_objects,
                )
                if status == returnvalues.OK:

                    # Add generated vgrid submit trigger for files

                    (add_status, add_msg) = \
                        vgrid_add_triggers(configuration, vgrid_name,
                                           [rule_dict])
                    if add_status:
                        status = returnvalues.OK
                        OK_MSG = "Created/updated image file trigger for" \
                            " extension: '%s', path '%s'" % (extension, path)
                        output_objects.append({'object_type': 'text',
                                               'text': OK_MSG})
                    else:
                        status = returnvalues.ERROR
                        ERROR_MSG = "Failed to change image file trigger" \
                            " for extension: '%s', path '%s'" \
                            % (extension, path)
                        ERROR_MSG2 = "Make sure '%s' is a VGrid" % vgrid_name
                        output_objects.append({'object_type': 'error_text',
                                               'text': ERROR_MSG})
                        output_objects.append({'object_type': 'error_text',
                                               'text': ERROR_MSG2})
                        logger.error('filemetaio.py: %s -> %s'
                                     % (action, ERROR_MSG))
                        logger.error('filemetaio.py: %s -> %s'
                                     % (action, ERROR_MSG2))

            if status == returnvalues.OK:

                # Generate vgrid trigger for settings

                vgrid_path = '/'.join(path.split('/')[1:])
                vgrid_trigger_filepath = \
                    __get_image_settings_trigger_last_modified_filepath(
                        logger, vgrid_path, extension)
                rule_id = __get_image_settings_trigger_rule_id(logger, path,
                                                               extension)
                rule_dict = {
                    'rule_id': rule_id,
                    'vgrid_name': vgrid_name,
                    'path': vgrid_trigger_filepath,
                    'changes': ['modified', 'deleted'],
                    'run_as': client_id,
                    'action': 'submit',
                    'arguments': 'template_from_filemetaio.py',
                    'templates': [
                        __get_image_create_previews_mrsl_template(path,
                                                                  extension)],
                    'settle_time': '1s',
                    'rate_limit': '',
                }

                # Remove old vgrid submit trigger for settings

                status = __remove_image_settings_trigger(
                    configuration,
                    vgrid_name,
                    path,
                    extension,
                    rule_id,
                    output_objects,
                )
                if status == returnvalues.OK:

                    # Add generated vgrid submit trigger for settings

                    (add_status, add_msg) = \
                        vgrid_add_triggers(configuration, vgrid_name,
                                           [rule_dict])
                    if add_status:
                        status = returnvalues.OK
                        OK_MSG = "Created/updated image settings trigger" \
                            " for extension: '%s', path '%s'" \
                            % (extension, path)
                        output_objects.append({'object_type': 'text',
                                               'text': OK_MSG})
                    else:
                        status = returnvalues.ERROR
                        ERROR_MSG = "Failed to change image settings" \
                            " trigger for extension: '%s', path '%s'" \
                            % (extension, path)
                        ERROR_MSG2 = "Make sure '%s' is a VGrid" % vgrid_name
                        output_objects.append({'object_type': 'error_text',
                                               'text': ERROR_MSG})
                        output_objects.append({'object_type': 'error_text',
                                               'text': ERROR_MSG2})
                        logger.error('filemetaio.py: %s -> %s'
                                     % (action, ERROR_MSG))
                        logger.error('filemetaio.py: %s -> %s'
                                     % (action, ERROR_MSG2))

            if status == returnvalues.OK:

                # Trigger Trigger (Trigger Happy): fire the new settings
                # trigger right away by touching its watched file

                abs_vgrid_trigger_filepath = \
                    os.path.join(base_dir,
                                 os.path.join(vgrid_name,
                                              vgrid_trigger_filepath))

                # FYSIKER HACK: Sleep 1 to prevent trigger rule/event race
                # TODO: Modify events handler to accept add+trigger action

                time.sleep(1)
                timestamp = time.time()
                touch(abs_vgrid_trigger_filepath, timestamp)