def rm_tracker_admin(configuration, cert_id, vgrid_name, tracker_dir, output_objects):
    """Remove Trac issue tracker admin rights for the owner given by cert_id.

    Locates the tracker 'var' directory under tracker_dir and invokes the
    trac-admin permission removal command for the owner. Status messages are
    appended to output_objects.

    Returns True on success and False on error, matching the boolean contract
    of the sibling add_tracker_admin helper.
    """
    _logger = configuration.logger
    cgi_tracker_var = os.path.join(tracker_dir, 'var')
    if not os.path.isdir(cgi_tracker_var):
        output_objects.append({
            'object_type': 'text',
            'text': 'No tracker (%s) for %s %s - skipping tracker admin rights'
            % (tracker_dir, configuration.site_vgrid_label, vgrid_name)
        })
        # BUGFIX: this branch previously returned the tuple
        # (output_objects, returnvalues.SYSTEM_ERROR), which is truthy and
        # inconsistent with every other exit of this function and with
        # add_tracker_admin. Return False like the sibling helper does.
        return False
    # Trac requires tweaking for certain versions of setuptools
    # http://trac.edgewall.org/wiki/setuptools
    admin_env = {}
    # strip non-string args from env to avoid wsgi execv errors like
    # http://stackoverflow.com/questions/13213676
    for (key, val) in os.environ.items():
        if isinstance(val, basestring):
            admin_env[key] = val
    admin_env["PKG_RESOURCES_CACHE_ZIP_MANIFESTS"] = "1"
    try:
        admin_user = distinguished_name_to_user(cert_id)
        admin_id = admin_user.get(configuration.trac_id_field, 'unknown_id')
        # Remove admin rights for owner using trac-admin command:
        # trac-admin tracker_dir deploy cgi_tracker_bin
        perms_cmd = [configuration.trac_admin_path, cgi_tracker_var,
                     'permission', 'remove', admin_id, 'TRAC_ADMIN']
        _logger.info('remove admin rights from owner: %s' % perms_cmd)
        # NOTE: we use command list here to avoid shell requirement
        proc = subprocess_popen(perms_cmd, stdout=subprocess_pipe,
                                stderr=subprocess_stdout, env=admin_env)
        retval = proc.wait()
        if retval != 0:
            out = proc.stdout.read()
            # Legacy users may never have been granted TRAC_ADMIN - treat
            # that particular failure as a no-op rather than an error.
            if out.find("user has not been granted the permission") != -1:
                _logger.warning(
                    "ignore missing Trac admin for legacy user %s" % admin_id)
            else:
                raise Exception("tracker permissions %s failed: %s (%d)"
                                % (perms_cmd, out, retval))
        return True
    except Exception as exc:
        output_objects.append({
            'object_type': 'error_text',
            'text': 'Could not remove %s tracker admin rights: %s'
            % (cert_id, exc)
        })
        return False
def add_tracker_admin(configuration, cert_id, vgrid_name, tracker_dir, output_objects):
    """Add new Trac issue tracker owner"""
    cgi_tracker_var = os.path.join(tracker_dir, 'var')
    # Guard: nothing to do when the tracker was never deployed
    if not os.path.isdir(cgi_tracker_var):
        output_objects.append({
            'object_type': 'text',
            'text': 'No tracker (%s) for %s %s - skipping tracker admin rights'
            % (tracker_dir, configuration.site_vgrid_label, vgrid_name)
        })
        return False
    # Trac requires tweaking for certain versions of setuptools
    # http://trac.edgewall.org/wiki/setuptools
    # strip non-string args from env to avoid wsgi execv errors like
    # http://stackoverflow.com/questions/13213676
    admin_env = {name: value for (name, value) in os.environ.items()
                 if isinstance(value, basestring)}
    admin_env["PKG_RESOURCES_CACHE_ZIP_MANIFESTS"] = "1"
    try:
        owner_user = distinguished_name_to_user(cert_id)
        owner_id = owner_user.get(configuration.trac_id_field, 'unknown_id')
        # Give admin rights to owner using trac-admin command:
        # trac-admin tracker_dir deploy cgi_tracker_bin
        grant_cmd = [configuration.trac_admin_path, cgi_tracker_var,
                     'permission', 'add', owner_id, 'TRAC_ADMIN']
        configuration.logger.info('provide admin rights to owner: %s'
                                  % grant_cmd)
        # NOTE: We already verified command variables to be shell-safe
        child = subprocess_popen(grant_cmd, stdout=subprocess_pipe,
                                 stderr=subprocess_stdout, env=admin_env)
        child.wait()
        if child.returncode != 0:
            raise Exception("tracker permissions %s failed: %s (%d)"
                            % (grant_cmd, child.stdout.read(),
                               child.returncode))
        return True
    except Exception as exc:
        output_objects.append({
            'object_type': 'error_text',
            'text': 'Could not give %s tracker admin rights: %s'
            % (cert_id, exc)
        })
        return False
def pcopy(self, source):
    """Launch the ngcp command as a child process with output redirected
    to stdout and return the pipe file handle for reading.

    @type source: string
    @param source: URL to open"""
    # NOTE: I replaced a possibly unsafe call
    #       f = popen('ngcp %s /dev/stdout' % source, self._env)
    #       and haven't tested afterwards
    #       -Jonas
    # Passing an argument list (rather than a shell string) avoids the
    # need for a shell and any quoting issues in the source URL.
    ngcp_argv = ['ngcp', source, '/dev/stdout']
    child = subprocess_popen(ngcp_argv, stdout=subprocess_pipe,
                             env=self._env)
    return child.stdout
def create_grid_proxy(cert_path, key_path, proxy_path):
    """Create a default proxy cert via the external generate_proxy helper
    (which wraps grid-proxy-init). In this way no additional voms
    information is added.

    cert_path: path to the certificate file
    key_path: path to the corresponding private key file
    proxy_path: destination path for the generated proxy (by standard
    placed in the /tmp/ folder)

    Returns None; the proxy is written to proxy_path as a side effect.
    Re-raises any exception from running the helper after logging it.
    """
    try:
        # NOTE(review): relative helper path - depends on the daemon's
        # working directory; confirm against the caller's cwd.
        command_list = ["../java-bin/generate_proxy", cert_path, key_path,
                        proxy_path]
        # NOTE: we use command list to avoid the need for shell
        proc = subprocess_popen(command_list, stdout=subprocess_pipe,
                                stderr=subprocess_pipe)
        (out, _) = proc.communicate()
        # NOTE(review): communicate() yields bytes on py3 without text mode,
        # so .replace("\n", ".") assumes py2 / str output - confirm.
        logger.info(out.replace("\n", "."))
    except Exception as exc:
        logger.error("Could not generate a proxy certificate: \n%s" % exc)
        raise
# Bail out early if the helper binary is missing entirely.
if not os.path.exists(daemon_path):
    err_msg = "VMachines proxy helper not found!"
    logger.error(err_msg)
    print(err_msg)
    sys.exit(1)
keep_running = True
print('Starting VM proxy helper daemon - Ctrl-C to quit')
logger.info("Starting VM proxy daemon")
daemon_proc = None
# Supervise the helper: restart it whenever it exits, until Ctrl-C.
while keep_running:
    try:
        # Run vm-proxy helper in the foreground from corresponding dir
        daemon_proc = subprocess_popen([daemon_path, '-n'],
                                       cwd=vm_proxy_base)
        retval = daemon_proc.wait()
        logger.info("daemon returned %s" % retval)
        # Cleared so the shutdown path below knows nothing is running
        daemon_proc = None
    except KeyboardInterrupt:
        keep_running = False
        break
    except Exception as exc:
        msg = 'Caught unexpected exception: %s' % exc
        logger.error(msg)
        print(msg)
    # Throttle down
    time.sleep(30)
# A non-None daemon_proc here means we were interrupted while the helper
# was still running.
# NOTE(review): the actual kill/cleanup presumably follows below this
# chunk - confirm in the full file.
if daemon_proc is not None:
    logger.info('Killing spawned proxy daemon')
# Announce where collected stats will be written, defaulting to json output.
if sitestats_home:
    sitestats_path = os.path.join(sitestats_home, 'usagestats-%d' % now)
    if not output_formats:
        output_formats = ['json']
    print("Writing collected site stats in %s.{%s}" %
          (sitestats_path, ','.join(output_formats)))
# NOTE(review): assumes sitestats_path was initialized (e.g. to None)
# earlier in the script when sitestats_home is unset - confirm, otherwise
# this raises NameError.
if not verbose and sitestats_path is None:
    print("Neither verbose nor writing site stats - boring!")
df_opts = []
# NOTE: df expects multiple file system types as individual options
for fs_type in only_fs_types:
    df_opts += ['-t', fs_type]
# Collect per-filesystem disk usage rows as whitespace-split token lists
proc = subprocess_popen(['/bin/df'] + df_opts, stdout=subprocess_pipe,
                        env=cmd_env)
proc.wait()
for line in proc.stdout.readlines():
    site_stats['disk']['use'].append(line.strip().split())
if verbose:
    print("=== Disk Use ===")
    print('\n'.join(['\t'.join(i) for i in site_stats['disk']['use']]))
# NOTE: mount expects multiple file system types as single comma-sep arg
mount_opts = []
mount_opts += ['-t', ','.join(only_fs_types)]
# Collect matching mount table entries the same way
proc = subprocess_popen(['mount'] + mount_opts, stdout=subprocess_pipe)
proc.wait()
for line in proc.stdout.readlines():
    site_stats['disk']['mounts'].append(line.strip().split())
def main(client_id, user_arguments_dict):
    """Main function used by front end.

    Renders the site admin panel: validates input and admin status, optionally
    dispatches a grid_script or account-request action, then emits three tabs
    (server status + logs, pending account requests, site stats) as
    output_objects. Returns the usual (output_objects, status) tuple.
    """
    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False)
    defaults = signature()[1]
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)
    action = accepted['action'][-1]
    req_list = accepted['req_id']
    job_list = accepted['job_id']
    lines = int(accepted['lines'][-1])
    # Auto-refresh the page at the site-configured interval
    meta = '''<meta http-equiv="refresh" content="%s" />
''' % configuration.sleep_secs
    title_entry = find_entry(output_objects, 'title')
    title_entry['text'] = '%s administration panel' % configuration.short_title
    title_entry['meta'] = meta
    # jquery support for tablesorter and confirmation on "remove"
    # table initially sorted by col. 9 (created)
    table_spec = {'table_id': 'accountreqtable', 'sort_order': '[[9,0]]'}
    (add_import, add_init, add_ready) = man_base_js(configuration,
                                                    [table_spec])
    add_ready += '''
        $(".migadmin-tabs").tabs();
'''
    title_entry['script']['advanced'] += add_import
    title_entry['script']['init'] += add_init
    title_entry['script']['ready'] += add_ready
    output_objects.append({'object_type': 'html_form',
                           'text': man_base_html(configuration)})
    # Hard requirement: only site admins may use this panel
    if not is_admin(client_id, configuration, logger):
        output_objects.append({
            'object_type': 'error_text',
            'text': 'You must be an admin to access this control panel.'})
        return (output_objects, returnvalues.CLIENT_ERROR)
    html = ''
    # NOTE(review): dict.keys() + list concatenation is py2-only; on py3 this
    # raises TypeError (keys() is a view) - confirm target interpreter.
    if action and not action in grid_actions.keys() + accountreq_actions:
        output_objects.append({'object_type': 'error_text',
                               'text': 'Invalid action: %s' % action})
        return (output_objects, returnvalues.SYSTEM_ERROR)
    if action in grid_actions:
        # Forward the requested queue action (plus optional job ids) to
        # grid_script over its message channel
        msg = "%s" % grid_actions[action]
        if job_list:
            msg += ' %s' % ' '.join(job_list)
        msg += '\n'
        if not send_message_to_grid_script(msg, logger, configuration):
            output_objects.append({
                'object_type': 'error_text',
                'text': '''Error sending %s message to grid_script.'''
                % action})
            # NOTE(review): status is assigned here but never used or
            # returned afterwards - the page still renders as OK; confirm
            # whether an early error return was intended.
            status = returnvalues.SYSTEM_ERROR
    elif action in accountreq_actions:
        if action == "addaccountreq":
            for req_id in req_list:
                if accept_account_req(req_id, configuration):
                    output_objects.append({
                        'object_type': 'text',
                        'text': 'Accepted account request %s' % req_id})
                else:
                    output_objects.append({
                        'object_type': 'error_text',
                        'text':
                        'Accept account request failed - details in log'})
        elif action == "delaccountreq":
            for req_id in req_list:
                if delete_account_req(req_id, configuration):
                    output_objects.append({
                        'object_type': 'text',
                        'text': 'Deleted account request %s' % req_id})
                else:
                    output_objects.append({
                        'object_type': 'error_text',
                        'text':
                        'Delete account request failed - details in log'})
    show, drop = '', ''
    general = """
<h2>Server Status</h2>
<p class='importanttext'>
This page automatically refreshes every %s seconds.
</p>
<p>
You can see the current grid daemon status and server logs below. The buttons
provide access to e.g. managing the grid job queues.
</p>
<form method='get' action='migadmin.py'>
    <input type='hidden' name='action' value='' />
    <input type='submit' value='Show last log lines' />
    <input type='text' size='2' name='lines' value='%s' />
</form>
<br />
<form method='get' action='migadmin.py'>
    <input type='hidden' name='lines' value='%s' />
    <input type='hidden' name='action' value='reloadconfig' />
    <input type='submit' value='Reload Configuration' />
</form>
<br />
""" % (configuration.sleep_secs, lines, lines)
    show += """
<form method='get' action='migadmin.py'>
    <input type='hidden' name='lines' value='%s' />
    <input type='submit' value='Log Jobs' />
    <select name='action'>
""" % lines
    drop += """
<form method='get' action='migadmin.py'>
    <input type='hidden' name='lines' value='%s' />
    <input type='submit' value='Drop Job' />
    <select name='action'>
""" % lines
    # Pre-select the queue matching the current action, if any
    for queue in ['queued', 'executing', 'done']:
        selected = ''
        if action.find(queue) != -1:
            selected = 'selected'
        show += "<option %s value='show%s'>%s</option>" % (selected, queue,
                                                           queue)
        drop += "<option %s value='drop%s'>%s</option>" % (selected, queue,
                                                           queue)
    show += """
    </select>
</form>
<br />
"""
    drop += """
    </select>
    <input type='text' size='20' name='job_id' value='' />
</form>
<br />
"""
    html += general
    html += show
    html += drop
    daemons = """
<div id='daemonstatus'>
"""
    # Build the list of daemons to probe based on enabled site features
    daemon_names = []
    if configuration.site_enable_jobs:
        daemon_names += ['grid_script.py', 'grid_monitor.py',
                         'grid_sshmux.py']
    if configuration.site_enable_events:
        daemon_names.append('grid_events.py')
    # No need to run im_notify unless any im notify protocols are enabled
    if configuration.site_enable_imnotify and \
            [i for i in configuration.notify_protocols if i != 'email']:
        daemon_names.append('grid_imnotify.py')
    if configuration.site_enable_sftp:
        daemon_names.append('grid_sftp.py')
    if configuration.site_enable_davs:
        daemon_names.append('grid_webdavs.py')
    if configuration.site_enable_ftps:
        daemon_names.append('grid_ftps.py')
    if configuration.site_enable_openid:
        daemon_names.append('grid_openid.py')
    if configuration.site_enable_transfers:
        daemon_names.append('grid_transfers.py')
    if configuration.site_enable_crontab:
        daemon_names.append('grid_cron.py')
    if configuration.site_enable_seafile:
        daemon_names += ['seafile-controller', 'seaf-server', 'ccnet-server',
                         'seahub']
        if configuration.seafile_mount:
            daemon_names.append('seaf-fuse')
    if configuration.site_enable_sftp_subsys:
        daemon_names.append(
            '/sbin/sshd -f /etc/ssh/sshd_config-MiG-sftp-subsys')
    # Probe each daemon with pgrep -f and render an online/offline marker
    for proc in daemon_names:
        # NOTE: we use command list here to avoid shell requirement
        pgrep_proc = subprocess_popen(['pgrep', '-f', proc],
                                      stdout=subprocess_pipe,
                                      stderr=subprocess_stdout)
        pgrep_proc.wait()
        ps_out = pgrep_proc.stdout.read().strip()
        if pgrep_proc.returncode == 0:
            daemons += "<div class='status_online'>%s running (pid %s)</div>" \
                % (proc, ps_out)
        else:
            daemons += "<div class='status_offline'>%s not running!</div>" % \
                proc
    daemons += """</div>
<br />
"""
    html += daemons
    # Resolve the main server log path (absolute or relative to log_dir)
    log_path_list = []
    if os.path.isabs(configuration.logfile):
        log_path_list.append(configuration.logfile)
    else:
        log_path_list.append(os.path.join(configuration.log_dir,
                                          configuration.logfile))
    # Show the last requested number of lines from each log
    for log_path in log_path_list:
        html += '''
<h2>%s</h2>
<textarea class="fillwidth padspace" rows=%s readonly="readonly">
''' % (log_path, lines)
        log_lines = read_tail(log_path, lines, logger)
        html += ''.join(log_lines[-lines:])
        html += '''</textarea>
'''
    # Open the three-tab wrapper; each tab div is closed separately below
    output_objects.append({'object_type': 'html_form', 'text': """
<div id='wrap-tabs' class='migadmin-tabs'>
<ul>
<li><a href='#serverstatus-tab'>Server Status</a></li>
<li><a href='#accountreqs-tab'>Account Requests</a></li>
<li><a href='#sitestats-tab'>Site Stats</a></li>
</ul>
"""})
    output_objects.append({'object_type': 'html_form', 'text': '''
<div id="serverstatus-tab">
'''})
    output_objects.append({'object_type': 'html_form', 'text': html})
    output_objects.append({'object_type': 'html_form', 'text': '''
</div>
'''})
    html = ''
    output_objects.append({'object_type': 'html_form', 'text': '''
<div id="accountreqs-tab">
'''})
    output_objects.append({'object_type': 'header',
                           'text': 'Pending Account Requests'})
    (list_status, ret) = list_account_reqs(configuration)
    if not list_status:
        logger.error("%s: failed for '%s': %s" % (op_name, client_id, ret))
        output_objects.append({'object_type': 'error_text', 'text': ret})
        return (output_objects, returnvalues.SYSTEM_ERROR)
    # CSRF-protect the accept/delete POST helpers rendered per request
    form_method = 'post'
    csrf_limit = get_csrf_limit(configuration)
    target_op = 'migadmin'
    csrf_token = make_csrf_token(configuration, form_method, target_op,
                                 client_id, csrf_limit)
    accountreqs = []
    for req_id in ret:
        (load_status, req_dict) = get_account_req(req_id, configuration)
        if not load_status:
            logger.error("%s: load failed for '%s': %s" %
                         (op_name, req_id, req_dict))
            output_objects.append({
                'object_type': 'error_text',
                'text': 'Could not read details for "%s"' % req_id})
            return (output_objects, returnvalues.SYSTEM_ERROR)
        req_item = build_accountreqitem_object(configuration, req_dict)
        # Hidden POST form + confirm dialog link for accepting the request
        js_name = 'create%s' % req_id
        helper = html_post_helper(js_name, '%s.py' % target_op,
                                  {'action': 'addaccountreq',
                                   'req_id': req_id,
                                   csrf_field: csrf_token})
        output_objects.append({'object_type': 'html_form', 'text': helper})
        req_item['addaccountreqlink'] = {
            'object_type': 'link',
            'destination': "javascript: confirmDialog(%s, '%s');"
            % (js_name, 'Really accept %s?' % req_id),
            'class': 'addlink iconspace', 'title': 'Accept %s' % req_id,
            'text': ''}
        # Hidden POST form + confirm dialog link for deleting the request
        js_name = 'delete%s' % req_id
        helper = html_post_helper(js_name, '%s.py' % target_op,
                                  {'action': 'delaccountreq',
                                   'req_id': req_id,
                                   csrf_field: csrf_token})
        output_objects.append({'object_type': 'html_form', 'text': helper})
        req_item['delaccountreqlink'] = {
            'object_type': 'link',
            'destination': "javascript: confirmDialog(%s, '%s');"
            % (js_name, 'Really remove %s?' % req_id),
            'class': 'removelink iconspace', 'title': 'Remove %s' % req_id,
            'text': ''}
        accountreqs.append(req_item)
    output_objects.append({
        'object_type': 'table_pager',
        'entry_name': 'pending certificate/OpenID account requests',
        'default_entries': default_pager_entries})
    output_objects.append({'object_type': 'accountreqs',
                           'accountreqs': accountreqs})
    output_objects.append({'object_type': 'html_form', 'text': html})
    output_objects.append({'object_type': 'html_form', 'text': '''
</div>
'''})
    output_objects.append({'object_type': 'html_form', 'text': '''
<div id="sitestats-tab">
'''})
    html = ''
    html += """
<h2>Site Statistics</h2>
"""
    sitestats_home = configuration.sitestats_home
    if sitestats_home and os.path.isdir(sitestats_home):
        # NOTE(review): the doubled quote in id=""all-stats" looks like a
        # typo for id="all-stats" - confirm against the stylesheet/JS.
        html += '''
<div id=""all-stats">
'''
        all_stats = {}
        # Grab first available format for each stats file
        for filename in listdir(sitestats_home):
            prefix, ext = os.path.splitext(filename)
            file_format = ext.lstrip('.')
            if not file_format in ['pickle', 'json', 'yaml']:
                continue
            path = os.path.join(sitestats_home, filename)
            # Cache per-prefix: only load the first format encountered
            stats = all_stats[prefix] = all_stats.get(prefix, {})
            if not stats:
                stats = load(path, serializer=file_format)
                all_stats[prefix].update(force_utf8_rec(stats))
        # NOTE(review): items() returns a view on py3, which has no .sort();
        # py2-only idiom - confirm target interpreter.
        sorted_stats = all_stats.items()
        sorted_stats.sort()
        for (name, stats) in sorted_stats:
            html += format_stats(name, stats)
        html += '''
</div>
'''
    else:
        html += '<span class="warningtext">Site stats not available</span>'
    output_objects.append({'object_type': 'html_form', 'text': html})
    output_objects.append({'object_type': 'html_form', 'text': '''
</div>
'''})
    # Finish tabs wrap
    output_objects.append({'object_type': 'html_form', 'text': '''
</div>
'''})
    return (output_objects, returnvalues.OK)
def run_transfer(configuration, client_id, transfer_dict):
    """Actual data transfer built from transfer_dict on behalf of client_id.

    Validates and chroots all source/destination paths inside the user's
    home (or settings dir for keys), expands the lftp/rsync command template
    for the requested protocol, then runs one transfer process per source
    entry, recording its pid for shutdown cleanup and ORing exit codes into
    transfer_dict['exit_code'] / ['status'].

    Raises ValueError on unsupported protocol/action or on any path that
    escapes the user's allowed directories.
    """
    logger.debug('run transfer for %s: %s' % (client_id,
                                              blind_pw(transfer_dict)))
    transfer_id = transfer_dict['transfer_id']
    action = transfer_dict['action']
    protocol = transfer_dict['protocol']
    status_dir = get_status_dir(configuration, client_id, transfer_id)
    cmd_map = get_cmd_map()
    if not protocol in cmd_map[action]:
        raise ValueError('unsupported protocol: %s' % protocol)
    client_dir = client_id_dir(client_id)
    makedirs_rec(status_dir, configuration)
    # Please note that base_dir must end in slash to avoid access to other
    # user dirs when own name is a prefix of another user name
    base_dir = os.path.abspath(os.path.join(configuration.user_home,
                                            client_dir)) + os.sep
    # TODO: we should refactor to move command extraction into one function
    command_pattern = cmd_map[action][protocol]
    target_helper_list = []
    key_path = transfer_dict.get("key", "")
    if key_path:
        # Use key with given name from settings dir
        settings_base_dir = os.path.abspath(
            os.path.join(configuration.user_settings, client_dir)) + os.sep
        key_path = os.path.join(settings_base_dir, user_keys_dir,
                                key_path.lstrip(os.sep))
        # IMPORTANT: path must be expanded to abs for proper chrooting
        key_path = os.path.abspath(key_path)
        if not valid_user_path(configuration, key_path, settings_base_dir):
            logger.error('rejecting illegal directory traversal for %s (%s)'
                         % (key_path, blind_pw(transfer_dict)))
            raise ValueError("user provided a key outside own settings!")
    rel_src_list = transfer_dict['src']
    rel_dst = transfer_dict['dst']
    compress = transfer_dict.get("compress", False)
    exclude = transfer_dict.get("exclude", [])
    if transfer_dict['action'] in ('import', ):
        # Import: destinations are inside the user home, sources are remote
        logger.debug('setting abs dst for action %(action)s' % transfer_dict)
        src_path_list = transfer_dict['src']
        dst_path = os.path.join(base_dir, rel_dst.lstrip(os.sep))
        dst_path = os.path.abspath(dst_path)
        for src in rel_src_list:
            abs_dst = os.path.join(dst_path, src.lstrip(os.sep))
            # IMPORTANT: path must be expanded to abs for proper chrooting
            abs_dst = os.path.abspath(abs_dst)
            # Reject illegal directory traversal and hidden files
            if not valid_user_path(configuration, abs_dst, base_dir, True):
                logger.error(
                    'rejecting illegal directory traversal for %s (%s)'
                    % (abs_dst, blind_pw(transfer_dict)))
                raise ValueError("user provided a destination outside home!")
            # Trailing separator means directory transfer (no single file)
            if src.endswith(os.sep):
                target_helper_list.append(
                    (get_lftp_target(True, False, exclude),
                     get_rsync_target(True, False, exclude, compress)))
            else:
                target_helper_list.append(
                    (get_lftp_target(True, True, exclude),
                     get_rsync_target(True, True, exclude, compress)))
        makedirs_rec(dst_path, configuration)
    elif transfer_dict['action'] in ('export', ):
        # Export: sources are inside the user home, destination is remote
        logger.debug('setting abs src for action %(action)s' % transfer_dict)
        dst_path = transfer_dict['dst']
        src_path_list = []
        for src in rel_src_list:
            src_path = os.path.join(base_dir, src.lstrip(os.sep))
            # IMPORTANT: path must be expanded to abs for proper chrooting
            src_path = os.path.abspath(src_path)
            # Reject illegal directory traversal and hidden files
            if not valid_user_path(configuration, src_path, base_dir, True):
                logger.error(
                    'rejecting illegal directory traversal for %s (%s)'
                    % (src, blind_pw(transfer_dict)))
                raise ValueError("user provided a source outside home!")
            src_path_list.append(src_path)
            if src.endswith(os.sep) or os.path.isdir(src):
                target_helper_list.append(
                    (get_lftp_target(False, False, exclude),
                     get_rsync_target(False, False, exclude, compress)))
            else:
                target_helper_list.append(
                    (get_lftp_target(False, True, exclude),
                     get_rsync_target(False, True, exclude, compress)))
    else:
        raise ValueError('unsupported action for %(transfer_id)s: %(action)s'
                         % transfer_dict)
    run_dict = transfer_dict.copy()
    run_dict['log_path'] = os.path.join(status_dir, 'transfer.log')
    # Use private known hosts file for ssh transfers as explained above
    # NOTE: known_hosts containing '=' silently leads to rest getting
    # ignored! use /dev/null to skip host key verification completely
    # for now.
    #run_dict['known_hosts'] = os.path.join(base_dir, '.ssh', 'known_hosts')
    run_dict['known_hosts'] = '/dev/null'
    # Make sure password is set to empty string as default
    run_dict['password'] = run_dict.get('password', '')
    # TODO: this is a bogus cert path for now - we don't support ssl certs
    run_dict['cert'] = run_dict.get('cert', '')
    # IMPORTANT: must be implicit proto or 'ftp://' (not ftps://) and
    # similarly webdav(s) must use explicit http(s) instead. In both cases we
    # replace protocol between cmd selection and lftp path expansion
    if run_dict['protocol'] == 'ftps':
        run_dict['orig_proto'] = run_dict['protocol']
        run_dict['protocol'] = 'ftp'
        logger.info('force %(orig_proto)s to %(protocol)s for %(transfer_id)s'
                    % run_dict)
    elif run_dict['protocol'].startswith('webdav'):
        run_dict['orig_proto'] = run_dict['protocol']
        run_dict['protocol'] = run_dict['protocol'].replace('webdav', 'http')
        logger.info('force %(orig_proto)s to %(protocol)s for %(transfer_id)s'
                    % run_dict)
    if key_path:
        # Key-based auth: swap in the validated absolute key (and the cert
        # path derived from it)
        rel_key = run_dict['key']
        rel_cert = run_dict['cert']
        run_dict['key'] = key_path
        run_dict['cert'] = key_path.replace(rel_key, rel_cert)
        run_dict['ssh_auth'] = get_ssh_auth(True, run_dict)
        run_dict['ssl_auth'] = get_ssl_auth(True, run_dict)
    else:
        # Extract encrypted password
        password_digest = run_dict.get('password_digest', '')
        if password_digest:
            _, _, _, payload = password_digest.split("$")
            unscrambled = unscramble_digest(configuration.site_digest_salt,
                                            payload)
            _, _, password = unscrambled.split(":")
            run_dict['password'] = password
        run_dict['ssh_auth'] = get_ssh_auth(False, run_dict)
        run_dict['ssl_auth'] = get_ssl_auth(False, run_dict)
    run_dict['rel_dst'] = rel_dst
    run_dict['dst'] = dst_path
    run_dict['lftp_buf_size'] = run_dict.get('lftp_buf_size',
                                             lftp_buffer_bytes)
    # NOTE(review): the lookup key 'sftp_sftp_block_size' looks like a typo
    # for 'lftp_sftp_block_size', making any user-supplied override silently
    # ignored in favour of the default - confirm against the saved settings
    # format before changing.
    run_dict['lftp_sftp_block_size'] = run_dict.get('sftp_sftp_block_size',
                                                    lftp_sftp_block_bytes)
    status = 0
    # Run one transfer process per source; OR exit codes into status
    for (src, rel_src, target_helper) in zip(src_path_list, rel_src_list,
                                             target_helper_list):
        (lftp_target, rsync_target) = target_helper
        logger.debug('setting up %(action)s for %(src)s' % run_dict)
        if run_dict['protocol'] == 'sftp' and not os.path.isabs(src):
            # NOTE: lftp interprets sftp://FQDN/SRC as absolute path /SRC
            # We force relative paths into user home with a tilde.
            # The resulting sftp://FQDN/~/SRC looks funky but works.
            run_dict['src'] = "~/%s" % src
        else:
            # All other paths are probably absolute or auto-chrooted anyway
            run_dict['src'] = src
        run_dict['rel_src'] = rel_src
        run_dict['lftp_args'] = ' '.join(lftp_target[0]) % run_dict
        run_dict['lftp_excludes'] = ' '.join(lftp_target[1])
        # src and dst may actually be reversed for lftp, but for symmetry ...
        run_dict['lftp_src'] = lftp_target[2][0] % run_dict
        run_dict['lftp_dst'] = lftp_target[2][1] % run_dict
        run_dict['rsync_args'] = ' '.join(rsync_target[0]) % run_dict
        # Preserve excludes on list form for rsync, where it matters
        run_dict[RSYNC_EXCLUDES_LIST] = rsync_target[1]
        run_dict['rsync_src'] = rsync_target[2][0] % run_dict
        run_dict['rsync_dst'] = rsync_target[2][1] % run_dict
        blind_dict = blind_pw(run_dict)
        logger.debug('expanded vars to %s' % blind_dict)
        # NOTE: Make sure NOT to break rsync excludes on list form as they
        # won't work if concatenated to a single string in command_list!
        command_list, blind_list = [], []
        for i in command_pattern:
            if i == RSYNC_EXCLUDES_LIST:
                command_list += run_dict[RSYNC_EXCLUDES_LIST]
                blind_list += run_dict[RSYNC_EXCLUDES_LIST]
            else:
                command_list.append(i % run_dict)
                blind_list.append(i % blind_dict)
        # NOTE(review): command_str is never used below - blind_str is the
        # loggable (password-masked) variant used everywhere.
        command_str = ' '.join(command_list)
        blind_str = ' '.join(blind_list)
        logger.info('run %s on behalf of %s' % (blind_str, client_id))
        transfer_proc = subprocess_popen(command_list,
                                         stdout=subprocess_pipe,
                                         stderr=subprocess_pipe)
        # Save transfer_proc.pid for use in clean up during shutdown
        # in that way we can resume pretty smoothly in next run.
        sub_pid = transfer_proc.pid
        logger.info('%s %s running transfer process %s' % (client_id,
                                                           transfer_id,
                                                           sub_pid))
        add_sub_pid(configuration, sub_pid_map, client_id, transfer_id,
                    sub_pid)
        out, err = transfer_proc.communicate()
        exit_code = transfer_proc.wait()
        status |= exit_code
        del_sub_pid(configuration, sub_pid_map, client_id, transfer_id,
                    sub_pid)
        logger.info('done running transfer %s: %s' % (transfer_id, blind_str))
        logger.debug('raw output is: %s' % out)
        logger.debug('raw error is: %s' % err)
        logger.debug('result was %s' % exit_code)
        # Strip the absolute home prefix from output before exposing status
        if not transfer_result(configuration, client_id, run_dict, exit_code,
                               out.replace(base_dir, ''),
                               err.replace(base_dir, '')):
            logger.error('writing transfer status for %s failed'
                         % transfer_id)
    logger.debug('done handling transfers in %(transfer_id)s' % transfer_dict)
    transfer_dict['exit_code'] = status
    if status == 0:
        transfer_dict['status'] = 'DONE'
    else:
        transfer_dict['status'] = 'FAILED'