def main():
    """Delete documents from the DB by id.

    Ids are read from the file given with --docs, or from stdin when no
    argument is supplied. Each matching line is passed to delete().
    """
    log = logging.getLogger('stderr')
    # open mailbox and read args
    allids = None
    i = 1
    while i < len(sys.argv):
        arg = sys.argv[i]
        if arg == '--docs':
            i += 1
            allids = open(sys.argv[i], 'r').read()
        else:
            common.fatal("Unknown arg %s" % arg)
        i += 1
    if allids is None:          # no --docs given: read ids from stdin
        allids = sys.stdin.read()
    # raw string: \s in a plain literal is an invalid escape sequence
    re_id = re.compile(r'([0-9A-Fa-f]+)\s+')
    for count, line in enumerate(allids.splitlines(True)):
        mo = re_id.match(line)
        if not mo:
            log.info("Ignoring line %d: %s" % (count, line))
            continue
        docid = mo.group(1)
        # (do not except Exceptions: they should go into error log)
        delete(docid, 'stderr')
        log.info("Deletion successful: %s" % docid)
def check_required_elements(topology):
    """Verify mandatory top-level topology elements and flag unknown ones.

    Aborts when 'nodes' or 'defaults' are missing, derives a topology name
    from the input file's directory when absent, and reports any top-level
    key that is not a known element.
    """
    missing = [el for el in ('nodes', 'defaults') if el not in topology]
    for el in missing:
        common.error("Missing '%s' element" % el,
                     category=common.MissingValue, module="topology")
    if missing:
        common.fatal("Fatal topology errors, aborting")

    if 'name' not in topology:
        # fall back to the directory name of the first input file
        topology.name = os.path.basename(
            os.path.dirname(os.path.realpath(topology['input'][0])))
    topology.defaults.name = topology.name

    known_elements = topo_main_elements + topo_internal_elements
    if topology.get('module'):
        topology.defaults.module = topology.module
        # module names are themselves valid top-level elements
        known_elements = known_elements + topology.module

    for key in topology.keys():
        if key not in known_elements:
            common.error("Unknown top-level element %s" % key,
                         category=common.IncorrectValue, module="topology")
def augment_node_images(topology):
    """Assign a box image to every node that does not already have one.

    Resolution order per node: explicit 'box', legacy 'image' (renamed to
    'box'), then the provider-specific image from device defaults.
    """
    provider = topology.provider
    devices = topology.defaults.devices
    if not devices:
        common.fatal('Device defaults (defaults.devices) are missing')

    for node in topology.nodes:
        node.setdefault('device', topology.defaults.get('device'))
        if not node.device:
            common.error(
                'No device type specified for node %s and there is no default device type' % node.name)
            continue
        if node.box:
            continue                      # already resolved
        if 'image' in node:
            # legacy attribute: rename 'image' to 'box'
            node.box = node.image
            del node['image']
            continue

        device_type = node.device
        if device_type not in devices:
            common.error('Unknown device %s in node %s' % (device_type, node.name))
            continue
        image = devices[device_type].image[provider]
        if not image:
            common.error(
                'No image specified for device %s (provider %s) used by node %s' % (device_type, provider, node['name']))
            continue
        node.box = image
def augment_mgmt_if(node, device_data, addrs):
    """Fill in node management-interface data: name, IPv4/IPv6 address, MAC.

    node        -- Box-like node object (attribute and key access mixed)
    device_data -- per-device defaults (mgmt_if, interface_name, ifindex_offset)
    addrs       -- management address pool; presumably carries ipv4_pfx/ipv6_pfx
                   networks, a 'start' offset and a mutable mac_eui -- TODO confirm
    """
    node.setdefault('mgmt', {})
    if 'ifname' not in node.mgmt:
        # explicit management interface name, or derive it from the template
        mgmt_if = device_data.mgmt_if
        if not mgmt_if:
            ifname_format = device_data.interface_name
            if not ifname_format:
                common.fatal(
                    "Missing interface name template for device type %s" % node['device'])
            # management interface sits one position before the first data interface
            ifindex_offset = device_data.get('ifindex_offset', 1)
            mgmt_if = ifname_format % (ifindex_offset - 1)
        node.mgmt.ifname = mgmt_if
    if addrs:
        for af in 'ipv4', 'ipv6':
            pfx = af + '_pfx'
            if pfx in addrs:
                if not af in node.mgmt:
                    # node id is offset into the management prefix
                    node.mgmt[af] = str(addrs[pfx][node['id'] + addrs['start']])
        if addrs.mac_eui:
            # NOTE(review): mutates the shared mac_eui in place; last byte = node id
            addrs.mac_eui[5] = node['id']
            node.mgmt.setdefault('mac', str(addrs['mac_eui']))
def augment_mgmt_if(node, device_data, addrs):
    """Fill in node management-interface data: name, IPv4/IPv6 address, MAC.

    node        -- node dict (must contain 'id'; 'device' used in error text)
    device_data -- per-device defaults (mgmt_if, interface_name, ifindex_offset)
    addrs       -- management address pool (ipv4_pfx/ipv6_pfx, start, mac_eui)
                   or a falsy value when no pool is configured
    """
    if 'mgmt' not in node:
        node['mgmt'] = {}
    if 'ifname' not in node['mgmt']:
        mgmt_if = device_data.get('mgmt_if')
        if not mgmt_if:
            ifname_format = device_data.get('interface_name')
            if not ifname_format:
                # BUGFIX: was "n['device']" -- 'n' is undefined in this scope
                common.fatal(
                    "FATAL: Missing interface name template for device type %s" % node['device'])
            # management interface sits one position before the first data interface
            ifindex_offset = device_data.get('ifindex_offset', 1)
            mgmt_if = ifname_format % (ifindex_offset - 1)
        node['mgmt']['ifname'] = mgmt_if
    if addrs:
        for af in 'ipv4', 'ipv6':
            pfx = af + '_pfx'
            if pfx in addrs:
                if not af in node['mgmt']:
                    # node id is offset into the management prefix
                    node['mgmt'][af] = str(addrs[pfx][node['id'] + addrs['start']])
        if 'mac_eui' in addrs:
            # last byte of the pool MAC = node id (mutates the shared value)
            addrs['mac_eui'][5] = node['id']
            if not 'mac' in node['mgmt']:
                node['mgmt']['mac'] = str(addrs['mac_eui'])
def checkup_on_dms():
    """Make sure the pydms server is reachable.

    Starts ./server.py when its socket is missing, then polls for the
    socket for up to ~3 seconds before giving up via fatal().
    """
    if not exists(common.DMS_UDS_PATH):
        start(['./server.py'])
    attempt = 0
    while attempt < 30:
        if exists(common.DMS_UDS_PATH):
            return
        sleep(0.1)
        attempt += 1
    fatal("Error: Could not start pydms service. Giving up.")
def main():
    """Remove the paths listed in a removelist from under a root path.

    Usage: two positional arguments -- removelist file ('-' for stdin)
    and the root directory to prune.
    """
    args = sys.argv[1:]
    if len(args) != 2:
        usage()
    removelist, root_path = args
    if removelist == "-":
        removelist_fh = sys.stdin
    else:
        # use the unpacked name and builtin open() instead of the
        # deprecated file() constructor / opaque args[0]
        removelist_fh = open(removelist, "r")
    if not os.path.isdir(root_path):
        fatal("root path does not exist: " + root_path)
    apply_removelist(removelist_fh, root_path)
def adjust_global_parameters(topology):
    """Resolve the virtualization provider and validate it is supported.

    Mirrors 'provider' between the topology and its defaults, then aborts
    when no provider is set or when it is not in defaults.providers.
    """
    topology.setdefault('provider', topology.defaults.provider)
    topology.defaults.provider = topology.provider

    if not topology.provider:
        common.fatal(
            'Virtualization provider is not defined in either "provider" or "defaults.provider" elements'
        )

    providers = topology.defaults.providers
    if topology.provider not in providers:
        supported = ', '.join(providers.keys())
        common.fatal(
            'Unknown virtualization provider %s. Supported providers are: %s'
            % (topology.provider, supported))
def main():
    """Remove the paths listed in a removelist from under a root path.

    Usage: two positional arguments -- removelist file ('-' for stdin)
    and the root directory to prune.
    """
    args = sys.argv[1:]
    if len(args) != 2:
        usage()
    removelist, root_path = args
    if removelist == '-':
        removelist_fh = sys.stdin
    else:
        # use the unpacked name and builtin open() instead of the
        # deprecated file() constructor / opaque args[0]
        removelist_fh = open(removelist, "r")
    if not os.path.isdir(root_path):
        fatal("root path does not exist: " + root_path)
    apply_removelist(removelist_fh, root_path)
def chroot_script(chroot, script_path, *args): if not isfile(script_path): fatal("no such script (%s)" % script_path) tmpdir = tempfile.mkdtemp(dir=join(chroot.path, "tmp"), prefix="chroot-script.") script_path_chroot = join(tmpdir, basename(script_path)) shutil.copy(script_path, script_path_chroot) os.chmod(script_path_chroot, 0755) err = chroot.system(paths.make_relative(chroot.path, script_path_chroot), *args) shutil.rmtree(tmpdir) return err
def read_yaml(fname):
    """Read YAML data from *fname*.

    Returns the parsed data, or None when the file cannot be opened.
    Aborts via common.fatal() when the file opens but does not parse.
    """
    try:
        stream = open(fname, 'r')
    except (IOError, OSError):      # narrowed from bare except:
        if common.LOGGING or common.VERBOSE:
            print("Cannot open YAML file %s" % fname)
        return None
    try:
        # SafeLoader: never construct arbitrary Python objects from YAML
        data = yaml.load(stream, Loader=yaml.SafeLoader)
    except Exception:
        common.fatal("Cannot read YAML from %s: %s " % (fname, str(sys.exc_info()[1])))
    finally:
        # close even on parse failure (previously leaked on that path)
        stream.close()
    if common.LOGGING or common.VERBOSE:
        print("Read YAML data from %s" % fname)
    return data
def read_yaml(fname):
    """Read YAML data from *fname* into a Box.

    Returns the parsed Box, or None when the file does not exist.
    Aborts via common.fatal() when the file exists but does not parse.
    """
    if not os.path.isfile(fname):
        if common.LOGGING or common.VERBOSE:
            print("YAML file %s does not exist" % fname)
        return None
    try:
        data = Box().from_yaml(filename=fname, default_box=True, box_dots=True,
                               default_box_none_transform=False)
    except Exception:
        # narrowed from bare except: don't swallow KeyboardInterrupt/SystemExit
        common.fatal("Cannot read YAML from %s: %s " % (fname, str(sys.exc_info()[1])))
    if common.LOGGING or common.VERBOSE:
        print("Read YAML data from %s" % fname)
    return data
def main():
    """Pipe every message of a mailbox through a shell command.

    argv: <mbox|Maildir> <mailbox path> <command>. The command is run once
    per message with the raw message on its stdin.
    """
    log = logging.getLogger('stderr')
    if len(sys.argv) != 4:
        common.fatal("Not enough arguments. Give 3.")
    if sys.argv[1] == 'mbox':
        box = mailbox.mbox(sys.argv[2], create=False)
    elif sys.argv[1] == 'Maildir':
        box = mailbox.Maildir(sys.argv[2])
    else:
        common.fatal("Unknown arg 1: %s" % sys.argv[1])
    log.info("Number of mails: %d" % len(box))   # idiomatic len() over __len__()
    cmd = sys.argv[3]
    for key in box.iterkeys():
        process = subprocess.Popen(cmd, stdin=subprocess.PIPE, shell=True)
        process.communicate(box.get_string(key))
        # communicate() already waited; wait() just fetches the return code
        log.info("For mail %s\n%s\nreturned with code %d" % (key, cmd, process.wait()))
    logging.shutdown()
    sys.exit(0)
def main(): log = logging.getLogger('stderr') # get query if len(sys.argv) == 1: query = sys.stdin.read() else: query = " ".join(sys.argv[1:]) log.info("Your query:\n%s"%query) # do search try: res = search(query, 'stderr') except lrparsing.ParseError: common.fatal("Could not parse query:\n%s\n%s"%(query, traceback.format_exc())) except IOError: common.fatal(traceback.format_exc()) # print result to stdout log.info("\nResult: %d documents"%len(res)) for val in res: print val
def load(fname, defaults, settings):
    """Read a topology file and merge in the default files it includes.

    'includes' controls which of the two default sources (local defaults,
    global settings) are merged into the topology.
    """
    topology = read_yaml(fname)
    if topology is None:
        common.fatal('Cannot read topology file: %s' % sys.exc_info()[0])

    topology.input = [fname]
    topology.setdefault('defaults', {})
    topology.setdefault('includes', ['defaults', 'global_defaults'])

    # 'includes' must be a list; anything else is reported and ignored
    if not isinstance(topology.includes, list):
        common.error(
            "Topology 'includes' element (if present) should be a list",
            category=common.IncorrectValue, module="topology")
        topology.includes = []

    if defaults and 'defaults' in topology.includes:
        include_defaults(topology, defaults)
    if settings and 'global_defaults' in topology.includes:
        include_defaults(topology, settings)

    return topology
def load(fname, defaults, settings):
    """Read a topology file, then layer local and global defaults on top.

    Every file that contributed data is recorded in topology['input'].
    """
    topology = read_yaml(fname)
    if topology is None:
        common.fatal('Cannot open topology file: %s' % sys.exc_info()[0])

    topology['input'] = [fname]
    topology.setdefault('defaults', {})

    # per-project defaults file
    local_defaults = read_yaml(defaults)
    if local_defaults:
        topology['input'].append(defaults)
        topology['defaults'] = common.merge_defaults(
            topology['defaults'], local_defaults)

    # package-wide settings file
    global_defaults = read_yaml(settings)
    if global_defaults:
        topology['input'].append(os.path.relpath(settings))
        topology['defaults'] = common.merge_defaults(
            topology['defaults'], global_defaults)

    return topology
def check_link_type(data, nodes):
    """Validate that a link's type is consistent with its node count.

    Returns True when the link is valid, False otherwise (after reporting
    the problem via common.error/fatal).
    """
    count = link_node_count(data, nodes)
    link_type = data.get('type')

    if not link_type:
        common.fatal('Link type still undefined in check_link_type: %s' % data)
        return False

    problem = None
    if count == 0:
        problem = 'No valid nodes on link %s' % data
    elif link_type == 'stub' and count > 1:
        problem = 'More than one node connected to a stub link: %s' % data
    elif link_type == 'p2p' and count != 2:
        problem = 'Point-to-point link needs exactly two nodes: %s' % data
    elif link_type not in ('stub', 'p2p', 'lan'):
        problem = 'Invalid link type %s: %s' % (link_type, data)

    if problem:
        common.error(problem)
        return False
    return True
def main(): log = logging.getLogger('stderr') # open mailbox and read args box = cmd = None append = True allhashes = None i = 1 while i < len(sys.argv): arg = sys.argv[i] if arg == '--override': if i > 1: common.fatal("You should give --override as first argument!") append = False elif arg == '--mbox': if box: err("Multiple mailboxes given.") i += 1 try: if not append: os.remove(sys.argv[i]) except OSError: pass box = mailbox.mbox(sys.argv[i]) elif arg == '--maildir': if box: err("Multiple mailboxes given.") i += 1 box = mailbox.Maildir(sys.argv[i]) elif arg == '--exec': i += 1 cmd = sys.argv[i] elif arg == '--hashes': i += 1 allhashes = open(sys.argv[i], 'r').read() else: common.fatal("Unknown arg %s"%arg) i += 1 if allhashes == None: allhashes = sys.stdin.read() re_id = re.compile('([0-9A-Fa-f]+)\s+') for count, line in enumerate(allhashes.splitlines(True)): mo = re_id.match(line) if not mo: log.info("Ignoring line %d: %s" % (count, line)) continue docid = mo.group(1) if box == None and not cmd: stream = sys.stdout else: stream = None try: download(docid, box, cmd, stream, 'stderr') except IOError: common.fatal(traceback.format_exc()) if isinstance(box, mailbox.mbox): box.close()
def find():
    """Return the *.pyx files in the cwd; abort unless Cython >= 0.14 exists.

    The version check only runs when there are Cython sources to build.
    """
    pyx_files = glob('*.pyx')
    if not pyx_files:
        return pyx_files

    try:
        import Cython
    except ImportError:
        common.fatal('Cython not installed')
    if not hasattr(Cython, "__version__"):
        common.fatal('Unknown Cython version: assuming too old. Minimum version is "0.14"')

    # encode X.Y as X*100+Y, so the 0.14 minimum becomes the cutoff value 14
    parts = Cython.__version__.split('.', 2)
    major, minor = map(int, parts[0:2])
    if major * 100 + minor < 14:
        common.fatal('Cython version "%s", minimum version is "0.14"' % Cython.__version__)

    return pyx_files
# Top-level CLI setup: parse options into paths, then seed the package plan.
# ('opts', 'args', 'cpp_opts', usage, fatal, Plan, iter_packages come from
# earlier in this file -- outside this view.)
output_path = None
bootstrap_path = None
pool_path = os.environ.get('FAB_POOL_PATH', None)   # env default, -p overrides
for opt, val in opts:
    if opt == '-h':
        usage()
    if opt in ('-o', '--output'):
        output_path = val
    if opt in ('-p', '--pool'):
        pool_path = val
    if opt == "--bootstrap":
        if not os.path.isdir(val):
            fatal("directory does not exist (%s)" % val)
        bootstrap_path = val

plan = Plan(pool_path=pool_path)
if bootstrap_path:
    # seed the plan with everything already in the bootstrap, and record
    # 'bootstrap' as the origin of each of those packages
    bootstrap_packages = set(iter_packages(bootstrap_path))
    plan |= bootstrap_packages
    for package in bootstrap_packages:
        plan.packageorigins.add(package, 'bootstrap')

for arg in args:
    # '-' means read a plan from stdin; an existing path is a plan file
    if arg == "-" or os.path.exists(arg):
        subplan = Plan.init_from_file(arg, cpp_opts, pool_path)
        plan |= subplan
# (tail of the patch-applying function -- its def, 'cmd' and 'patch' are
# outside this view; a failed patch is a warning, not an error)
try:
    executil.system(cmd)
except:
    print >> sys.stderr, "Warning: patch %s failed to apply" % patch

def main():
    """CLI entry point: apply a patch file to a destination directory."""
    try:
        opts, args = getopt.gnu_getopt(sys.argv[1:], "")
    except getopt.GetoptError, e:
        usage(e)
    if not len(args) == 2:
        usage()
    patch = args[0]
    dstpath = args[1]
    if not os.path.isfile(patch):
        fatal("does not exist: " + patch)
    if not os.path.isdir(dstpath):
        fatal("does not exist: " + dstpath)
    apply_patch(patch, dstpath)

if __name__ == "__main__":
    main()
# (tail of the overlay-applying function -- its def and 'cmd' are outside
# this view)
executil.system(cmd, overlay, dstpath)

def main():
    """CLI entry point: copy an overlay tree onto a destination directory.

    --preserve is forwarded to apply_overlay as a keyword argument.
    """
    try:
        opts, args = getopt.gnu_getopt(sys.argv[1:], "", ['preserve'])
    except getopt.GetoptError, e:
        usage(e)
    if not len(args) == 2:
        usage()
    overlay = args[0]
    dstpath = args[1]
    kws = {}
    for opt, val in opts:
        # strip the leading '--' to build keyword arguments
        kws[opt[2:]] = val
    for dir in (overlay, dstpath):
        if not os.path.isdir(dir):
            fatal("does not exist: " + dir)
    apply_overlay(overlay, dstpath, **kws)

if __name__=="__main__":
    main()
    # (tail of the option-parsing loop -- the for/if head is outside this view)
    elif opt in ('-n', '--no-deps'):
        resolve_deps = False
    elif opt in ('-x', '--apt-proxy'):
        apt_proxy = val
    elif opt in ('-i', '--ignore-errors'):
        ignore_errors = val.split(":")
    elif opt in ('-e', '--env'):
        env_conf = val

chroot_path = args[0]
if not os.path.isdir(chroot_path):
    fatal("chroot does not exist: " + chroot_path)

# --no-deps only makes sense together with a package pool
if not pool_path and not resolve_deps:
    fatal("--no-deps cannot be specified if pool is not defined")

if not arch:
    # default to the host architecture
    arch = getoutput("dpkg --print-architecture")

plan = Plan(pool_path=pool_path)
for arg in args[1:]:
    # '-' means read a plan from stdin; an existing path is a plan file
    if arg == "-" or os.path.exists(arg):
        plan |= Plan.init_from_file(arg, cpp_opts, pool_path)
    else:
        plan.add(arg)

if pool_path:  # (body continues outside this view)
def die(message):
    """Run cleanup(), then abort the process with *message* via common.fatal()."""
    cleanup()
    common.fatal(message)
    # (body of checkup_on_dms -- its def line is outside this view; see the
    # identical function elsewhere in this file)
    if not exists(common.DMS_UDS_PATH):
        start(['./server.py'])
    for i in xrange(30):
        if exists(common.DMS_UDS_PATH):
            return
        sleep(0.1)
    fatal("Error: Could not start pydms service. Giving up.")

# Top-level client flow: ensure the server is up, bind a random-named
# socket, connect to the server and send the parsed request.
checkup_on_dms()
common.setup_signal_recording()
request = parse_arguments()
ruds = RichUnixDomainSocket()
ret, desc = ruds.init()
if ret:
    fatal(desc)
# random 10-letter socket name for this client
addr = ''.join(sample(lowercase, 10))
ret, desc = ruds.bind(addr)
if ret:
    die(desc)
# server may have gone away since the first check -- verify again
checkup_on_dms()
ret, desc = ruds.connect(common.DMS_UDS_PATH)
if ret:
    die(desc)
ret, desc = send_request(ruds, request)
if ret:
    die(desc)
# Top-level CLI flow: run a script or an interactive shell inside a chroot.
# ('opts', 'args', usage, fatal, Chroot, etc. come from outside this view.)
script_path = None
for opt, val in opts:
    if opt in ('-s', '--script'):
        script_path = val
    if opt in ('-e', '--env'):
        env_conf = val

if not args:
    usage()
newroot = args[0]
args = args[1:]                 # remaining args are passed to the script/shell
if not isdir(newroot):
    fatal("no such chroot (%s)" % newroot)

chroot = Chroot(newroot, environ=get_environ(env_conf))
# presumably masks initctl inside the chroot for the duration -- TODO confirm
fake_initctl = RevertibleInitctl(chroot)

if script_path:
    err = chroot_script(chroot, script_path, *args)
    sys.exit(err)
else:
    # no script: drop into a shell (default /bin/bash)
    if not args:
        args = ('/bin/bash',)
    err = chroot.system(*args)
    sys.exit(err)
import common
from datetime import timedelta

# Build-stamp bookkeeping: stamps live under ./stamps, in this fixed order.
stamps_directory = join(getcwd(), 'stamps')
stamp_order = ('bootstrap', 'root.spec', 'root.build', 'root.patched',
               'root.sandbox', 'cdroot', 'product.iso')

prog_dir = dirname(abspath(__file__))
default_config_path = join(prog_dir, 'conf', 'make_wrap_default_config.json')
config_path = join(prog_dir, 'conf', 'make_wrap_config.json')

if not isfile(config_path):
    # first run: materialize the config from the bundled defaults
    common.warning("No config found, creating one from defaults")
    if not isfile(default_config_path):
        common.fatal("No default config found!")
    else:
        with open(default_config_path, 'r') as fob1:
            with open(config_path, 'w') as fob2:
                fob2.write(fob1.read())
            # rewind and parse the defaults we just copied
            fob1.seek(0)
            config = json.load(fob1)
else:
    with open(config_path, 'r') as fob1:
        config = json.load(fob1)

HOST = config['host']
PORT = config['port']
# window/app title templated with hostname and current directory
title = config["app_name"].format(hostname=socket.gethostname(),
                                  current_dir=basename(getcwd()))
elif opt in ('-n', '--no-deps'): resolve_deps = False elif opt in ('-x', '--apt-proxy'): apt_proxy = val elif opt in ('-i', '--ignore-errors'): ignore_errors = val.split(":") elif opt in ('-e', '--env'): env_conf = val chroot_path = args[0] if not os.path.isdir(chroot_path): fatal("chroot does not exist: " + chroot_path) if pool_path and not resolve_deps: fatal("--no-deps cannot be specified if pool is not defined") if not arch: arch = getoutput("dpkg --print-architecture") plan = Plan(pool_path=pool_path) for arg in args[1:]: if arg == "-" or os.path.exists(arg): plan |= Plan.init_from_file(arg, cpp_opts, pool_path) else: plan.add(arg) if pool_path:
def die(message):
    """Run cleanup(), then abort the process with *message* via fatal()."""
    cleanup()
    fatal(message)
# (tail of the patch-applying function -- its def, 'cmd' and 'patch' are
# outside this view; a failed patch is a warning, not an error)
try:
    executil.system(cmd)
except:
    print >> sys.stderr, "Warning: patch %s failed to apply" % patch

def main():
    """CLI entry point: apply a patch file to a destination directory."""
    try:
        opts, args = getopt.gnu_getopt(sys.argv[1:], "")
    except getopt.GetoptError, e:
        usage(e)
    if not len(args) == 2:
        usage()
    patch = args[0]
    dstpath = args[1]
    if not os.path.isfile(patch):
        fatal("does not exist: " + patch)
    if not os.path.isdir(dstpath):
        fatal("does not exist: " + dstpath)
    apply_patch(patch, dstpath)

if __name__=="__main__":
    main()
def main():
    """Edit mails in mutt and sync metadata changes back to the DB.

    Flow: collect document ids (--docs FILE or --search QUERY), download
    them into a temporary mbox, open mutt on it, diff message hashes to
    find edits, then optionally record changed ids (--changed FILE) and/or
    upload changed metadata (--upload). --sent additionally uploads and
    truncates the configured sent mbox.
    """
    log = logging.getLogger('stderr')
    elog = logging.getLogger('view')    # error logger
    # here is the contents of the doc ids file saved,
    # if its given with --docs
    allhashes = ''
    squery = None
    # load defaults
    muttrc = config.muttrc
    boxpath = config.temp_mailbox
    sentpath = config.sent_mailbox
    changedhashesfile = None
    doupload = False
    verbose = False
    uploadsent = False
    # load command line args
    i = 1
    while i < len(sys.argv):
        arg = sys.argv[i]
        if arg == '--docs':
            i += 1
            allhashes = open(sys.argv[i], 'r').read()
        elif arg == '--verbose':
            verbose = True
        elif arg == '--tmp':
            i += 1
            boxpath = sys.argv[i]
        elif arg == '--muttrc':
            i += 1
            muttrc = sys.argv[i]
        elif arg == '--changed':
            i += 1
            changedhashesfile = sys.argv[i]
        elif arg == '--upload':
            doupload = True
        elif arg == '--sent':
            uploadsent = True
        elif arg == '--search':
            i += 1
            squery = sys.argv[i]
        else:
            common.fatal("Unknown arg %s" % arg)
        i += 1
    if squery and allhashes:
        common.fatal("Arguments --docs and --search are exclusive!")
    if not squery and not allhashes:
        common.fatal("No documents given. \nTry --docs FILE or --search QUERY .")
    # open temporary mailbox
    if boxpath == None:
        common.fatal("No temporary mailbox given.")
    boxpath = os.path.expanduser(boxpath)
    log.debug("Using temporary mailbox: %s" % boxpath)
    # try to delete old temporary mailbox
    try:
        os.remove(boxpath)
    except OSError:
        pass
    # open
    box = mailbox.mbox(boxpath)
    if muttrc:
        muttrc = '-F ' + muttrc
    else:
        muttrc = ''
    if allhashes:
        ids = []
        # read hashes (one hex id at the start of each line)
        re_id = re.compile('([0-9A-Fa-f]+)\s+')
        for count, line in enumerate(allhashes.splitlines(True)):
            mo = re_id.match(line)
            if mo == None:
                log.info("Ignoring line %d: %s" % (count + 1, line))
                continue
            docid = mo.group(1)
            ids.append(docid)
    if squery:
        try:
            ids = search.search(squery, 'stderr')
        except lrparsing.ParseError:
            common.fatal("Could not parse query:\n%s\n%s" % (squery, traceback.format_exc()))
        except IOError:
            common.fatal(traceback.format_exc())
    if len(ids) == 0:
        common.fatal("No documents found.")
    if len(ids) > 100:
        # large result set: confirm before downloading
        sys.stdout.write("Download %d mails? (y/n): " % len(ids))
        resp = raw_input()
        if resp.lower() != 'y' and resp.lower() != 'yes':
            common.fatal("OK. Exit.")
    # download docs
    log.info("Downloading %d mails." % len(ids))
    for doc in ids:
        try:
            download.download(doc, box=box, logger='stderr')
        except IOError as e:
            common.fatal("Couldnt download mail %s\n %s" % (docid, traceback.format_exc(e)))
    if len(ids) != box.__len__():
        common.fatal("Something strange happened. Not enough mails in mailbox!")
    # hashes taken before editing; compared after mutt exits
    hashes_before = hash_mails(box)
    box.close()
    # open mutt
    cmd = "mutt %s -f %s" % (muttrc, boxpath)
    log.info(cmd)
    retval = subprocess.call(cmd, shell=True)
    log.info("Mutt returned with status %d." % retval)
    if retval:
        common.fatal("Mutt error %d. EXIT. No changes to DB" % retval)
    box = mailbox.mbox(boxpath)
    # detect changes in mbox
    hashes_after = hash_mails(box)
    if len(hashes_before) != len(hashes_after) or len(hashes_before) != len(ids):
        common.fatal("Some mails were deleted (or added). Aborting. \nNo changes made to DB.")
    # filter differing hashes
    changed = filter(lambda pair: pair[1] != pair[2], zip(ids, hashes_before, hashes_after))
    # get (mbox key, docid) only
    changed = map(lambda pair: (pair[1][0], pair[0]), changed)
    log.info("Raw data of %d mails changed." % len(changed))
    # changed is now a list of tuples of (mbox key, docid)
    if verbose:
        uploadlogger = 'stderr'
    else:
        uploadlogger = 'none'
    olddocs = []
    changeddocs = []
    metachanged = []
    metachangeddocs = []
    # check real changes in metadata
    if changedhashesfile or doupload:
        log.info("Checking for meta changes ...")
        # parse changed mails
        for key, docid in changed:
            if not changedhashesfile:
                sys.stdout.write('.')   # progress dots in interactive mode
            try:
                changeddocs.append(upload.parsemail(box.get_string(key), logger=uploadlogger))
            except:
                # save the raw mail for post-mortem, then bail out
                elog.error("Exception while parsing mail:\n %s" % traceback.format_exc())
                upload.save_mail(docid, box.get_string(key))
                logging.shutdown()
                sys.exit(1)
            if not changedhashesfile:
                sys.stdout.flush()
        if not changedhashesfile:
            sys.stdout.write('\n')
        # download old docs
        for _, docid in changed:
            try:
                olddocs.append(common.get_doc(docid, "Could not get doc.", uploadlogger))
            except IOError:
                common.fatal(traceback.format_exc())
        # compare docs
        for chan, changd, oldd in zip(changed, changeddocs, olddocs):
            if not common.eq_mail_meta(changd, oldd):
                metachanged.append(chan)
                metachangeddocs.append(changd)
        log.info("Metadata of %d mails changed." % len(metachanged))
        changed = metachanged
    # write changed mails file
    if changedhashesfile:
        f = open(changedhashesfile, 'w+')
        for key, docid in changed:
            f.write(docid)
            f.write('\n')
        f.close()
    # upload changed mails
    if doupload:
        # TODO ask for upload?
        log.info("Uploading %d mails" % len(changed))
        for (key, docid), mdata in zip(changed, metachangeddocs):
            if not changedhashesfile:
                sys.stdout.write('.')
            try:
                #FIXME
                upload.upload(docid, mdata, override=True, preserveread=False, logger=uploadlogger)
            except:
                elog.error("Exception while parsing or uploading mail:\n %s" % traceback.format_exc())
                upload.save_mail(docid, box.get_string(key))
                logging.shutdown()
                sys.exit(1)
            if not changedhashesfile:
                sys.stdout.flush()
        if not changedhashesfile:
            sys.stdout.write('\n')
    box.close()
    # upload sent mails
    if uploadsent and sentpath:
        # open mailbox
        sentpath = os.path.expanduser(sentpath)
        log.debug("Opening sent mailbox: %s" % sentpath)
        try:
            box = mailbox.mbox(sentpath, create=False)
        except mailbox.NoSuchMailboxError as e:
            common.fatal("Given mailbox for sent mails does not exist: %s" % sentpath)
        log.info("Uploading %d mails in sent mbox %s" % (box.__len__(), sentpath))
        # upload
        for key in box.iterkeys():
            try:
                mail = box.get_string(key)
                mdata = upload.parsemail(mail, logger=uploadlogger)
                doc_id = upload.hash_mail(mail)
                upload.upload(doc_id, mdata, mail, logger=uploadlogger)
            except:
                # best-effort: log, save the raw mail, keep going
                elog.error("Exception while parsing or uploading mail:\n %s" % traceback.format_exc())
                upload.save_mail(docid, box.get_string(key))
                continue
        box.close()
        # truncate file (everything in the sent mbox is now uploaded)
        log.debug("Truncating sent mbox: %s" % sentpath)
        open(sentpath, 'w').close()
    logging.shutdown()
    sys.exit(0)
def main(): log = logging.getLogger('stderr') docid = None dryrun = False i = 1 while i < len(sys.argv): arg = sys.argv[i] if arg == '--docid': i += 1 docid = sys.argv[i] if arg == '--dry': i += 1 dryrun = True else: common.fatal("Unknown arg %s"%arg) i += 1 docids = [] if docid: docids = [docid] else: _, docids = common.get_view('conflicts', logger='stderr') log.info("Number of conflicts to solve: %d"%len(docids)) for docnum, docid in enumerate(docids): print "\n### Document %d of %d"%(docnum, len(docids)) ## download all revisions of document: revs = [] # couchdbs deterministic winnning revision: winningdoc = common.get_doc("%s?conflicts=true"%docid, logger='stderr') revs.append(winningdoc) # other revisions for rev in winningdoc['_conflicts']: doc = common.get_doc("%s?rev=%s"%(docid, rev), logger='stderr') revs.append(doc) log.info("Revisions: %d"%len(revs)) ## print revisions for revnum, rev in enumerate(revs): print "# Revision no. %d"%(revnum+1) print rev['_rev'] print "Date: %s UTC Upload Date: %s UTC"%(rev.get('date', None), rev.get('upload_date', None)) print "From: %s"%' '.join(rev.get('from', ['NONE'])) print "To: %s"%' '.join(rev.get('to', ['NONE'])) print "Labels: %s"%' '.join(rev.get('labels', ['NONE'])) # ask user for preferred revision revno = -1 while revno == -1: sys.stdout.write("# Decide now (s for skip): ") revno = raw_input() if revno == 's': break try: revno = int(revno) except ValueError: print "You jester. A number please!" revno = -1 continue if revno < 1 or revno > len(revs): print "Give a number between 1 to %d (both inclusive)"%len(revs) revno = -1 else: # create bulk doc data bulk = [] for revnum, rev in enumerate(revs): if revnum != revno-1: rev['_deleted'] = True bulk.append(rev) bulkdata = json.dumps({'docs':bulk}) log.debug(bulkdata) if dryrun: print "If this weren't a dry run, I would have updated %d documents now."%len(bulk) else: print "Uploading changes ..." 
r = requests.post("%s/_bulk_docs"%(config.couchdb_url) , auth=config.couchdb_auth, verify=False, data=bulkdata , headers = {'content-type': 'application/json'}) log.debug(('PUT', r.status_code, r.url)) if not r.status_code == 201: raise IOError("Could not upload documents\n %s\n CouchDB response code %d, text: %s\nPUT Data:\n%s" % (r.url, r.status_code, r.text, bulkdata)) logging.shutdown() sys.exit(0)
# Top-level CLI flow: run a script or an interactive shell inside a chroot.
# ('opts', 'args', usage, fatal, Chroot, etc. come from outside this view.)
script_path = None
for opt, val in opts:
    if opt in ('-s', '--script'):
        script_path = val
    if opt in ('-e', '--env'):
        env_conf = val

if not args:
    usage()
newroot = args[0]
args = args[1:]                 # remaining args are passed to the script/shell
if not isdir(newroot):
    fatal("no such chroot (%s)" % newroot)

chroot = Chroot(newroot, environ=get_environ(env_conf))
# presumably masks initctl inside the chroot for the duration -- TODO confirm
fake_initctl = RevertibleInitctl(chroot)

if script_path:
    err = chroot_script(chroot, script_path, *args)
    sys.exit(err)
else:
    # no script: drop into a shell (default /bin/bash)
    if not args:
        args = ('/bin/bash', )
    err = chroot.system(*args)
    sys.exit(err)