def main():
    """Main routine: parse CLI options and run the serial miniterm.

    Builds the argument parser, configures FTDI logging verbosity, optionally
    loads a virtual (YaML-described) USB backend, then starts a MiniTerm
    session on the selected device.  Exits with status 1 on I/O/value errors
    and 2 on keyboard interrupt.
    """
    debug = False
    try:
        default_device = get_default_device()
        # Description comes from the module docstring of this very module.
        argparser = ArgumentParser(description=modules[__name__].__doc__)
        if platform != 'win32':
            # Full terminal mode is not offered on Windows.
            argparser.add_argument('-f', '--fullmode', dest='fullmode',
                                   action='store_true',
                                   help='use full terminal mode, exit with '
                                        '[Ctrl]+B')
        argparser.add_argument('device', nargs='?', default=default_device,
                               help='serial port device name (default: %s)'
                               % default_device)
        argparser.add_argument('-b', '--baudrate',
                               help='serial port baudrate (default: %d)' %
                               MiniTerm.DEFAULT_BAUDRATE,
                               default='%s' % MiniTerm.DEFAULT_BAUDRATE)
        argparser.add_argument('-w', '--hwflow',
                               action='store_true',
                               help='hardware flow control')
        argparser.add_argument('-e', '--localecho',
                               action='store_true',
                               help='local echo mode (print all typed chars)')
        argparser.add_argument('-r', '--crlf',
                               action='count', default=0,
                               help='prefix LF with CR char, use twice to '
                                    'replace all LF with CR chars')
        argparser.add_argument('-l', '--loopback',
                               action='store_true',
                               help='loopback mode (send back all received '
                                    'chars)')
        argparser.add_argument('-s', '--silent', action='store_true',
                               help='silent mode')
        argparser.add_argument('-P', '--vidpid', action='append',
                               help='specify a custom VID:PID device ID, '
                                    'may be repeated')
        argparser.add_argument('-V', '--virtual', type=FileType('r'),
                               help='use a virtual device, specified as YaML')
        argparser.add_argument('-v', '--verbose', action='count',
                               help='increase verbosity')
        argparser.add_argument('-d', '--debug', action='store_true',
                               help='enable debug mode')
        args = argparser.parse_args()
        debug = args.debug

        if not args.device:
            argparser.error('Serial device not specified')

        # Each -v lowers the log threshold by one level (10 units),
        # clamped between DEBUG and ERROR.
        loglevel = max(DEBUG, ERROR - (10 * (args.verbose or 0)))
        loglevel = min(ERROR, loglevel)
        if debug:
            formatter = Formatter('%(asctime)s.%(msecs)03d %(name)-20s '
                                  '%(message)s', '%H:%M:%S')
        else:
            formatter = Formatter('%(message)s')
        FtdiLogger.set_formatter(formatter)
        FtdiLogger.set_level(loglevel)
        FtdiLogger.log.addHandler(StreamHandler(stderr))

        if args.virtual:
            from pyftdi.usbtools import UsbTools
            # Force PyUSB to use PyFtdi test framework for USB backends
            UsbTools.BACKENDS = ('pyftdi.tests.backend.usbvirt', )
            # Ensure the virtual backend can be found and is loaded
            backend = UsbTools.find_backend()
            loader = backend.create_loader()()
            loader.load(args.virtual)

        try:
            add_custom_devices(Ftdi, args.vidpid)
        except ValueError as exc:
            argparser.error(str(exc))

        # args.fullmode only exists off-Windows (see conditional add above).
        full_mode = args.fullmode if platform != 'win32' else False
        init_term(full_mode)
        miniterm = MiniTerm(device=args.device,
                            baudrate=to_bps(args.baudrate),
                            parity='N',
                            rtscts=args.hwflow,
                            debug=args.debug)
        miniterm.run(full_mode, args.loopback, args.silent, args.localecho,
                     args.crlf)

    except (IOError, ValueError) as exc:
        print('\nError: %s' % exc, file=stderr)
        if debug:
            # chain=False keeps the traceback to the innermost exception.
            print(format_exc(chain=False), file=stderr)
        exit(1)
    except KeyboardInterrupt:
        exit(2)
# Module-level logging defaults; consumed by logging setup elsewhere in
# the file (not visible in this chunk).
__deflog__ = {"ls": stderr,     # log stream
              "ll": "default",  # log level
              "lf": "nltm",     # log format
              }
# Named log formats; "nltm" = name / level / time / message, tab-separated.
__logforms__ = {
    "nltm": "%(name)s\t%(levelname)s\t%(asctime)s\t%(message)s"
}
nan = float("nan")
# NOTE(review): module-level handle to the null device, opened for binary
# writing; never closed here — presumably lives for the process lifetime.
devnull = open(os.devnull, "wb")

# monkey patching now to have simpler code later
# Give each argparse.FileType instance a __name__ so it can be displayed
# like a plain type in help/error messages.
file_w = FileType("wb")
file_w.__dict__["__name__"] = "file_w"
file_r = FileType("rb")
file_r.__dict__["__name__"] = "file_r"
file_a = FileType("a+")
file_a.__dict__["__name__"] = "file_a"

# automatic import of mokelib
class MokeError(Exception):
    """General moke exception.
    """
    pass

# NOTE(review): the definition below is truncated in this chunk — its
# docstring and body continue past the visible source.
def num(x):
    """(internal) check if value is a number.
gc_parser.set_defaults(function=main_gc) dump_parser = subcommands.add_parser( "dump", description="dump names from the backend", help="dump names from the backend" ) dump_parser.add_argument( "name", type=str, help="name to dump" ) dump_parser.add_argument( "-o", "--output", type=FileType("wb"), default=sys.stdout.buffer, help="file to write the content to" ) dump_parser.add_argument( "-p", "--protocol", type=int, default=pickle.DEFAULT_PROTOCOL, help="pickle protocol to use, defaults to pickle.DEFAULT_PROTOCOL" ) dump_parser.set_defaults(function=main_dump) def main(argv: Optional[List[str]] = None, stdout: TextIO = sys.stdout) -> int: """cli entry point"""
def main():
    """standard main function

    CLI entry point for css-extract: builds a list of extraction job
    dictionaries from --files / --jobs / --matfile input and hands them to
    ``mp_extract`` for (possibly parallel) spike extraction.
    """
    # standard options
    nWorkers = 5        # worker processes passed to mp_extract
    blocksize = 10000   # records per extraction job
    parser = ArgumentParser(prog='css-extract',
                            description='spike extraction from .ncs files',
                            epilog='Johannes Niediek ([email protected])')
    parser.add_argument('--files', nargs='+',
                        help='.ncs files to be extracted')
    parser.add_argument('--start', type=int,
                        help='start index for extraction')
    parser.add_argument('--stop', type=int,
                        help='stop index for extraction')
    parser.add_argument('--jobs', nargs=1,
                        help='job file contains one filename per row')
    parser.add_argument('--matfile', nargs=1,
                        help='extract data from a matlab file')
    parser.add_argument('--h5', action='store_true', default=False,
                        help='assume that files are h5 files')
    parser.add_argument('--matfile-scale-factor', nargs='?', type=float,
                        help='rescale matfile data by this factor'
                             ' (to obtain microvolts)',
                        default=1)
    parser.add_argument('--destination', nargs=1,
                        help='folder where spikes should be saved')
    parser.add_argument('--refscheme', nargs=1, type=FileType(mode='r'),
                        help='scheme for re-referencing')
    args = parser.parse_args()

    # At least one input source is required.
    if ((args.files is None) and
            (args.matfile is None) and
            (args.jobs is None)):
        parser.print_help()
        print('Supply either files or jobs or matfile.')
        return

    if args.destination is not None:
        destination = args.destination[0]
    else:
        destination = ''

    # special case for a matlab file: one single-job extraction, no blocking
    if args.matfile is not None:
        jname = os.path.splitext(os.path.basename(args.matfile[0]))[0]
        jobs = [{'name': jname,
                 'filename': args.matfile[0],
                 'is_matfile': True,
                 'count': 0,
                 'destination': destination,
                 'scale_factor': args.matfile_scale_factor}]
        mp_extract(jobs, 1)
        return

    if args.h5:
        jobs = []
        for f in args.files:
            size = get_h5size(f)
            # Split into blocks of 32000 * 5 * 60 samples — presumably
            # 5 minutes at 32 kHz; confirm against the recording rate.
            starts = list(range(0, size, 32000 * 5 * 60))
            stops = starts[1:] + [size]
            name = os.path.splitext(os.path.basename(f))[0]
            for i in range(len(starts)):
                jdict = {'name': name,
                         'filename': f,
                         'start': starts[i],
                         'stop': stops[i],
                         'is_h5file': True,
                         'count': i,
                         'destination': destination}
                jobs.append(jdict)
        mp_extract(jobs, nWorkers)
        return

    if args.jobs:
        with open(args.jobs[0], 'r') as f:
            files = [a.strip() for a in f.readlines()]
            # NOTE(review): close() is redundant inside the with-block.
            f.close()
        print('Read jobs from ' + args.jobs[0])
    else:
        files = args.files

    # NOTE(review): this guard only catches a literal None first element;
    # an empty list would raise IndexError — verify intended behavior.
    if files[0] is None:
        print('Specify files!')
        return

    # construct the jobs
    jobs = []
    references = None
    if args.refscheme:
        import csv
        # Mapping of filename -> reference channel, read from a
        # semicolon-delimited scheme file.
        reader = csv.reader(args.refscheme[0], delimiter=';')
        references = {line[0]: line[1] for line in reader}

    for f in files:
        if args.start:
            start = args.start
        else:
            start = 0

        nrecs = get_nrecs(f)
        if args.stop:
            stop = min(args.stop, nrecs)
        else:
            stop = nrecs

        # If the trailing remainder is more than half a block, back the last
        # block start off by a full blocksize so the final job is not tiny.
        if stop % blocksize > blocksize / 2:
            laststart = stop - blocksize
        else:
            laststart = stop

        starts = list(range(start, laststart, blocksize))
        stops = starts[1:] + [stop]
        name = os.path.splitext(os.path.basename(f))[0]

        if references is not None:
            # NOTE(review): raises KeyError if a file is missing from the
            # refscheme mapping.
            reference = references[f]
            print('{} (re-referenced to {})'.format(f, reference))
        else:
            reference = None

        print(name)
        for i in range(len(starts)):
            jdict = {'name': name,
                     'filename': f,
                     'start': starts[i],
                     'stop': stops[i],
                     'count': i,
                     'destination': destination,
                     'reference': reference}
            jobs.append(jdict)

    mp_extract(jobs, nWorkers)
# NOTE(review): this section references `out_lines` and `fh`, which are not
# defined at this level — it appears to be the tail of an enclosing function
# (likely `main`) that begins before this chunk; verify indentation against
# the full file.
out_data = [out_line.as_dict() for out_line in out_lines]
for out_line in out_data:
    out_line['context_freq'] = int(out_line['context_freq'])
    # Collapse the three numbered response fields (and their *_l variants)
    # into parallel lists, then drop the originals.
    out_line['response_nl'] = [out_line['response_nl1'],
                               out_line['response_nl2'],
                               out_line['response_nl3']]
    out_line['response_nl_l'] = [out_line['response_nl1_l'],
                                 out_line['response_nl2_l'],
                                 out_line['response_nl3_l']]
    del out_line['response_nl1']
    del out_line['response_nl1_l']
    del out_line['response_nl2']
    del out_line['response_nl2_l']
    del out_line['response_nl3']
    del out_line['response_nl3_l']
# ensure_ascii=False keeps non-ASCII text readable in the output file.
json.dump(out_data, fh, ensure_ascii=False, indent=4, sort_keys=True)


if __name__ == '__main__':
    ap = ArgumentParser()
    ap.add_argument('-f', '--finished-csv-delim', type=str, default=",")
    ap.add_argument('-i', '--input-csv-delim', type=str, default="\t")
    ap.add_argument('input_file', type=FileType('r'))
    ap.add_argument('output_file_name', type=str)
    ap.add_argument('finished_files', type=FileType('r'), nargs='+')
    args = ap.parse_args()
    main(args)
def arg_parse():
    """Build the command-line interface and return the parsed arguments.

    Options:
        --data:   input JSON file (opened for reading)
        --output: output TSV file (opened for writing)
    """
    cli = ArgumentParser()
    cli.add_argument('--data', type=FileType('r'), help='Input json file')
    cli.add_argument('--output', type=FileType('w'), help='Output tsv file')
    return cli.parse_args()
def modules():
    """Render the module table from ``modules.yaml``.

    Loads the YAML list of module records, HTML-escapes each record's
    ``task`` field, and prints ``module_html`` formatted with the record's
    fields.
    """
    with open('modules.yaml') as fp:
        # safe_load: yaml.load() without an explicit Loader is deprecated
        # (PyYAML >= 5.1) and can construct arbitrary Python objects from
        # untrusted input; safe_load restricts to plain data.
        records = yaml.safe_load(fp)
    for module in records:
        module['task'] = html.escape(module['task'])
        print(module_html.format(**module))


if __name__ == '__main__':
    from argparse import ArgumentParser, FileType
    from sys import stdin

    parser = ArgumentParser()
    # FileType() opens the named file for reading; defaults to stdin.
    parser.add_argument('--file', type=FileType(), default=stdin)
    args = parser.parse_args()

    for line in args.file:
        line = line[:-1]  # trim newline
        match = find_code(line)
        if match:
            # Code marker line: render the python/go snippet pair.
            name = match.group(1)
            py = htmlize(code_for(name, 'python'), 'python')
            go = htmlize(code_for(name, 'go'), 'go')
            print(table_html.format(py=py, go=go))
        elif line.strip() == ':modules:':
            modules()
        else:
            # Pass everything else through unchanged.
            print(line)