import os
import sys

# Ansible 2.0-2.3 API (assumed from the calls below)
from ansible.inventory import Inventory
from ansible.parsing.dataloader import DataLoader
from ansible.vars import VariableManager


def listfunction(llist):
    """List the hosts in each inventory group, or only in the chosen group."""
    # inventory_file, chosen_group and single are module-level globals
    variable_manager = VariableManager()
    loader = DataLoader()
    if not os.path.isfile(inventory_file):
        print "%s is not a file - halting. Consider using the '--inventory $path/to/ansible_inventory file' parameter" % inventory_file
        sys.exit(1)
    else:
        inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=inventory_file)

    if chosen_group and single:
        def traverse(agroup, hostset):
            """Recursive depth-first traversal"""
            for child in agroup.child_groups:
                traverse(child, hostset)
            if len(agroup.hosts) > 0:
                hostset.add(agroup.hosts[0].name.encode('utf8'))
        single_hosts = set()
        traverse(inventory.groups[chosen_group], single_hosts)
        return {chosen_group:list(single_hosts)}

    if chosen_group:
        thegroup = inventory.groups[chosen_group]
        newhosts = []
        for h in thegroup.get_hosts():
            newhosts.append(h.name.encode('utf8'))
        return {chosen_group:newhosts}
    else:
        all_groups = {}
        for g in inventory.groups:
            newhosts = []
            for h in inventory.get_group(g).get_hosts():
                newhosts.append(h.name.encode('utf8'))
            all_groups[g] = newhosts
        return all_groups
Example #2
    def _clone_test_db(self, suffix, verbosity, keepdb=False):
        source_database_name = self.connection.settings_dict['NAME']
        target_database_name = self.get_test_db_clone_settings(suffix)['NAME']
        test_db_params = {
            'dbname': self.connection.ops.quote_name(target_database_name),
            'suffix': self.sql_table_creation_suffix(),
        }
        with self._nodb_connection.cursor() as cursor:
            try:
                self._execute_create_test_db(cursor, test_db_params, keepdb)
            except Exception:
                try:
                    if verbosity >= 1:
                        self.log('Destroying old test database for alias %s...' % (
                            self._get_database_display_str(verbosity, target_database_name),
                        ))
                    cursor.execute('DROP DATABASE %(dbname)s' % test_db_params)
                    self._execute_create_test_db(cursor, test_db_params, keepdb)
                except Exception as e:
                    self.log('Got an error recreating the test database: %s' % e)
                    sys.exit(2)

        dump_cmd = DatabaseClient.settings_to_cmd_args(self.connection.settings_dict)
        dump_cmd[0] = 'mysqldump'
        dump_cmd[-1] = source_database_name
        load_cmd = DatabaseClient.settings_to_cmd_args(self.connection.settings_dict)
        load_cmd[-1] = target_database_name

        dump_proc = subprocess.Popen(dump_cmd, stdout=subprocess.PIPE)
        load_proc = subprocess.Popen(load_cmd, stdin=dump_proc.stdout, stdout=subprocess.PIPE)
        dump_proc.stdout.close()    # allow dump_proc to receive a SIGPIPE if load_proc exits.
        load_proc.communicate()
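
The dump-and-load pipe above follows a general pattern. A minimal standalone sketch of it (using cat and grep as stand-in commands): closing the parent's copy of the producer's stdout is what allows the producer to receive SIGPIPE if the consumer exits early.

import subprocess

producer = subprocess.Popen(['cat', '/etc/hosts'], stdout=subprocess.PIPE)
consumer = subprocess.Popen(['grep', 'localhost'], stdin=producer.stdout,
                            stdout=subprocess.PIPE)
producer.stdout.close()  # let producer receive SIGPIPE if consumer exits early
output, _ = consumer.communicate()
print(output.decode())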
Example #3
def make_cache_level(ncaches, prototypes, level, next_cache):
    global next_subsys_index, proto_l1, testerspec, proto_tester

    index = next_subsys_index[level]
    next_subsys_index[level] += 1

    # Create a subsystem to contain the crossbar and caches, and
    # any testers
    subsys = SubSystem()
    setattr(system, "l%dsubsys%d" % (level, index), subsys)

    # The levels are indexed backwards through the list
    ntesters = testerspec[len(cachespec) - level]

    # Scale the progress threshold as testers higher up in the tree
    # (smaller level) get a smaller portion of the overall bandwidth,
    # and also make the interval of packet injection longer for the
    # testers closer to the memory (larger level) to prevent them
    # hogging all the bandwidth
    limit = (len(cachespec) - level + 1) * 100000000
    testers = [proto_tester(interval=10 * (level * level + 1), progress_check=limit) for i in xrange(ntesters)]
    if ntesters:
        subsys.tester = testers

    if level != 0:
        # Create a crossbar and add it to the subsystem, note that
        # we do this even with a single element on this level
        xbar = L2XBar()
        subsys.xbar = xbar
        if next_cache:
            xbar.master = next_cache.cpu_side

        # Create and connect the caches, both the ones fanning out
        # to create the tree, and the ones used to connect testers
        # on this level
        tree_caches = [prototypes[0]() for i in xrange(ncaches[0])]
        tester_caches = [proto_l1() for i in xrange(ntesters)]

        subsys.cache = tester_caches + tree_caches
        for cache in tree_caches:
            cache.mem_side = xbar.slave
            make_cache_level(ncaches[1:], prototypes[1:], level - 1, cache)
        for tester, cache in zip(testers, tester_caches):
            tester.port = cache.cpu_side
            cache.mem_side = xbar.slave
    else:
        if not next_cache:
            print "Error: No next-level cache at top level"
            sys.exit(1)

        if ntesters > 1:
            # Create a crossbar and add it to the subsystem
            xbar = L2XBar()
            subsys.xbar = xbar
            xbar.master = next_cache.cpu_side
            for tester in testers:
                tester.port = xbar.slave
        else:
            # Single tester
            testers[0].port = next_cache.cpu_side
Example #4
def copy_hardware(hemps_path, testcase_path, system_model_description):
    
    source_hw_path = hemps_path+"/hardware"
    testcase_hw_path = testcase_path+"/hardware"
    
    #Creates the directory in the testcase path
    create_ifn_exists(testcase_hw_path)
    
    if system_model_description == "sc" or system_model_description == "scmod":
        
        delete_if_exists(testcase_hw_path+"/vhdl")
        source_hw_path = source_hw_path+"/sc"
        testcase_hw_path = testcase_hw_path+"/sc"
        ignored_names_list = [".svn" , ".vhd"]
        
    elif system_model_description == "vhdl":
        
        delete_if_exists(testcase_hw_path+"/sc")
        source_hw_path = source_hw_path+"/vhdl"
        testcase_hw_path = testcase_hw_path+"/vhdl"
        ignored_names_list = [".svn" , ".h", ".cpp"]
        
    else:
        sys.exit('Error in system_model_description - you must provide a compatible system model description')
    
    generic_copy(source_hw_path, testcase_hw_path, ignored_names_list)
Example #5
    def stop(self):
        """Stop the daemon."""
        # Get the pid from the pidfile
        try:
            pf = open(self.pidfile, 'r')
            pid = int(pf.read().strip())
            pf.close()
        except IOError:
            pid = None

        if not pid:
            message = "pidfile %s does not exist. Daemon not running?\n"
            sys.stderr.write(message % self.pidfile)
            return  # not an error in a restart

        # Try killing the daemon process
        try:
            while 1:
                os.kill(pid, SIGTERM)
                time.sleep(0.1)
        except OSError, err:
            err = str(err)
            if err.find("No such process") > 0:
                if os.path.exists(self.pidfile):
                    os.remove(self.pidfile)
            else:
                print str(err)
                sys.exit(1)
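
For context, stop() relies on a pidfile written when the daemon starts; a minimal sketch of just that bookkeeping (the full double-fork daemonization is omitted):

import os

def write_pidfile(pidfile):
    # Record the daemon's pid so a later stop() can read and signal it.
    with open(pidfile, 'w') as pf:
        pf.write('%d\n' % os.getpid())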
Example #6
    def bug_found(self):
        """
        Builds a crash-report when StarCluster encounters an unhandled
        exception. Report includes system info, python version, dependency
        versions, and a full debug log and stack-trace of the crash.
        """
        dashes = '-' * 10
        header = dashes + ' %s ' + dashes + '\n'
        crashfile = open(static.CRASH_FILE, 'w')
        argv = sys.argv[:]
        argv[0] = os.path.basename(argv[0])
        argv = ' '.join(argv)
        crashfile.write(header % "SYSTEM INFO")
        crashfile.write("StarCluster: %s\n" % __version__)
        crashfile.write("Python: %s\n" % sys.version.replace('\n', ' '))
        crashfile.write("Platform: %s\n" % platform.platform())
        dependencies = ['boto', 'paramiko', 'Crypto']
        for dep in dependencies:
            self.__write_module_version(dep, crashfile)
        crashfile.write("\n" + header % "CRASH DETAILS")
        crashfile.write('Command: %s\n\n' % argv)
        for line in logger.get_session_log():
            crashfile.write(line)
        crashfile.close()
        print
        log.error("Oops! Looks like you've found a bug in StarCluster")
        log.error("Crash report written to: %s" % static.CRASH_FILE)
        log.error("Please remove any sensitive data from the crash report")
        log.error("and submit it to [email protected]")
        sys.exit(1)
Example #7
    def load_commands_from_entry_point(self, group='mach.providers'):
        """Scan installed packages for mach command provider entry points. An
        entry point is a function that returns a list of paths to files or
        directories containing command providers.

        This takes an optional group argument which specifies the entry point
        group to use. If not specified, it defaults to 'mach.providers'.
        """
        try:
            import pkg_resources
        except ImportError:
            print("Could not find setuptools, ignoring command entry points",
                  file=sys.stderr)
            return

        for entry in pkg_resources.iter_entry_points(group=group, name=None):
            paths = entry.load()()
            if not isinstance(paths, Iterable):
                print(INVALID_ENTRY_POINT % entry)
                sys.exit(1)

            for path in paths:
                if os.path.isfile(path):
                    self.load_commands_from_file(path)
                elif os.path.isdir(path):
                    self.load_commands_from_directory(path)
                else:
                    print("command provider '%s' does not exist" % path)
Example #8
def main(name):
  try:
    from python_qt_binding.QtGui import QApplication
  except ImportError:
    print >> sys.stderr, "please install 'python_qt_binding' package!!"
    sys.exit(-1)

  masteruri = init_cfg_path()
  parser = init_arg_parser()
  args = rospy.myargv(argv=sys.argv)
  parsed_args = parser.parse_args(args[1:])
  # Initialize Qt
  global app
  app = QApplication(sys.argv)

  # decide to show main or echo dialog
  global main_form
  if parsed_args.echo:
    main_form = init_echo_dialog(name, masteruri, parsed_args.echo[0], parsed_args.echo[1], parsed_args.hz)
  else:
    main_form = init_main_window(name, masteruri, parsed_args.file)

  # resize and show the qt window
  if not rospy.is_shutdown():
    os.chdir(PACKAGE_DIR) # change directory so the images referenced in descriptions can be found
    main_form.resize(1024, 720)
    screen_size = QApplication.desktop().availableGeometry()
    if main_form.size().width() >= screen_size.width() or main_form.size().height() >= screen_size.height()-24:
      main_form.showMaximized()
    else:
      main_form.show()
    exit_code = -1
    rospy.on_shutdown(finish)
    exit_code = app.exec_()
Example #9
def connectToDB():
    """
    _connectToDB_
    
    Connect to the database specified in the WMAgent config.
    """
    if not os.environ.has_key("WMAGENT_CONFIG"):
        print "Please set WMAGENT_CONFIG to point at your WMAgent configuration."
        sys.exit(1)
        
    if not os.path.exists(os.environ["WMAGENT_CONFIG"]):
        print "Can't find config: %s" % os.environ["WMAGENT_CONFIG"]
        sys.exit(1)

    wmAgentConfig = loadConfigurationFile(os.environ["WMAGENT_CONFIG"])
    
    if not hasattr(wmAgentConfig, "CoreDatabase"):
        print "Your config is missing the CoreDatabase section."
        sys.exit(1)

    socketLoc = getattr(wmAgentConfig.CoreDatabase, "socket", None)
    connectUrl = getattr(wmAgentConfig.CoreDatabase, "connectUrl", None)
    (dialect, junk) = connectUrl.split(":", 1)

    myWMInit = WMInit()
    myWMInit.setDatabaseConnection(dbConfig = connectUrl, dialect = dialect,
                                   socketLoc = socketLoc)
    return
Example #10
def run(arguments=sys.argv[1:]):
    # parse the command line arguments
    (options, command) = parse_args(arguments)

    # ensure the binary is given
    if not options.binary:
        print "Please provide a path to your Firefox binary: -b, --binary"
        sys.exit(1)

    # set the BROWSER_PATH environment variable so that
    # subshells will be able to invoke mozrunner
    os.environ['BROWSER_PATH'] = options.binary

    # Parse the manifest
    mp = TestManifest(manifests=(options.manifest,), strict=False)

    # run + report
    if command == "testpy":
        tests = mp.active_tests(disabled=False)
        results = test_all_python(mp.get(tests=tests, type='python'), options)
        if results.failures or results.errors:
            sys.exit(report(True, results, None, options))
        else:
            sys.exit(report(False))

    elif command == "testjs":
        tests = mp.active_tests(disabled=False)
        results = test_all_js(mp.get(tests=tests, type='javascript'), options)
        if results.fails:
            sys.exit(report(True, None, results, options))
        else:
            sys.exit(report(False))

    elif command == "testall":
        test_all(mp.active_tests(disabled=False), options)
Example #11
    def _act_on_pillows(self, action):
        # Used to stop or start pillows
        service = Pillowtop(self.environment, AnsibleContext(None))
        exit_code = service.run(action=action)
        if exit_code != 0:
            print("ERROR while trying to {} pillows. Exiting.".format(action))
            sys.exit(1)
Example #12
    def _collect(self, lines):
        elements = {}
        tag = None
        for line_tmp in lines:
            line = line_tmp.replace('!', '#').split('#')[0]
            for val in [x.lower() for x in line.split()]:
                if val in self._set_methods:
                    tag = val
                    elements[tag] = []
                elif tag is not None:
                    elements[tag].append(val)

        for tag in ['natom', 'ntypat']:
            if tag not in elements:
                print("%s is not found in the input file." % tag)
                sys.exit(1)

        for tag in elements:
            self._values = elements[tag]
            if tag == 'natom' or tag == 'ntypat':
                self._set_methods[tag]()

        for tag in elements:
            self._values = elements[tag]
            if tag != 'natom' and tag != 'ntypat':
                self._set_methods[tag]()
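
A standalone sketch of the tag-collection logic above, with hypothetical input: '#' starts a comment, '!' is treated the same, and tokens after a known tag accumulate until the next tag.

lines = [
    "natom 2          # two atoms",
    "ntypat 1 ! one atom type",
]
elements, tag = {}, None
for line_tmp in lines:
    line = line_tmp.replace('!', '#').split('#')[0]
    for val in [x.lower() for x in line.split()]:
        if val in ('natom', 'ntypat'):
            tag = val
            elements[tag] = []
        elif tag is not None:
            elements[tag].append(val)
print(elements)  # {'natom': ['2'], 'ntypat': ['1']}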
Example #13
    def handle_completion(self):
        if self.is_completion_active():
            gparser = self.create_global_parser(no_usage=True, add_help=False)
            # set sys.path to COMP_LINE if it exists
            self._init_completion()
            # fetch the global options
            gopts = self.get_global_opts()
            # try to load StarClusterConfig into global options
            if gopts:
                try:
                    cfg = config.StarClusterConfig(gopts.CONFIG)
                    cfg.load()
                except exception.ConfigError:
                    cfg = None
                gopts.CONFIG = cfg
            scmap = {}
            for sc in commands.all_cmds:
                sc.gopts = gopts
                for n in sc.names:
                    scmap[n] = sc
            listcter = completion.ListCompleter(scmap.keys())
            subcter = completion.NoneCompleter()
            completion.autocomplete(gparser, listcter, None, subcter,
                                    subcommands=scmap)
            sys.exit(1)
Example #14
def links(args):
    """
    %prog links url

    Extract all the links "<a href=''>" from web page.
    """
    p = OptionParser(links.__doc__)
    p.add_option("--img", default=False, action="store_true",
                 help="Extract <img> tags [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    url, = args
    img = opts.img

    htmlfile = download(url)
    page = open(htmlfile).read()
    soup = BeautifulSoup(page)

    tag = 'img' if img else 'a'
    src = 'src' if img else 'href'
    aa = soup.findAll(tag)
    for a in aa:
        link = a.get(src)
        link = urljoin(url, link)
        print(link)
Example #15
def chainref_cmd ( cmd, host='/tmp/chainrefd', log=None ):
    if ( len(cmd) > 1 ):
        arg = cmd[1]
    else:
        arg = None

    if ( cmd[0] not in COMMANDS ):
        print 'chainref-cmd: invalid command \'%s\'' % cmd[0]
        return

    if ( cmd[0].startswith('ajax') ):
        host = '/tmp/chainref-ajax-0'

    if ( arg != None ):
        content = COMMANDS[cmd[0]] ( arg )
    else:
        content = COMMANDS[cmd[0]] ( )

    s = socket.socket ( socket.AF_UNIX, socket.SOCK_STREAM )

    try:
        s.connect ( host )
        s.send ( content )
    except Exception,e:
        print 'chainref-cmd: communication with server failed: %s' % str(e)
        s.close ( )
        sys.exit ( 1 )
Example #16
    def clean_up(self):
        """ Move DQ outputs to their appropriate directory """
        try:
            data_dir = os.environ["DATA"]
            plots_dir = os.environ["PLOTS"]
            logs_dir = os.environ["LOGS"]
        except KeyError as detail:
            print "GenerateSpectrum.clean_up: error", detail, "not set"
            print " --> source analysis environment scripts before running!"
            sys.exit(1)
        for root, dirs, files in os.walk(os.getcwd()):
            for file in files:
                is_data = re.search(r".*\.root$", file)
                is_plot = re.search(r".*\.png$", file)
                hostname = socket.gethostname()
                is_log = re.search(r"^rat\."+hostname+r"\.[0-9]+\.log$", file)
                if is_data:
                    try:
                        root_file = TFile(file)
                        tree = root_file.Get("T")
                        tree.ls()
                    except ReferenceError as detail:
                        print "generate_spectrum.clean_up: error in TFile,", detail
                        sys.exit(1)
                    file_manips.copy_file(os.path.join(root, file), data_dir)
                elif is_plot:
                    file_manips.copy_file(os.path.join(root, file), plots_dir)
                elif is_log:
                    file_manips.copy_file(os.path.join(root, file), logs_dir)
Example #17
def main():

    parser = argparse.ArgumentParser(description='Deploy interface.')
    parser.add_argument('--version', action='version', version=APP + " " + VERSION)
    parser.add_argument('--logging', dest='log_level', action='store',
                        default='DEBUG', choices=['DEBUG', 'INFO'],
                        help='Minimum level of logging message to show. Default (DEBUG)')

    subparsers = parser.add_subparsers(dest='cmd')

    parser_a = subparsers.add_parser('install',
                                     help='Run install')
    parser_a.set_defaults(func=install)

    parser_a = subparsers.add_parser('uninstall',
                                     help='Run uninstall')
    parser_a.set_defaults(func=uninstall)

    if len(sys.argv) == 1:
        error_msg = "ERROR: No arguments supplied!"
        print >> sys.stderr, error_msg
        parser.print_help()
        sys.exit(1)

    args = parser.parse_args()

    logging.basicConfig(level=args.log_level,
                        format=LOGGING_FORMAT)

    args.func(args)
Example #18
def lookup_command(command_name):
    """Lookup a command.

    command_name: the command name

    Returns: a method which implements that command
    """
    BASE_COMMANDS = {'help': print_help}

    REPLICATION_COMMANDS = {'compare': replication_compare,
                            'dump': replication_dump,
                            'livecopy': replication_livecopy,
                            'load': replication_load,
                            'size': replication_size}

    commands = {}
    for command_set in (BASE_COMMANDS, REPLICATION_COMMANDS):
        commands.update(command_set)

    try:
        command = commands[command_name]
    except KeyError:
        if command_name:
            sys.exit(_("Unknown command: %s") % command_name)
        else:
            command = commands['help']
    return command
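
Usage follows the usual dispatch-table pattern: resolve the name once, then call the returned function (a sketch; the real commands may take arguments, which are elided here):

import sys

command = lookup_command(sys.argv[1] if len(sys.argv) > 1 else '')
command()  # e.g. print_help() when no command name was given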
Example #19
    def read(self,filename):

        try:
            self.file = open(filename, 'r')
        except IOError, (errno, strerror):
            print "Error Opening %s. (%s): %s" % (filename,errno, strerror)
            sys.exit(1)
Example #20
    def call_all_circuits_with_seed(self, command, iteration, seed):
        stats_pattern = re.compile(self.stats_regex, re.DOTALL)
        self.command = command

        for circuit in self.circuits:
            print('    ' + circuit)
            out, err = self.call_circuit(command, circuit, iteration, seed)

            if err:
                print(err)
                print('There was a problem with circuit "{0}"'.format(circuit))
                sys.exit(1)

            # Get and save statistics
            match = stats_pattern.search(out)

            if match is None:
                print(out)
                print('Failed to match pattern: {0}'.format(self.stats_regex))
                sys.exit(1)

            for metric in self.metrics:
                group_name = metric.lower().replace(' ', '_')
                result = match.group(group_name)
                if not result:
                    result = 0

                self.results[circuit][metric].append(float(result))
Example #21
def initialize_browser(course, email, password):
    # Use mechanize to handle cookies
    print
    print 'Initializing browser session...'
    br = mechanize.Browser()
    cj = cookielib.LWPCookieJar()
    br.set_cookiejar(cj)
    br.set_handle_equiv(True)
    #br.set_handle_gzip(True)
    br.set_handle_redirect(True)
    br.set_handle_referer(True)
    br.set_handle_robots(False)
    br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time = 0)
    auth_url = 'https://www.coursera.org/****/auth/auth_redirector?type=login&subtype=normal&email'.replace('****', course)
    br.open(auth_url)

    br.select_form(nr = 0)
    br.form['email'] =  email
    br.form['password'] = password
    br.submit()
    print 'It may take a few seconds to log in and resolve the resources to download...\n'

    #Check if email + password submitted correctly
    if 'https://www.coursera.org/****/auth/login_receiver?data='.replace('****', course) not in br.geturl():
        print 'Failed to login, exit...'
        sys.exit(1)

    video_lectures = 'https://www.coursera.org/****/lecture/index'.replace('****', course)
    br.open(video_lectures)
    return br
Example #22
def main():
    topdir = detect_topdir()
    if topdir is None:
        print('Could not locate topdir. You must run ``global-wasp`` '
              'either from a file tree '
              'containing a ``wasp`` file or from a build directory '
              'containing an initialized `{0}` file.'.format(CACHE_FILE))
        sys.exit(1)
    unpack_dir = os.path.join(topdir, UNPACK_DIR)
    if not os.path.exists(unpack_dir):
        fname = os.path.join(topdir, 'wasp')
        code = []
        with open(fname, 'r') as f:
            start = False
            for line in f:
                if 'wasp_packed=[' in line:
                    start = True
                if line == '\n' and start:
                    break
                if start:
                    code.append(line)
        vs = {}
        exec(''.join(code), vs, vs)
        unpack(unpack_dir, vs['wasp_packed'])
    sys.path.append(unpack_dir)
    run(topdir, unpack_dir)
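
The unpack step above relies on executing extracted source with a private namespace dict and reading the resulting binding back; a tiny standalone sketch of that trick:

code = "wasp_packed = [('a.py', 'abc123'), ('b.py', 'def456')]"
namespace = {}
exec(code, namespace, namespace)
print(namespace['wasp_packed'][0])  # ('a.py', 'abc123')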
Example #23
    def auth_dialog(self):
        try:
            with open(CONF_FILE) as conf:
                token = json.load(conf)
        except (OSError, json.JSONDecodeError, KeyError):
            print("Your token file doesn't exist or is malformed.")
            print("If you WANT to proceed anonymously, pass "
                  "`-a/--anonymous` on the command line.")
            print("Or you can create a new token now. "
                  "Create token? [y/n] ", end='')
            if input().lower() in ('y', 'yes'):
                username = input('Username: ')
                # the next line was redacted in the source; getpass is an assumed stand-in
                password = getpass('Password: ')
                note = 'gist3_{}'.format(id_gen())
                resp = req.post(API_BASE + '/authorizations',
                                auth=(username, password),
                                json={'scopes': ['gist'],
                                      'note': note})
                if resp.status_code == req.codes.created:
                    token = resp.json()['token']
                    with open(CONF_FILE, 'w') as conf:
                        json.dump(token, conf)
                    print('Token created & saved to {}'.format(CONF_FILE))
                else:
                    print('There was an error from github: ')
                    print(json.dumps(resp.json(), sort_keys=True, indent=4))
                    sys.exit(2)
            else:
                print('Aborting...')
                sys.exit(0)
        return AccessTokenAuth(token)
Example #24
def option_none(option, opt, value, parser):
    """Check that an option which takes no value was not given one."""
    if parser.rargs and not parser.rargs[0].startswith('-'):
        print "Option arg error"
        print opt, " option should be empty"
        sys.exit(2)
    setattr(parser.values, option.dest, True)
Example #25
def main():
    if sys.argv[1:]:
        try:
            fp = open(sys.argv[1], 'r')
        except IOError, msg:
            print 'Can\'t open "%s":' % sys.argv[1], msg
            sys.exit(1)
Example #26
def main():
    idir, ofile, dffile = _parse_cmdline()

    print u'Loading doc-freqs file {}...'.format(dffile)
    with open(dffile, 'rb') as f:
        df = pickle.load(f)    

    print u'Reading input directory: {}'.format(idir)
    jobs = _load_jobs(idir, df)

    # Do the work.
    pool = Pool(4)
    njobs = len(jobs)

    try:
        import sys
        with codecs.open(ofile, 'wb') as pf:
            pickle.dump(njobs, pf)
            results = pool.imap_unordered(worker, jobs)
            for i, result in enumerate(results, 1):
                pickle.dump(result, pf)
                per = 100 * (float(i) / njobs)
                sys.stdout.write(u'\rPercent Complete: {:2.3f}%'.format(per))
                sys.stdout.flush()
            sys.stdout.write(u'\rPercent Complete: 100%    \n')
            sys.stdout.flush()

    except KeyboardInterrupt:
        sys.stdout.write(u'\rPercent Complete: {:2.3f}%    \n'.format(per))
        sys.stdout.write(u'Shutting down.\n')
        sys.stdout.flush()
        sys.exit()

    print u'Complete!'
Example #27
	def setInitialCondition(self,U0):
		if np.shape(self.U) != np.shape(U0):
			print "Wrong shape",np.shape(U0)," of initial condition! Must match shape of mesh",np.shape(self.mesh),". Exiting"
			# U0 = U0[:np.shape(self.U)[0],:np.shape(self.U)[-1]]
			import sys
			sys.exit(0)
		self.Up = U0
Example #28
def main():
    print '----------------------------------'
    print '-    Coursera.org Downloader     -'
    print '-         by Logan Ding          -'
    print '----------------------------------'
    print
    # Add courses yourself; not all are tested. Feedback is welcome.
    course = { '1' : 'modelthinking',
               '2' : 'gametheory',  
               '3' : 'crypto',
               '4' : 'saas',
               '5' : 'pgm', 
               '6' : 'algo'}

    # Your Coursera.org email and password needed here to download videos. 
    email = 'youremail'
    password = '******'

    if email == 'youremail':
        print 'You must change the email and the password to yours in main() first.'
        sys.exit(1)

    path  = download_path()
    print 'All files will be downloaded to:', path
    print
    course = choose_course(course)
    br = initialize_browser(course, email, password)
    mp4, pdf, pptx = resolve_resources(br, path)
    downloader(mp4, pdf, pptx, br, path)
Example #29
    def parse_subcommands(self, gparser=None):
        """
        Parse global arguments, find subcommand from list of subcommand
        objects, parse local subcommand arguments and return a tuple of
        global options, selected command object, command options, and
        command arguments.

        Call execute() on the command object to run it. The command object has
        members 'gopts' and 'opts' set to the global and command options
        respectively; you don't need to pass those to execute(), but you may
        if you want to.
        """
        gparser = gparser or self.gparser
        # parse global options.
        gopts, args = gparser.parse_args()
        if not args:
            gparser.print_help()
            raise SystemExit("\nError: you must specify an action.")
        # set debug level if specified
        if gopts.DEBUG:
            console.setLevel(logger.DEBUG)
            config.DEBUG_CONFIG = True
        # load StarClusterConfig into global options
        try:
            cfg = config.StarClusterConfig(gopts.CONFIG)
            cfg.load()
        except exception.ConfigNotFound, e:
            log.error(e.msg)
            e.display_options()
            sys.exit(1)
Example #30
def _maybe_extract(fpath, dirname, descend=True):
    path = os.path.dirname(fpath)
    untar_fpath = os.path.join(path, dirname)
    if not os.path.exists(untar_fpath):
        print('Extracting contents of "{}"...'.format(dirname))
        tfile = zipfile.ZipFile(fpath, 'r')
        try:
            tfile.extractall(untar_fpath)
        except (Exception, KeyboardInterrupt):
            if os.path.exists(untar_fpath):
                if os.path.isfile(untar_fpath):
                    os.remove(untar_fpath)
                else:
                    shutil.rmtree(untar_fpath)
            raise
        tfile.close()
    if descend:
        dirs = [os.path.join(untar_fpath, o)
                for o in os.listdir(untar_fpath)
                if os.path.isdir(os.path.join(untar_fpath, o))]
        if len(dirs) != 1:
            print("Error: expected exactly one directory, found: {}".format(dirs))
            sys.exit(-1)
        return dirs[0]
    else:
        return untar_fpath
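
A hedged usage sketch (the archive name is hypothetical): with descend=True the function returns the single top-level directory inside the extracted archive, extracting only on first use.

data_dir = _maybe_extract('/tmp/dataset.zip', 'dataset')
print('data lives in', data_dir)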
Example #31
def exit_with_usage(code=1):
    print("Usage: {0} [{1}]".format(sys.argv[0],
                                    '|'.join('--' + opt
                                             for opt in valid_opts)),
          file=sys.stderr)
    sys.exit(code)
Example #32
def main():
    def print_stacktrace_if_debug():
        debug_flag = False
        if 'args' in vars() and 'debug' in args:
            debug_flag = args.debug

        if debug_flag:
            traceback.print_exc(file=sys.stdout)
            error(traceback.format_exc())
    try:
        description = ['~~~CRISPRessoWGS~~~','-Analysis of CRISPR/Cas9 outcomes from WGS data-']
        wgs_string = r'''
 ____________
|     __  __ |
||  |/ _ (_  |
||/\|\__)__) |
|____________|
        '''
        print(CRISPRessoShared.get_crispresso_header(description,wgs_string))

        parser = CRISPRessoShared.getCRISPRessoArgParser(parserTitle = 'CRISPRessoWGS Parameters',requiredParams={})

        #tool specific optional
        parser.add_argument('-b','--bam_file', type=str,  help='WGS aligned bam file', required=True)
        parser.add_argument('-f','--region_file', type=str,  help='Regions description file. A BED format file containing the regions to analyze, one per line. The REQUIRED\
        columns are: chr_id(chromosome name), bpstart(start position), bpend(end position); the optional columns are: name (a unique identifier for the region), guide_seq, expected_hdr_amplicon_seq, coding_seq, see CRISPResso help for more details on these last 3 parameters)', required=True)
        parser.add_argument('-r','--reference_file', type=str, help='A FASTA format reference file (for example hg19.fa for the human genome)', default='',required=True)
        parser.add_argument('--min_reads_to_use_region',  type=float, help='Minimum number of reads that align to a region to perform the CRISPResso analysis', default=10)
        parser.add_argument('--skip_failed',  help='Continue with pooled analysis even if one sample fails',action='store_true')
        parser.add_argument('--gene_annotations', type=str, help='Gene Annotation Table from UCSC Genome Browser Tables (http://genome.ucsc.edu/cgi-bin/hgTables?command=start), \
        please select as table "knownGene", as output format "all fields from selected table" and as file returned "gzip compressed"', default='')
        parser.add_argument('-p','--n_processes',type=int, help='Specify the number of processes to use for the quantification.\
        Please use with caution since increasing this parameter will increase the memory required to run CRISPResso.',default=1)
        parser.add_argument('--crispresso_command', help='CRISPResso command to call',default='CRISPResso')

        args = parser.parse_args()

        crispresso_options = CRISPRessoShared.get_crispresso_options()
        options_to_ignore = set(['fastq_r1','fastq_r2','amplicon_seq','amplicon_name','output_folder','name'])
        crispresso_options_for_wgs = list(crispresso_options-options_to_ignore)

        info('Checking dependencies...')

        if check_samtools() and check_bowtie2():
            info('\n All the required dependencies are present!')
        else:
            sys.exit(1)

        #check files
        check_file(args.bam_file)

        check_file(args.reference_file)

        check_file(args.region_file)

        if args.gene_annotations:
            check_file(args.gene_annotations)


        #INIT
        get_name_from_bam=lambda  x: os.path.basename(x).replace('.bam','')

        if not args.name:
            database_id='%s' % get_name_from_bam(args.bam_file)
        else:
            database_id=args.name


        OUTPUT_DIRECTORY='CRISPRessoWGS_on_%s' % database_id

        if args.output_folder:
                 OUTPUT_DIRECTORY=os.path.join(os.path.abspath(args.output_folder),OUTPUT_DIRECTORY)

        _jp=lambda filename: os.path.join(OUTPUT_DIRECTORY,filename) #handy function to put a file in the output directory

        try:
                 info('Creating Folder %s' % OUTPUT_DIRECTORY)
                 os.makedirs(OUTPUT_DIRECTORY)
                 info('Done!')
        except:
                 warn('Folder %s already exists.' % OUTPUT_DIRECTORY)

        log_filename=_jp('CRISPRessoWGS_RUNNING_LOG.txt')
        logging.getLogger().addHandler(logging.FileHandler(log_filename))

        with open(log_filename,'w+') as outfile:
                  outfile.write('[Command used]:\n%s\n\n[Execution log]:\n' % ' '.join(sys.argv))

        crispresso2WGS_info_file = os.path.join(OUTPUT_DIRECTORY,'CRISPResso2WGS_info.pickle')
        crispresso2_info = {} #keep track of all information for this run to be pickled and saved at the end of the run
        crispresso2_info['version'] = CRISPRessoShared.__version__
        crispresso2_info['args'] = deepcopy(args)

        crispresso2_info['log_filename'] = os.path.basename(log_filename)

        def rreplace(s, old, new):
            # replace only the last occurrence of `old`
            li = s.rsplit(old, 1)
            return new.join(li)

        bam_index = ''
        #check if bam has the index already
        if os.path.exists(rreplace(args.bam_file,".bam",".bai")):
            info('Index file for input .bam file exists, skipping generation.')
            bam_index = args.bam_file.replace(".bam",".bai")
        elif os.path.exists(args.bam_file+'.bai'):
            info('Index file for input .bam file exists, skipping generation.')
            bam_index = args.bam_file+'.bai'
        else:
            info('Creating index file for input .bam file...')
            sb.call('samtools index %s ' % (args.bam_file),shell=True)
            bam_index = args.bam_file+'.bai'


        #load gene annotation
        if args.gene_annotations:
            info('Loading gene coordinates from annotation file: %s...' % args.gene_annotations)
            try:
                df_genes=pd.read_table(args.gene_annotations,compression='gzip')
                df_genes.txEnd=df_genes.txEnd.astype(int)
                df_genes.txStart=df_genes.txStart.astype(int)
                df_genes.head()
            except:
                info('Failed to load the gene annotations file.')


        #Load and validate the REGION FILE
        df_regions=pd.read_csv(args.region_file,names=[
                'chr_id','bpstart','bpend','Name','sgRNA',
                'Expected_HDR','Coding_sequence'],comment='#',sep='\t',dtype={'Name':str})


        #remove empty amplicons/lines
        df_regions.dropna(subset=['chr_id','bpstart','bpend'],inplace=True)

        df_regions.Expected_HDR=df_regions.Expected_HDR.apply(capitalize_sequence)
        df_regions.sgRNA=df_regions.sgRNA.apply(capitalize_sequence)
        df_regions.Coding_sequence=df_regions.Coding_sequence.apply(capitalize_sequence)


        #check or create names
        for idx,row in df_regions.iterrows():
            if pd.isnull(row.Name):
                df_regions.ix[idx,'Name']='_'.join(map(str,[row['chr_id'],row['bpstart'],row['bpend']]))


        if not len(df_regions.Name.unique())==df_regions.shape[0]:
            raise Exception('The amplicon names should be all distinct!')

        df_regions=df_regions.set_index('Name')
        #df_regions.index=df_regions.index.str.replace(' ','_')
        df_regions.index=df_regions.index.to_series().str.replace(' ','_')

        #extract sequence for each region
        uncompressed_reference=args.reference_file

        if os.path.exists(uncompressed_reference+'.fai'):
            info('The index for the reference fasta file is already present! Skipping generation.')
        else:
            info('Indexing reference file... Please be patient!')
            sb.call('samtools faidx %s >>%s 2>&1' % (uncompressed_reference,log_filename),shell=True)

        df_regions['sequence']=df_regions.apply(lambda row: get_region_from_fa(row.chr_id,row.bpstart,row.bpend,uncompressed_reference),axis=1)

        for idx,row in df_regions.iterrows():

            if not pd.isnull(row.sgRNA):

                cut_points=[]

                for current_guide_seq in row.sgRNA.strip().upper().split(','):

                    wrong_nt=find_wrong_nt(current_guide_seq)
                    if wrong_nt:
                        raise NTException('The sgRNA sequence %s contains wrong characters:%s'  % (current_guide_seq, ' '.join(wrong_nt)))

                    offset_fw=args.quantification_window_center+len(current_guide_seq)-1
                    offset_rc=(-args.quantification_window_center)-1
                    cut_points+=[m.start() + offset_fw for \
                                m in re.finditer(current_guide_seq,  row.sequence)]+[m.start() + offset_rc for m in re.finditer(CRISPRessoShared.reverse_complement(current_guide_seq),  row.sequence)]

                if not cut_points:
                    df_regions.ix[idx,'sgRNA']=''

        df_regions['bpstart'] = pd.to_numeric(df_regions['bpstart'])
        df_regions['bpend'] = pd.to_numeric(df_regions['bpend'])

        df_regions.bpstart=df_regions.bpstart.astype(int)
        df_regions.bpend=df_regions.bpend.astype(int)

        if args.gene_annotations:
            df_regions=df_regions.apply(lambda row: find_overlapping_genes(row, df_genes),axis=1)


        #extract reads with samtools in that region and create a bam
        #create a fasta file with all the trimmed reads
        info('\nProcessing each region...')

        ANALYZED_REGIONS=_jp('ANALYZED_REGIONS/')
        if not os.path.exists(ANALYZED_REGIONS):
            os.mkdir(ANALYZED_REGIONS)

        df_regions['n_reads']=0
        df_regions['bam_file_with_reads_in_region']=''
        df_regions['fastq.gz_file_trimmed_reads_in_region']=''

        for idx,row in df_regions.iterrows():

            if row['sequence']:

                fastq_gz_filename=os.path.join(ANALYZED_REGIONS,'%s.fastq.gz' % clean_filename('REGION_'+str(idx)))
                bam_region_filename=os.path.join(ANALYZED_REGIONS,'%s.bam' % clean_filename('REGION_'+str(idx)))

                #create place-holder fastq files
                open(fastq_gz_filename, 'w+').close()

                region='%s:%d-%d' % (row.chr_id,row.bpstart,row.bpend-1)
                info('\nExtracting reads in %s and creating the .bam file: %s' % (region,bam_region_filename))

                #extract reads in region
                cmd=r'''samtools view -b -F 4 %s %s > %s ''' % (args.bam_file, region, bam_region_filename)
                #print cmd
                sb.call(cmd,shell=True)


                #index bam file
                cmd=r'''samtools index %s ''' % (bam_region_filename)
                #print cmd
                sb.call(cmd,shell=True)

                info('Trim reads and create a fastq.gz file in: %s' % fastq_gz_filename)
                #trim reads in bam and convert in fastq
                n_reads=write_trimmed_fastq(bam_region_filename,row['bpstart'],row['bpend'],fastq_gz_filename)
                df_regions.ix[idx,'n_reads']=n_reads
                df_regions.ix[idx,'bam_file_with_reads_in_region']=bam_region_filename
                df_regions.ix[idx,'fastq.gz_file_trimmed_reads_in_region']=fastq_gz_filename


        df_regions.fillna('NA').to_csv(_jp('REPORT_READS_ALIGNED_TO_SELECTED_REGIONS_WGS.txt'),sep='\t')

        #Run Crispresso
        info('\nRunning CRISPResso on each region...')
        crispresso_cmds = []
        for idx,row in df_regions.iterrows():

               if row['n_reads']>=args.min_reads_to_use_region:
                    info('\nThe region [%s] has enough reads (%d) mapped to it!' % (idx,row['n_reads']))

                    crispresso_cmd= args.crispresso_command + ' -r1 %s -a %s -o %s --name %s' %\
                    (row['fastq.gz_file_trimmed_reads_in_region'],row['sequence'],OUTPUT_DIRECTORY,idx)

                    if row['sgRNA'] and not pd.isnull(row['sgRNA']):
                        crispresso_cmd+=' -g %s' % row['sgRNA']

                    if row['Expected_HDR'] and not pd.isnull(row['Expected_HDR']):
                        crispresso_cmd+=' -e %s' % row['Expected_HDR']

                    if row['Coding_sequence'] and not pd.isnull(row['Coding_sequence']):
                        crispresso_cmd+=' -c %s' % row['Coding_sequence']

                    crispresso_cmd=CRISPRessoShared.propagate_crispresso_options(crispresso_cmd,crispresso_options_for_wgs,args)
                    crispresso_cmds.append(crispresso_cmd)
#                    info('Running CRISPResso:%s' % crispresso_cmd)
#                    sb.call(crispresso_cmd,shell=True)

               else:
                    info('\nThe region [%s] has too few reads mapped to it (%d)! Not running CRISPResso!' % (idx,row['n_reads']))

        CRISPRessoMultiProcessing.run_crispresso_cmds(crispresso_cmds,args.n_processes,'region',args.skip_failed)

        quantification_summary=[]
        all_region_names = []
        all_region_read_counts = {}
        good_region_names = []
        good_region_folders = {}
        header = 'Name\tUnmodified%\tModified%\tReads_aligned\tReads_total\tUnmodified\tModified\tDiscarded\tInsertions\tDeletions\tSubstitutions\tOnly Insertions\tOnly Deletions\tOnly Substitutions\tInsertions and Deletions\tInsertions and Substitutions\tDeletions and Substitutions\tInsertions Deletions and Substitutions'
        header_els = header.split("\t")
        header_el_count = len(header_els)
        empty_line_els = [np.nan]*(header_el_count-1)
        n_reads_index = header_els.index('Reads_total') - 1
        for idx,row in df_regions.iterrows():
            folder_name='CRISPResso_on_%s' % idx
            run_name = idx

            all_region_names.append(run_name)
            all_region_read_counts[run_name] = row.n_reads

            run_file = os.path.join(_jp(folder_name),'CRISPResso2_info.pickle')
            if not os.path.exists(run_file):
                warn('Skipping the folder %s: not enough reads, incomplete, or empty folder.'% folder_name)
                this_els = empty_line_els[:]
                this_els[n_reads_index] = row.n_reads
                to_add = [run_name]
                to_add.extend(this_els)
                quantification_summary.append(to_add)
            else:
                run_data = cp.load(open(run_file,'rb'))
                ref_name = run_data['ref_names'][0] #only expect one amplicon sequence
                n_tot = row.n_reads
                n_aligned = run_data['counts_total'][ref_name]
                n_unmod = run_data['counts_unmodified'][ref_name]
                n_mod = run_data['counts_modified'][ref_name]
                n_discarded = run_data['counts_discarded'][ref_name]

                n_insertion = run_data['counts_insertion'][ref_name]
                n_deletion = run_data['counts_deletion'][ref_name]
                n_substitution = run_data['counts_substitution'][ref_name]
                n_only_insertion = run_data['counts_only_insertion'][ref_name]
                n_only_deletion = run_data['counts_only_deletion'][ref_name]
                n_only_substitution = run_data['counts_only_substitution'][ref_name]
                n_insertion_and_deletion = run_data['counts_insertion_and_deletion'][ref_name]
                n_insertion_and_substitution = run_data['counts_insertion_and_substitution'][ref_name]
                n_deletion_and_substitution = run_data['counts_deletion_and_substitution'][ref_name]
                n_insertion_and_deletion_and_substitution = run_data['counts_insertion_and_deletion_and_substitution'][ref_name]

                unmod_pct = "NA"
                mod_pct = "NA"
                if n_aligned > 0:
                    unmod_pct = 100*n_unmod/float(n_aligned)
                    mod_pct = 100*n_mod/float(n_aligned)

                vals = [run_name]
                vals.extend([round(unmod_pct,8),round(mod_pct,8),n_aligned,n_tot,n_unmod,n_mod,n_discarded,n_insertion,n_deletion,n_substitution,n_only_insertion,n_only_deletion,n_only_substitution,n_insertion_and_deletion,n_insertion_and_substitution,n_deletion_and_substitution,n_insertion_and_deletion_and_substitution])
                quantification_summary.append(vals)

                good_region_names.append(idx)
                good_region_folders[idx] = folder_name
        samples_quantification_summary_filename = _jp('SAMPLES_QUANTIFICATION_SUMMARY.txt')

        df_summary_quantification=pd.DataFrame(quantification_summary,columns=header_els)
        if args.crispresso1_mode:
            crispresso1_columns=['Name','Unmodified%','Modified%','Reads_aligned','Reads_total']
            df_summary_quantification.fillna('NA').to_csv(samples_quantification_summary_filename,sep='\t',index=None,columns=crispresso1_columns)
        else:
            df_summary_quantification.fillna('NA').to_csv(samples_quantification_summary_filename,sep='\t',index=None)

        crispresso2_info['samples_quantification_summary_filename'] = os.path.basename(samples_quantification_summary_filename)
        crispresso2_info['regions'] = df_regions
        crispresso2_info['all_region_names'] = all_region_names
        crispresso2_info['all_region_read_counts'] = all_region_read_counts
        crispresso2_info['good_region_names'] = good_region_names
        crispresso2_info['good_region_folders'] = good_region_folders

        crispresso2_info['summary_plot_names'] = []
        crispresso2_info['summary_plot_titles'] = {}
        crispresso2_info['summary_plot_labels'] = {}
        crispresso2_info['summary_plot_datas'] = {}

        df_summary_quantification.set_index('Name')

        save_png = True
        if args.suppress_report:
            save_png = False

        plot_root = _jp("CRISPRessoWGS_reads_summary")
        CRISPRessoPlot.plot_reads_total(plot_root,df_summary_quantification,save_png,args.min_reads_to_use_region)
        plot_name = os.path.basename(plot_root)
        crispresso2_info['reads_summary_plot'] = plot_name
        crispresso2_info['summary_plot_names'].append(plot_name)
        crispresso2_info['summary_plot_titles'][plot_name] = 'CRISPRessoWGS Read Allocation Summary'
        crispresso2_info['summary_plot_labels'][plot_name] = 'Each bar shows the total number of reads allocated to each amplicon. The vertical line shows the cutoff for analysis, set using the --min_reads_to_use_region parameter.'
        crispresso2_info['summary_plot_datas'][plot_name] = [('CRISPRessoWGS summary',os.path.basename(samples_quantification_summary_filename))]

        plot_root = _jp("CRISPRessoWGS_modification_summary")
        CRISPRessoPlot.plot_unmod_mod_pcts(plot_root,df_summary_quantification,save_png,args.min_reads_to_use_region)
        plot_name = os.path.basename(plot_root)
        crispresso2_info['modification_summary_plot'] = plot_name
        crispresso2_info['summary_plot_names'].append(plot_name)
        crispresso2_info['summary_plot_titles'][plot_name] = 'CRISPRessoWGS Modification Summary'
        crispresso2_info['summary_plot_labels'][plot_name] = 'Each bar shows the total number of reads aligned to each amplicon, divided into the reads that are modified and unmodified. The vertical line shows the cutoff for analysis, set using the --min_reads_to_use_region parameter.'
        crispresso2_info['summary_plot_datas'][plot_name] = [('CRISPRessoWGS summary',os.path.basename(samples_quantification_summary_filename))]

        if not args.suppress_report:
            if (args.place_report_in_output_folder):
                report_name = _jp("CRISPResso2WGS_report.html")
            else:
                report_name = OUTPUT_DIRECTORY+'.html'
            CRISPRessoReport.make_wgs_report_from_folder(report_name,crispresso2_info,OUTPUT_DIRECTORY,_ROOT)
            crispresso2_info['report_location'] = report_name
            crispresso2_info['report_filename'] = os.path.basename(report_name)

        cp.dump(crispresso2_info, open(crispresso2WGS_info_file, 'wb' ) )

        info('Analysis Complete!')
        print(CRISPRessoShared.get_crispresso_footer())
        sys.exit(0)

    except Exception as e:
        print_stacktrace_if_debug()
        error('\n\nERROR: %s' % e)
        sys.exit(-1)
Example #33
def RunDaVsTurns(db,force,outfile,outfileold,turnstep,davstfit,fitdat,fitdaterr,fitndrop,fitskap,fitekap,fitdkap,outfilefit):
  '''Da vs turns -- calculate da vs turns for study dbname, if davstfit=True also fit the data'''
  #---- calculate the da vs turns
  try:
    turnstep=int(float(turnstep))
  except (ValueError,NameError,TypeError):
    print('Error in RunDaVsTurns: turnstep must be an integer value!')
    sys.exit(0)
  if(not db.check_seeds()):
    print('!!! Seeds are missing in database !!!')
  turnsl=db.env_var['turnsl']#get turnsl for outputfile names
  turnse=db.env_var['turnse']
  for seed in db.get_db_seeds():
    seed=int(seed)
    print('analyzing seed {0} ...'.format(str(seed)))
    for tune in db.get_db_tunes():
      print('analyzing tune {0} ...'.format(str(tune)))
      dirname=db.mk_analysis_dir(seed,tune)#directory struct already created in clean_dir_da_vst, only get dir name (string) here
      print('... get survival data')
      dasurv= db.get_surv(seed,tune)
      print('... get da vs turns data')
      daout = db.get_da_vst(seed,tune)
      if(len(daout)>0):#reload data, if input data has changed redo the analysis
        an_mtime=daout['mtime'].min()
        res_mtime=db.execute('SELECT max(mtime) FROM six_results')[0][0]
        if res_mtime>an_mtime or force is True:
          files=('DA.%s.out DAsurv.%s.out DA.%s.png DAsurv.%s.png DAsurv_log.%s.png DAsurv_comp.%s.png DAsurv_comp_log.%s.png'%(turnse,turnse,turnse,turnse,turnse,turnse,turnse)).split()+['DA.out','DAsurv.out','DA.png','DAsurv.png','DAsurv_log.png','DAsurv_comp.png','DAsurv_comp_log.png']
          clean_dir_da_vst(db,files)# create directory structure and delete old files
          print('... input data has changed or force=True - recalculate da vs turns')
          daout=mk_da_vst(dasurv,seed,tune,turnsl,turnstep)
          print('.... save data in database')
          #check if old table name da_vsturn exists, if yes delete it
          if(db.check_table('da_vsturn')):
            print('... delete old table da_vsturn - table will be substituted by new table da_vst')
            db.execute("DROP TABLE da_vsturn")
          db.st_da_vst(daout,recreate=True)
      else:#create data
        print('... calculate da vs turns')
        daout=mk_da_vst(dasurv,seed,tune,turnsl,turnstep)
        print('.... save data in database')
        db.st_da_vst(daout,recreate=False)
      if(outfile):# create dasurv.out and da.out files
        fnsurv='%s/DAsurv.%s.out'%(dirname,turnse)
        save_dasurv(dasurv,fnsurv)
        print('... save survival data in {0}'.format(fnsurv))
        fndaout='%s/DA.%s.out'%(dirname,turnse)
        save_daout(daout,fndaout)
        print('... save da vs turns data in {0}'.format(fndaout))
      if(outfileold):
        fndaoutold='%s/DAold.%s.out'%(dirname,turnse)
        save_daout_old(daout,fndaoutold)
        print('... save da vs turns (old data format) data in {0}'.format(fndaoutold))
  #---- fit the data
  if(davstfit):
    if(fitdat in ['dawtrap','dastrap','dawsimp','dassimp']):
      if(fitdaterr in ['none','dawtraperr','dastraperr','dastraperrep','dastraperrepang','dastraperrepamp','dawsimperr','dassimperr']):
        try:
          fitndrop=int(float(fitndrop))
        except (ValueError,NameError,TypeError):
          print('Error in RunDaVsTurns: fitndrop must be an integer value! - Aborting!')
          sys.exit(0)
        try:
          fitskap=float(fitskap)
          fitekap=float(fitekap)
          fitdkap=float(fitdkap)
        except (ValueError,NameError,TypeError):
          print('Error in RunDaVsTurns: fitskap, fitekap and fitdkap must be float values! - Aborting!')
          sys.exit(0)
        if((np.arange(fitskap,fitekap+fitdkap,fitdkap)).any()):
          for tune in db.get_db_tunes():
            print('fit da vs turns for tune {0} ...'.format(str(tune)))
            fitdaout=mk_da_vst_fit(db,tune,fitdat,fitdaterr,fitndrop,fitskap,fitekap,fitdkap)
            print('.... save fitdata in database')
            db.st_da_vst_fit(fitdaout,recreate=False)
            if(outfilefit):
              (tunex,tuney)=tune
              sixdesktunes="%g_%g"%(tunex,tuney)
              fndot='%s/DAfit.%s.%s.%s.%s.%s.plot'%(db.mk_analysis_dir(),db.LHCDescrip,sixdesktunes,turnse,fitdat,fitdaterr)
              save_davst_fit(fitdaout,fndot)
              print('... save da vs turns fit data in {0}'.format(fndot))
        else:
          print('Error in RunDaVsTurns: empty scan range for fitkap!')
      else:
        print("Error in -fitopt: <dataerr> has to be 'none','dawtraperr','dastraperr','dastraperrep','dastraperrepang','dastraperrepamp','dawsimperr' or 'dassimperr' - Aborting!")
        sys.exit(0)
    else:
      print("Error in -fitopt: <data> has to be 'dawtrap','dastrap','dawsimp' or 'dassimp' - Aborting!")
      sys.exit(0)
Example #34
import sys

from PyQt5 import QtGui, QtWidgets
from PyQt5.QtCore import pyqtSlot  # imported so the pyqtSlot decorator is available


class Form(QtWidgets.QDialog):
    def __init__(self, parent=None):
        QtWidgets.QDialog.__init__(self, parent)
        self.setGeometry(100, 100, 320, 105)

        # Create a QRadioButton
        # Only the parent is passed at construction; the icon and text are set afterwards
        rb_1 = QtWidgets.QRadioButton(self)
        rb_1.setText("QRadioButton_1")
        icon = QtGui.QIcon("air.ico")
        rb_1.setIcon(icon)

        # Construct with the text passed as the first argument
        rb_2 = QtWidgets.QRadioButton("QRadioButton_2", self)

        vbox = QtWidgets.QVBoxLayout()  # layout widget that stacks the buttons vertically
        vbox.addWidget(rb_1)
        vbox.addWidget(rb_2)
        self.setLayout(vbox)


if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    w = Form()
    w.show()
    sys.exit(app.exec())
Example #35
def main():
    global args, workdir

    parser = argparse.ArgumentParser(description='Script for running full Gitian builds.')
    parser.add_argument('-c', '--commit', action='store_true', dest='commit', help='Indicate that the version argument is for a commit or branch')
    parser.add_argument('-p', '--pull', action='store_true', dest='pull', help='Indicate that the version argument is the number of a github repository pull request')
    parser.add_argument('-u', '--url', dest='url', default='https://github.com/polispay/polis', help='Specify the URL of the repository. Default is %(default)s')
    parser.add_argument('-v', '--verify', action='store_true', dest='verify', help='Verify the Gitian build')
    parser.add_argument('-b', '--build', action='store_true', dest='build', help='Do a Gitian build')
    parser.add_argument('-s', '--sign', action='store_true', dest='sign', help='Make signed binaries for Windows and MacOS')
    parser.add_argument('-B', '--buildsign', action='store_true', dest='buildsign', help='Build both signed and unsigned binaries')
    parser.add_argument('-o', '--os', dest='os', default='lwm', help='Specify which Operating Systems the build is for. Default is %(default)s. l for Linux, w for Windows, m for MacOS')
    parser.add_argument('-j', '--jobs', dest='jobs', default='2', help='Number of processes to use. Default %(default)s')
    parser.add_argument('-m', '--memory', dest='memory', default='2000', help='Memory to allocate in MiB. Default %(default)s')
    parser.add_argument('-k', '--kvm', action='store_true', dest='kvm', help='Use KVM instead of LXC')
    parser.add_argument('-d', '--docker', action='store_true', dest='docker', help='Use Docker instead of LXC')
    parser.add_argument('-S', '--setup', action='store_true', dest='setup', help='Set up the Gitian building environment. Only works on Debian-based systems (Ubuntu, Debian)')
    parser.add_argument('-D', '--detach-sign', action='store_true', dest='detach_sign', help='Create the assert file for detached signing. Will not commit anything.')
    parser.add_argument('-n', '--no-commit', action='store_false', dest='commit_files', help='Do not commit anything to git')
    parser.add_argument('signer', nargs='?', help='GPG signer to sign each build assert file')
    parser.add_argument('version', nargs='?', help='Version number, commit, or branch to build. If building a commit or branch, the -c option must be specified')

    args = parser.parse_args()
    workdir = os.getcwd()

    args.is_bionic = b'bionic' in subprocess.check_output(['lsb_release', '-cs'])

    if args.kvm and args.docker:
        raise Exception('Error: cannot have both kvm and docker')

    # Ensure no more than one environment variable for gitian-builder (USE_LXC, USE_VBOX, USE_DOCKER) is set as they
    # can interfere (e.g., USE_LXC being set shadows USE_DOCKER; for details see gitian-builder/libexec/make-clean-vm).
    os.environ['USE_LXC'] = ''
    os.environ['USE_VBOX'] = ''
    os.environ['USE_DOCKER'] = ''
    if args.docker:
        os.environ['USE_DOCKER'] = '1'
    elif not args.kvm:
        os.environ['USE_LXC'] = '1'
        if 'GITIAN_HOST_IP' not in os.environ.keys():
            os.environ['GITIAN_HOST_IP'] = '10.0.3.1'
        if 'LXC_GUEST_IP' not in os.environ.keys():
            os.environ['LXC_GUEST_IP'] = '10.0.3.5'

    if args.setup:
        setup()

    if args.buildsign:
        args.build = True
        args.sign = True

    if not args.build and not args.sign and not args.verify:
        sys.exit(0)

    args.linux = 'l' in args.os
    args.windows = 'w' in args.os
    args.macos = 'm' in args.os

    # Disable for MacOS if no SDK found
    if args.macos and not os.path.isfile('gitian-builder/inputs/MacOSX10.11.sdk.tar.gz'):
        print('Cannot build for MacOS, SDK does not exist. Will build for other OSes')
        args.macos = False

    args.sign_prog = 'true' if args.detach_sign else 'gpg --detach-sign'

    script_name = os.path.basename(sys.argv[0])
    if not args.signer:
        print(script_name+': Missing signer')
        print('Try '+script_name+' --help for more information')
        sys.exit(1)
    if not args.version:
        print(script_name+': Missing version')
        print('Try '+script_name+' --help for more information')
        sys.exit(1)

    # Add leading 'v' for tags
    if args.commit and args.pull:
        raise Exception('Cannot have both commit and pull')
    args.commit = ('' if args.commit else 'v') + args.version

    os.chdir('polis')
    if args.pull:
        subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
        os.chdir('../gitian-builder/inputs/polis')
        subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
        args.commit = subprocess.check_output(['git', 'show', '-s', '--format=%H', 'FETCH_HEAD'], universal_newlines=True, encoding='utf8').strip()
        args.version = 'pull-' + args.version
    print(args.commit)
    subprocess.check_call(['git', 'fetch'])
    subprocess.check_call(['git', 'checkout', args.commit])
    os.chdir(workdir)

    os.chdir('gitian-builder')
    subprocess.check_call(['git', 'pull'])
    os.chdir(workdir)

    if args.build:
        build()

    if args.sign:
        sign()

    if args.verify:
        os.chdir('gitian.sigs')
        subprocess.check_call(['git', 'pull'])
        os.chdir(workdir)
        sys.exit(verify())
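
# Hedged usage sketch (not part of the original script; the file name, GPG key
# and version tag below are illustrative placeholders): a Docker-based build
# might be invoked as
#
#   ./gitian-build.py --docker --build --jobs 4 --memory 4000 mygpgkey 1.4.0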
Ejemplo n.º 36

        if marker_state:
            self.ax.plot(values, plot_input, color=current_color, marker=marker, markersize=marker_size)
        else:
            self.ax.plot(values, plot_input, color=current_color)

        self.ax.set_xlim(values[0], values[-1])
        self.draw()


#--- Class For Alignment ---#
class MyQLabel(QtGui.QLabel):
    def __init__(self, label, ha='left', parent=None):
        super(MyQLabel, self).__init__(label, parent)
        if ha == 'center':
            self.setAlignment(QtCore.Qt.AlignCenter)
        elif ha == 'right':
            self.setAlignment(QtCore.Qt.AlignRight)
        else:
            self.setAlignment(QtCore.Qt.AlignLeft)
if __name__ == '__main__':


    app = QtGui.QApplication(sys.argv)

    ui = FunctionViewer()
    ui.show()

    sys.exit(app.exec_())
Ejemplo n.º 37
def check_library(library_name):
    try:
        return __import__(library_name)
    except ImportError:
        error('You need to install %s module to use CRISPRessoWGS!' % library_name)
        sys.exit(1)
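
# Hedged usage sketch: check_library returns the imported module on success,
# so callers can bind it directly ('pysam' is an arbitrary example name):
#
#   pysam = check_library('pysam')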
Ejemplo n.º 38
        rospy.loginfo('Going to stop task.')
        stop_task()
    print('Going to sleep a sec.')
    time.sleep(2)
    print('startup: Exiting rospy_exit_handler.')


if __name__ == '__main__':
    argv = sys.argv[1:]
    try:
        opts, args = getopt.getopt(argv, '', [
            'save', 'plan', 'init', 'no-ts', 'inittime=', 'logging=', 'parent='
        ])
    except getopt.GetoptError:
        print('startup: Could not parse parameters.')
        sys.exit(2)
    initialization_time = None
    logging = 0
    parent = None
    for opt, arg in opts:
        if opt == '--save':
            _save_log = True
        elif opt == '--init':
            rospy.on_shutdown(rospy_exit_handler)
        elif opt == '--inittime':
            initialization_time = arg
        elif opt in ['--logging']:
            l = int(arg)
            if l in [0, 1, 2]:
                logging = l
        elif opt in ['--parent']:
Ejemplo n.º 39
            epoll.close()
            skt.close()


if __name__ == "__main__":
    curr_path = os.path.split(os.path.realpath(__file__))[0]
    curr_file_name = os.path.split(os.path.realpath(__file__))[1]
    p_client = curr_path + os.sep + "config" + os.sep + "config.xml"
    client_conf = getconf(p_client, 'name', 'client')
    p_log = curr_path + os.sep + 'log' + os.sep + 'client.log'
    log = comm_lib.log(p_log)
    server_ip = client_conf['server']['ip']
    server_port = client_conf['server']['port']
    task_log_path = client_conf['server']['task_log_path']
    global sk
    sk = ''

    help_msg = 'Usage: python %s <start|stop|restart|debug>' % sys.argv[0]
    clientdaemon = clientdaemon(curr_path + '/client_pid.pid',
                                server_ip=server_ip,
                                server_port=server_port)
    if len(sys.argv) == 1:
        clientdaemon.start()
    elif sys.argv[1] in ['debug']:
        clientdaemon.run()
    elif sys.argv[1] in ['start', 'stop', 'restart']:
        getattr(clientdaemon, sys.argv[1])()
    else:
        print help_msg
        sys.exit(1)
Ejemplo n.º 40
    def cross_validate(self,) :
        cv_input = None
        # Make a mapping for just the segments / diagrams / whatever we need for cross validation
        cv_indices = list(set(itertools.chain.from_iterable([cv.train + cv.test for cv in self.partitions.cross_validation])))
        cv_indices.sort()
        
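        # Re-map absolute sample indices to their positions within the narrowed cv_input.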
        cv_partitions = [TrainTestPartition(train=[cv_indices.index(i) for i in cv.train],
                                            test=[cv_indices.index(i) for i in cv.test],
                                            state=cv.state) for cv in self.partitions.cross_validation]
        learning_class = None
        kernel_class = None
        distances_class = None
        if self.kernel_module != None :
            print self.kernel_module
            kernel_module = importlib.import_module("persistence." + self.kernel_module)
            kernel_class = getattr(kernel_module, self.kernel_module)
            kernel_input_type = kernel_class.get_input_type()
            kernel_input_module =  importlib.import_module("persistence.Datatypes." + kernel_input_type)
            kernel_input_class = getattr(kernel_input_module, kernel_input_type)

            cv_input = kernel_input_class.fromJSONDict(self.input_json)
            field = kernel_input_class.get_iterable_field()
            # narrow the input to only the cross validation inputs
            cv_input[field] = [cv_input[field][i] for i in cv_indices]
        elif self.distances_module != None :
            distances_module = importlib.import_module("persistence." + self.distances_module)
            distances_class = getattr(distances_module, self.distances_module)
            distances_input_type = distances_class.get_input_type()
            distances_input_module =  importlib.import_module("persistence.Datatypes." + distances_input_type)
            distances_input_class = getattr(distances_input_module, distances_input_type)
            cv_input = distances_input_class.fromJSONDict(self.input_json)
            field = distances_input_class.get_iterable_field()
            # narrow the input to only the cross validation inputs
            cv_input[field] = [cv_input[field][i] for i in cv_indices]
        
        learning_module = importlib.import_module("persistence." + self.learning_module)
        learning_class = getattr(learning_module, self.learning_module)
        learning_input_type = learning_class.get_input_type()
        learning_input_module =  importlib.import_module("persistence.Datatypes." + learning_input_type)
        learning_input_class = getattr(learning_input_module, learning_input_type)

        # Cross validation only using the learning_arg value 
        if self.kernel_module == None and self.distances_module == None:
            cv_input = learning_input_class.fromJSONDict(self.input_json)


        learning_results = []
        if isinstance(self.kernel_arg, list) :
            kernel_args = self.kernel_arg
        else :
            kernel_args = [self.kernel_arg]
        
        if self.kernel_module != None :
            # Precompute kernel objects
            def computed_kernel(arg) :
                config = copy(self.config)
                scale_arg = kernel_class.get_scale_arg()
                if scale_arg != None :
                    config[scale_arg] = arg
                kernel = kernel_class(config, cv_input, pool=self.pool)
                print "Computing %s for %s of %s" % ( self.kernel_module, scale_arg, arg ) 
                kernel.compute_kernel()
                kernel.pool = None
                return kernel
            kernel_objects = [computed_kernel(arg) for arg in kernel_args]
        else :
            kernel_objects = None

        if isinstance(self.distances_arg, list) :
            distances_args = self.distances_arg
        else :
            distances_args = [self.distances_arg]

        if self.distances_module != None :
            # Precompute distances objects
            def computed_distances(arg) :
                config = copy(self.config)
                scale_arg = distances_class.get_scale_arg()
                if scale_arg != None :
                    config[scale_arg] = arg
                distances = distances_class(config, cv_input, pool=self.pool)
                print "Computing %s for %s of %s" % ( self.distances_module, scale_arg, arg ) 
                distances.compute_distances()
                distances.pool = None
                return distances
            distances_objects = [computed_distances(arg) for arg in distances_args]
        else :
            distances_objects = None

        if isinstance(self.learning_arg, list) :
            learning_args = self.learning_arg
        else :
            learning_args = [self.learning_arg]

        validator = Validator(self.config, 
                              kernel_class, kernel_args, distances_class, distances_args, learning_class, 
                              kernel_objects, distances_objects, cv_input, 
                              self.partitions, cv_partitions)
        if self.pool == None :
            print "single thread computations"
            results = itertools.imap(validator, 
                                     itertools.product(kernel_args, distances_args, learning_args, 
                                                       self.partitions.cross_validation))
            results = list(results)
        else :
            results = self.pool.imap(validator, 
                                     itertools.product(kernel_args, distances_args, learning_args, 
                                                       self.partitions.cross_validation),
                                     1)
            final_results = []
            try:
                while True:
                    if self.timeout > 0 :
                        result = results.next(self.timeout)
                    else :
                        result = results.next()
                    final_results.append(result)
            except StopIteration:
                pass
            except multiprocessing.TimeoutError as e:
                self.pool.terminate()
                print traceback.print_exc()
                sys.exit(1)
            results = final_results

        results = list(results)
        best_result = (None, 0.0)
        learning_scale = None
        kernel_scale = None
        distances_scale = None
        for (kernel_arg, distances_arg, learning_arg) in itertools.product(kernel_args, distances_args, learning_args) :
            these_results = [result for (_kernel_arg, _distances_arg, _learning_arg, result) in results if kernel_arg == _kernel_arg and distances_arg == _distances_arg and learning_arg == _learning_arg]
            config = copy(self.config)
            learning_scale = learning_class.get_scale_arg()
            if learning_scale != None :
                config[learning_scale] = learning_arg
            if self.kernel_module != None and kernel_args != None :
                kernel_scale = kernel_class.get_scale_arg()
                if kernel_scale != None :
                    config[kernel_scale] = kernel_arg
            elif self.distances_module != None and distances_args != None :
                distances_scale = distances_class.get_scale_arg()
                if distances_scale != None :
                    config[distances_scale] = distances_arg
            correct = Learning(config, these_results).get_average_correct()
            if correct > best_result[1]:
                best_result = (config, correct)

        self.config = best_result[0]
        print "Best result %02.2f%% %s%s%s" % \
            (best_result[1] * 100.0, 
             ("%s %s " % (kernel_scale, self.config[kernel_scale])) if kernel_scale != None else "", 
             ("%s %s " % (distances_scale, self.config[distances_scale])) if distances_scale != None else "",
             ("%s %s " % (learning_scale, self.config[learning_scale])) if learning_scale != None else "")
        self.config.status = 'CrossValidation'
Ejemplo n.º 41
        print "%d -> %d;" % \
            (lookup[used.getiterator('cause').next().get('id')],
             lookup[used.getiterator('effect').next().get('id')])
    for wgb in root.getiterator('causalDependencies').next().getiterator(
            'wasGeneratedBy'):
        if not should_include(wgb):
            continue

        print "%d -> %d;" % \
            (lookup[wgb.getiterator('cause').next().get('id')],
             lookup[wgb.getiterator('effect').next().get('id')])
    for wtb in root.getiterator('causalDependencies').next().getiterator(
            'wasTriggeredBy'):
        if not should_include(wtb):
            continue
        print "%d -> %d;" % \
            (lookup[wtb.getiterator('cause').next().get('id')],
             lookup[wtb.getiterator('effect').next().get('id')])

    for error_edge in error_edges:
        print "%d -> %d;" % error_edge
    print "}"


if __name__ == '__main__':
    if len(sys.argv) < 2:
        print "Usage: python %s <opm_file> [account]" % sys.argv[0]
        sys.exit(42)
    account = sys.argv[2] if len(sys.argv) > 2 else None
    run(sys.argv[1], account)
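
# Hedged usage note: the edge list printed above (closed by "}") is Graphviz
# DOT output, so it can be piped straight into dot; the script name below is
# illustrative:
#
#   python opm_to_dot.py trace.opm | dot -Tpng -o provenance.png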
Ejemplo n.º 42
    def run(self):
        server_ip = self.server_ip
        server_port = self.server_port
        global sk
        skt = comm_lib.sock_client(server_ip, int(server_port))
        sk = skt
        if not skt.conn():
            log.err('connection to %s:%s failed' % (server_ip, server_port))
            sys.exit()
        else:
            log.info('connection to %s:%s success' % (server_ip, server_port))

        epoll = select.epoll()
        thread_poll = comm_lib.thread_manager()

        epoll.register(skt.get_fielno(), select.EPOLLOUT)
        try:
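            # The loop alternates the socket between EPOLLOUT (ready to send) and EPOLLIN (awaiting data).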
            while 1:
                events = epoll.poll(-1)
                for fileno, event in events:
                    if event & select.EPOLLOUT:
                        epoll.modify(fileno, select.EPOLLIN)
                    elif event & select.EPOLLIN:
                        data = ''
                        try:
                            data = skt.recv_data()
                        except:
                            if sys.exc_info()[0]:
                                log.warn('lineno:' +
                                         str(sys._getframe().f_lineno) + ": " +
                                         str(sys.exc_info()))
                        if not data:
                            count = 0
                            log.err('disconnection from %s' % server_ip)
                            while 1:
                                try:
                                    epoll.unregister(fileno)
                                except:
                                    pass

                                skt = comm_lib.sock_client(
                                    server_ip, int(server_port))
                                if not skt.conn():
                                    log.err(
                                        'reconnect to %s:%s failed, retrying.' %
                                        (server_ip, server_port))
                                    count += 1  # back off as failures accumulate
                                    if count < 30:
                                        time.sleep(1)
                                    elif count < 90:
                                        time.sleep(10)
                                    else:
                                        time.sleep(60)
                                    continue
                                else:
                                    log.info('reconnect to %s:%s succeeded.' %
                                             (server_ip, server_port))
                                    sk = skt
                                    # new file descriptor
                                    fileno = sk.get_fielno()
                                    # register it with epoll again
                                    epoll.register(fileno, select.EPOLLOUT)
                                    break

                        else:
                            dt = get_data(data)
                            if dt:
                                if dt['data'] != "conn" and dt[
                                        'data'] != "close":
                                    try:
                                        thread_poll.add_worker(
                                            target=do_request, args=(dt, ))
                                    except:
                                        if sys.exc_info()[0]:
                                            log.warn(
                                                'lineno:' +
                                                str(sys._getframe().f_lineno) +
                                                ": " + str(sys.exc_info()))

                        epoll.modify(fileno, select.EPOLLOUT)
                    elif event & select.EPOLLERR:
                        epoll.modify(fileno, select.EPOLLOUT)
                    elif event & select.EPOLLHUP:
                        epoll.unregister(fileno)
                        skt.close()
        finally:
            log.info(sys.exc_info())
            epoll.unregister(skt.socket().fileno())
            epoll.close()
            skt.close()
Ejemplo n.º 43
#!C:\Users\Owner.ASUS-DESKTOP\PycharmProjects\2DGame\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.8'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.8')()
    )
Ejemplo n.º 44
import os
import multiprocessing
import subprocess
import sys


def run(process_name, val):
    subprocess.call('python ' + process_name, shell=True)
    val.value = val.value + 1


if __name__ == '__main__':
    manager = multiprocessing.Manager()
    val = manager.Value('myval', 0)
    pool = multiprocessing.Pool()
    cur_path = os.getcwd()
    for i in range(3):
        process_name = os.path.join(cur_path, 'test' + str(i) + '.py')
        pool.apply_async(run, args=(process_name, val))
    try:
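        # Busy-wait until all three subprocesses have incremented the shared counter.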
        while val.value < 3:
            continue
    except KeyboardInterrupt:
        sys.exit()
    else:
        pool.close()
        pool.join()
Ejemplo n.º 45
#!F:\pythonspace\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.7'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.7')())
Ejemplo n.º 46
                else:
                    for c in CHANNELS:
                        if VERBOSE>1:
                            print "\nchannel: %d, node: " % c,
                        wnwk.channel(c)
                        for n in ADDRESSES:
                            if VERBOSE>1:
                                print n,
                            wnwk.jbootl(n)
                if VERBOSE:
                    print
                listeners = []
                for n in ADDRESSES:
                    res = wnwk.ping(n)
                    if res['code'] == 'OK':
                        if res.get("appname") == "wibo":
                            listeners.append(n)
                print "Wibo listeners:", listeners
            elif o == "-E":
                for n in ADDRESSES:
                    res = wnwk.exit(n)
                    print "EXIT:", n, res
    wnwk.close()
    return ret

if __name__ == "__main__":
    do_exit = process_command_line()
    if do_exit:
        sys.exit(0)
    # init_prompt()
Ejemplo n.º 47
# Usage: python sql2struct.py your.sql your.go

import sys

def convertCaptitle(s):
	s = s[1:len(s)-1]
	l = s.split('_')
	s = ''
	for i in l:
		s += i.title()

	return s
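
# For example (names arrive backtick-quoted, as in the parsing loop below):
#   convertCaptitle('`user_account`') -> 'UserAccount'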

if __name__ == '__main__':
	if len(sys.argv) < 3:
		sys.exit(-1)

	sql = open(sys.argv[1])
	go = open(sys.argv[2], "wb")
	flag = False

	for line in sql:
		if line.startswith('CREATE'):
			flag = True
			s = line.strip().split(' ')[2]
			s = convertCaptitle(s)
			go.write("type " + s + " struct{\n")
		elif flag == True:
			line = line.strip()
			if line.startswith('`'):
				L = line.split(' ')
Ejemplo n.º 48
    def process_config_file(self, f, install_dir, **kwargs):
        global stn_info

        # Open up and parse the distribution config file:
        try:
            dist_config_dict = configobj.ConfigObj(f, file_error=True)
        except IOError as e:
            sys.exit(str(e))
        except SyntaxError as e:
            sys.exit(
                "Syntax error in distribution configuration file '%s': %s" %
                (f, e))

        # The path where the weewx.conf configuration file will be installed
        install_path = os.path.join(install_dir, os.path.basename(f))

        # Do we have an old config file?
        if os.path.isfile(install_path):
            # Yes. Read it
            config_path, config_dict = weecfg.read_config(install_path, None)
            if DEBUG:
                print "Old configuration file found at", config_path

            # Update the old configuration file to the current version,
            # then merge it into the distribution file
            weecfg.update_and_merge(config_dict, dist_config_dict)
        else:
            # No old config file. Use the distribution file, then, if we can,
            # prompt the user for station specific info
            config_dict = dist_config_dict
            if not self.no_prompt:
                # Prompt the user for the station information:
                stn_info = weecfg.prompt_for_info()
                driver = weecfg.prompt_for_driver(stn_info.get('driver'))
                stn_info['driver'] = driver
                stn_info.update(
                    weecfg.prompt_for_driver_settings(driver, config_dict))
                if DEBUG:
                    print "Station info =", stn_info
            weecfg.modify_config(config_dict, stn_info, DEBUG)

        # Set the WEEWX_ROOT
        config_dict['WEEWX_ROOT'] = os.path.normpath(install_dir)

        # NB: use mkstemp instead of NamedTemporaryFile because we need to
        # do the delete (windows gets mad otherwise) and there is no delete
        # parameter in NamedTemporaryFile in python 2.5.

        # Time to write it out. Get a temporary file:
        tmpfd, tmpfn = tempfile.mkstemp()
        tmpfile = open(tmpfn, 'w')

        # Write the finished configuration file to it:
        config_dict.write(tmpfile)
        tmpfile.flush()
        tmpfile.close()
        os.close(tmpfd)

        # Save the old config file if it exists:
        if not self.dry_run and os.path.exists(install_path):
            backup_path = weeutil.weeutil.move_with_timestamp(install_path)
            print "Saved old configuration file as %s" % backup_path

        # Now install the temporary file (holding the merged config data)
        # into the proper place:
        rv = install_data.copy_file(self, tmpfn, install_path, **kwargs)

        # Now get rid of the temporary file
        os.remove(tmpfn)

        # Set the permission bits unless this is a dry run:
        if not self.dry_run:
            shutil.copymode(f, install_path)

        return rv
Ejemplo n.º 49
 def _signal_handler(self, sig, frame):
     self._handle_failure(f"Terminated with signal {sig}\n" +
                          "".join(traceback.format_stack(frame)))
     sys.exit(sig + 128)
Ejemplo n.º 50
    def main(cmd_args):
        import optparse
        global options, PSYCO
        usage = "\n%prog [options] command [input-file-patterns]\n" + cmd_doc
        oparser = optparse.OptionParser(usage)
        oparser.add_option("-l",
                           "--logfilename",
                           default="",
                           help="contains error messages")
        oparser.add_option(
            "-v",
            "--verbosity",
            type="int",
            default=0,
            help="level of information and diagnostics provided")
        oparser.add_option(
            "-m",
            "--mmap",
            type="int",
            default=-1,
            help="1: use mmap; 0: don't use mmap; -1: accept heuristic")
        oparser.add_option("-e",
                           "--encoding",
                           default="",
                           help="encoding override")
        oparser.add_option(
            "-f",
            "--formatting",
            type="int",
            default=0,
            help="0 (default): no fmt info\n"
            "1: fmt info (all cells)\n",
        )
        oparser.add_option(
            "-g",
            "--gc",
            type="int",
            default=0,
            help=
            "0: auto gc enabled; 1: auto gc disabled, manual collect after each file; 2: no gc"
        )
        oparser.add_option(
            "-s",
            "--onesheet",
            default="",
            help="restrict output to this sheet (name or index)")
        oparser.add_option("-u",
                           "--unnumbered",
                           action="store_true",
                           default=0,
                           help="omit line numbers or offsets in biff_dump")
        oparser.add_option("-d",
                           "--on-demand",
                           action="store_true",
                           default=0,
                           help="load sheets on demand instead of all at once")
        oparser.add_option("-t",
                           "--suppress-timing",
                           action="store_true",
                           default=0,
                           help="don't print timings (diffs are less messy)")
        oparser.add_option("-r",
                           "--ragged-rows",
                           action="store_true",
                           default=0,
                           help="open_workbook(..., ragged_rows=True)")
        options, args = oparser.parse_args(cmd_args)
        if len(args) == 1 and args[0] in ("version", ):
            pass
        elif len(args) < 2:
            oparser.error("Expected at least 2 args, found %d" % len(args))
        cmd = args[0]
        xlrd_version = getattr(xlrd, "__VERSION__", "unknown; before 0.5")
        if cmd == 'biff_dump':
            xlrd.dump(args[1], unnumbered=options.unnumbered)
            sys.exit(0)
        if cmd == 'biff_count':
            xlrd.count_records(args[1])
            sys.exit(0)
        if cmd == 'version':
            print("xlrd: %s, from %s" % (xlrd_version, xlrd.__file__))
            print("Python:", sys.version)
            sys.exit(0)
        if options.logfilename:
            logfile = LogHandler(open(options.logfilename, 'w'))
        else:
            logfile = sys.stdout
        mmap_opt = options.mmap
        mmap_arg = xlrd.USE_MMAP
        if mmap_opt in (1, 0):
            mmap_arg = mmap_opt
        elif mmap_opt != -1:
            print('Unexpected value (%r) for mmap option -- assuming default' %
                  mmap_opt)
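        # The 'xfc' command counts XF (formatting) records, so formatting info is forced on for it.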
        fmt_opt = options.formatting | (cmd in ('xfc', ))
        gc_mode = options.gc
        if gc_mode:
            gc.disable()
        for pattern in args[1:]:
            for fname in glob.glob(pattern):
                print("\n=== File: %s ===" % fname)
                if logfile != sys.stdout:
                    logfile.setfileheading("\n=== File: %s ===\n" % fname)
                if gc_mode == 1:
                    n_unreachable = gc.collect()
                    if n_unreachable:
                        print("GC before open:", n_unreachable,
                              "unreachable objects")
                if PSYCO:
                    import psyco
                    psyco.full()
                    PSYCO = 0
                try:
                    t0 = time.time()
                    bk = xlrd.open_workbook(
                        fname,
                        verbosity=options.verbosity,
                        logfile=logfile,
                        use_mmap=mmap_arg,
                        encoding_override=options.encoding,
                        formatting_info=fmt_opt,
                        on_demand=options.on_demand,
                        ragged_rows=options.ragged_rows,
                    )
                    t1 = time.time()
                    if not options.suppress_timing:
                        print("Open took %.2f seconds" % (t1 - t0, ))
                except xlrd.XLRDError as e:
                    print("*** Open failed: %s: %s" % (type(e).__name__, e))
                    continue
                except KeyboardInterrupt:
                    print("*** KeyboardInterrupt ***")
                    traceback.print_exc(file=sys.stdout)
                    sys.exit(1)
                except BaseException as e:
                    print("*** Open failed: %s: %s" % (type(e).__name__, e))
                    traceback.print_exc(file=sys.stdout)
                    continue
                t0 = time.time()
                if cmd == 'hdr':
                    bk_header(bk)
                elif cmd == 'ov':  # OverView
                    show(bk, 0)
                elif cmd == 'show':  # all rows
                    show(bk)
                elif cmd == '2rows':  # first row and last row
                    show(bk, 2)
                elif cmd == '3rows':  # first row, 2nd row and last row
                    show(bk, 3)
                elif cmd == 'bench':
                    show(bk, printit=0)
                elif cmd == 'fonts':
                    bk_header(bk)
                    show_fonts(bk)
                elif cmd == 'names':  # named reference list
                    show_names(bk)
                elif cmd == 'name_dump':  # named reference list
                    show_names(bk, dump=1)
                elif cmd == 'labels':
                    show_labels(bk)
                elif cmd == 'xfc':
                    count_xfs(bk)
                else:
                    print("*** Unknown command <%s>" % cmd)
                    sys.exit(1)
                del bk
                if gc_mode == 1:
                    n_unreachable = gc.collect()
                    if n_unreachable:
                        print("GC post cmd:", fname, "->", n_unreachable,
                              "unreachable objects")
                if not options.suppress_timing:
                    t1 = time.time()
                    print("\ncommand took %.2f seconds\n" % (t1 - t0, ))

        return None
Ejemplo n.º 51
def create_instrumented_model(args, **kwargs):
    '''
    Creates an instrumented model out of a namespace of arguments that
    correspond to ArgumentParser command-line args:
      model: a string to evaluate as a constructor for the model.
      pthfile: (optional) filename of .pth file for the model.
      layers: a list of layers to instrument, defaulted if not provided.
      edit: True to instrument the layers for editing.
      gen: True for a generator model.  One-pixel input assumed.
      imgsize: For non-generator models, (y, x) dimensions for RGB input.
      cuda: True to use CUDA.
  
    The constructed model will be decorated with the following attributes:
      input_shape: (usually 4d) tensor shape for single-image input.
      output_shape: 4d tensor shape for output.
      feature_shape: map of layer names to 4d tensor shape for featuremaps.
      retained: map of layernames to tensors, filled after every evaluation.
      ablation: if editing, map of layernames to [0..1] alpha values to fill.
      replacement: if editing, map of layernames to values to fill.

    When editing, the feature value x will be replaced by:
        `x = (replacement * ablation) + (x * (1 - ablation))`
    '''

    args = EasyDict(vars(args), **kwargs)

    # Construct the network
    if args.model is None:
        pbar.print('No model specified')
        return None
    if isinstance(args.model, torch.nn.Module):
        model = args.model
    else:
        model = autoimport_eval(args.model)
    # Unwrap any DataParallel-wrapped model
    if isinstance(model, torch.nn.DataParallel):
        model = next(model.children())

    # Load its state dict
    meta = {}
    if getattr(args, 'pthfile', None) is not None:
        data = torch.load(args.pthfile)
        modelkey = getattr(args, 'modelkey', 'state_dict')
        if modelkey in data:
            meta = {}
            for key in data:
                if isinstance(data[key], numbers.Number):
                    meta[key] = data[key]
            data = data[modelkey]
        submodule = getattr(args, 'submodule', None)
        if submodule is not None and len(submodule):
            remove_prefix = submodule + '.'
            data = {
                k[len(remove_prefix):]: v
                for k, v in data.items() if k.startswith(remove_prefix)
            }
            if not len(data):
                pbar.print('No submodule %s found in %s' %
                           (submodule, args.pthfile))
                return None
        model.load_state_dict(data,
                              strict=not getattr(args, 'unstrict', False))

    # Decide which layers to instrument.
    if getattr(args, 'layer', None) is not None:
        args.layers = [args.layer]
    # If the layer '?' is the only specified, just print out all layers.
    if getattr(args, 'layers', None) is not None:
        if len(args.layers) == 1 and args.layers[0] == ('?', '?'):
            for name, layer in model.named_modules():
                pbar.print(name)
            import sys
            sys.exit(0)
    if getattr(args, 'layers', None) is None:
        # Skip wrappers with only one named model
        container = model
        prefix = ''
        while len(list(container.named_children())) == 1:
            name, container = next(container.named_children())
            prefix += name + '.'
        # Default to all nontrivial top-level layers except last.
        args.layers = [
            prefix + name for name, module in container.named_children()
            if type(module).__module__ not in [
                # Skip ReLU and other activations.
                'torch.nn.modules.activation',
                # Skip pooling layers.
                'torch.nn.modules.pooling'
            ]
        ][:-1]
        pbar.print('Defaulting to layers: %s' % ' '.join(args.layers))

    # Now wrap the model for instrumentation.
    model = InstrumentedModel(model)
    model.meta = meta

    # Instrument the layers.
    model.retain_layers(args.layers)
    model.eval()
    if args.cuda:
        model.cuda()

    # Annotate input, output, and feature shapes
    annotate_model_shapes(model,
                          gen=getattr(args, 'gen', False),
                          imgsize=getattr(args, 'imgsize', None))
    return model
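
# Hedged usage sketch (not from the original source): the argument names mirror
# the docstring above, and the torchvision constructor string is an
# illustrative placeholder.
#
#   from argparse import Namespace
#   args = Namespace(model='torchvision.models.vgg16(pretrained=True)',
#                    pthfile=None, layers=None, edit=False, gen=False,
#                    imgsize=(224, 224), cuda=False)
#   model = create_instrumented_model(args)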
Ejemplo n.º 52
#!C:\Users\josep\PycharmProjects\intro-to-algo-lab9\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
    )
Ejemplo n.º 53
def prompt_exit():
    input(tr('Press any key to exit ...', lang=lang))
    sys.exit(1)
Ejemplo n.º 54
 def _canceled(self, instance):
     import sys
     sys.exit()
USAGE = """
copyright_header.py - utilities for managing copyright headers of 'The Bitcoin
Core developers' in repository source files.

Usage:
    $ ./copyright_header <subcommand>

Subcommands:
    report
    update
    insert

To see subcommand usage, run them without arguments.
"""

SUBCOMMANDS = ['report', 'update', 'insert']

if __name__ == "__main__":
    if len(sys.argv) == 1:
        sys.exit(USAGE)
    subcommand = sys.argv[1]
    if subcommand not in SUBCOMMANDS:
        sys.exit(USAGE)
    if subcommand == 'report':
        report_cmd(sys.argv)
    elif subcommand == 'update':
        update_cmd(sys.argv)
    elif subcommand == 'insert':
        insert_cmd(sys.argv)
Ejemplo n.º 56
        if os.path.splitext(os.path.basename(FNAME))[0] == STEM:
            FNAME = os.path.join(DIRPATH, FNAME)
            sys.path.append(DIRPATH)
            import pkgdata

            PKG_NAME = os.path.basename(
                os.path.dirname(os.path.abspath(sys.modules["pkgdata"].__file__))
            )
            FOUND = True
            break
if not FOUND:
    raise RuntimeError("Supported Python interpreter versions could not be found")
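# Extract the major/minor version digits from sys.hexversion (e.g. 0x030800f0 -> "308").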
PYTHON_VER = python_version("{0:0x}".format(sys.hexversion & 0xFFFF0000)[:-4])
SUPPORTED_INTERPS = sorted(pkgdata.SUPPORTED_INTERPS)
if PYTHON_VER not in SUPPORTED_INTERPS:
    sys.exit("Supported interpreter versions: {0}".format(", ".join(SUPPORTED_INTERPS)))


###
# Functions
###
def get_short_desc(long_desc):
    """Get first sentence of first paragraph of long description."""
    found = False
    olines = []
    for line in [item.rstrip() for item in long_desc.split("\n")]:
        if found and (((not line) and (not olines)) or (line and olines)):
            olines.append(line)
        elif found and olines and (not line):
            return (" ".join(olines).split(".")[0]).strip()
        found = line == ".. [[[end]]]" if not found else found
Ejemplo n.º 57
 def run_tests(self):
     import pytest
     errno = pytest.main((self.pytest_args or []) + ["tests"])  # parenthesized so "tests" is always appended
     sys.exit(errno)
Ejemplo n.º 58
 def __on_close(self):
     pygame.quit()
     sys.exit()
Ejemplo n.º 59
    result = []
    if file is None or column is None:
        for text in args:
            result.append(e.find_entities(text))
    else:
        with open(file, 'r') as csv_file:  # avoid shadowing the 'file' argument
            reader = csv.reader(csv_file)
            first_row = True
            for row in reader:
                if not first_row and len(row) >= column + 1:
                    text = row[column]
                    result.append(e.find_entities(text))
                first_row = False
    print(json.dumps(result))


def main():
    parser = argparse.ArgumentParser(description='Extract Noun from csv file')
    parser.add_argument('-f', '--file', help="CSV file path")
    parser.add_argument('-c',
                        '--column',
                        help="Index of parsing row",
                        type=int)
    parser.add_argument('args', nargs=argparse.REMAINDER)
    args = parser.parse_args()
    return extract(args.file, args.column, args.args)


if __name__ == "__main__":
    sys.exit(main())
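
# Hedged usage note: with the argparse setup above, an invocation might look
# like this (script name, file and column index are illustrative):
#
#   python extract_entities.py --file reviews.csv --column 2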
Ejemplo n.º 60
def show_usage_and_die():
    show_usage()
    sys.exit(1)