Example No. 1
    def sftp_get_dir_exclude(self, sftp, remote_dir, local_dir, excludes=[]):
        # Strip a trailing '/' from the path string, if present
        if remote_dir[-1] == '/':
            remote_dir = remote_dir[0:-1]

        # If remote_dir points to a single file, download it directly
        if not stat.S_ISDIR(sftp.stat(remote_dir).st_mode):
            (tmp_dir, filename) = os.path.split(remote_dir)
            if filename not in excludes:
                sftp.get(remote_dir, os.path.join(local_dir, filename))
            return

        # If remote_dir is a directory, list its entries and handle each one
        files = sftp.listdir_attr(remote_dir)
        for x in files:
            filename = remote_dir + '/' + x.filename

            # Skip files that should not be downloaded and move on to the next entry
            if self.notDownloadFiles(x.filename, filename, excludes):
                continue

            # If the entry is a directory, recurse into it; remote entries are checked via stat.S_ISDIR(st_mode)
            if stat.S_ISDIR(x.st_mode):
                tmp_dir = os.path.join(local_dir, x.filename)
                logging.info('Getting directory %s ...' % filename)
                os.mkdir(tmp_dir)
                self.sftp_get_dir_exclude(sftp, filename, tmp_dir, excludes)
            else:
                local_filename = os.path.join(local_dir, x.filename)
                logging.info('Getting file %s ...' % filename)
                sftp.get(filename, local_filename)
    def eval(self, xs, ys):
        logging.info("eval begin...")
        decoder_inputs, y, y_seqlen, sents2 = ys
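        # The gold decoder inputs are discarded at evaluation time; decoding starts from a single <s> token per example.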
        decoder_inputs = tf.compat.v1.ones((tf.shape(xs[0])[0], 1), tf.compat.v1.int32) * self.token2idx["<s>"]

        ys = (decoder_inputs, y, y_seqlen, sents2)

        memory, sents1, src_masks = self.encode(xs, False)

        logging.info("Inference graph is being built. Please be patient.")
        for _ in tqdm(range(self.hp.maxlen2)):
            logits, y_hat, y, sents2 = self.decode(ys, memory, src_masks, False)
            """ .all() """
            if np.array(tf.compat.v1.reduce_sum(y_hat, 1) == self.token2idx["<pad>"]).all(): break

            _decoder_inputs = tf.compat.v1.concat((decoder_inputs, y_hat), 1)
            ys = (_decoder_inputs, y, y_seqlen, sents2)

        # monitor a random sample
        n = tf.compat.v1.random_uniform((), 0, tf.compat.v1.shape(y_hat)[0] - 1, tf.int32)
        sent1 = sents1[n]
        pred = convert_idx_to_token_tensor(y_hat[n], self.idx2token)
        sent2 = sents2[n]

        tf.compat.v1.summary.text("sent1", sent1)
        tf.compat.v1.summary.text("pred", pred)
        tf.compat.v1.summary.text("sent2", sent2)

        # tf.compat.v1.disable_eager_execution()
        summaries = tf.compat.v1.summary.merge_all()

        logging.INFO("Done!")
        return y_hat, summaries
Example No. 3
 def generate_invoice_from_task(self):
     task_id = helper.find_random_task_done(self.client)
     if not task_id:
         logging.INFO("Failed to generate Invoice -- no Task found")
         return ()
     logging.INFO("Created Invoice for task: " + task_id.name)
     return task_id.generate_invoices()
Example No. 4
def main():

    # Default config file and profile
    config = from_file()
    # Create artifact
    artifacts_client = ArtifactsClient(config)

    # Upload Image and Signature Flow
    kms_key_id = "ocid1.key.oc1..exampleuniqueID"
    kms_key_version_id = "ocid1.keyversion.oc1..exampleuniqueID"
    signing_algo = "SHA_512_RSA_PKCS_PSS"
    compartment_id = "ocid1.compartment.oc1..exampleuniqueID"
    image_id = "ocid1.containerimage.oc1..exampleuniqueID"
    description = "Image built by TC"
    metadata = "{\"buildNumber\":\"123\"}"

    signature = sign_and_upload_container_image_signature_metadata(
        artifacts_client, config, kms_key_id, kms_key_version_id, signing_algo,
        compartment_id, image_id, description, metadata)
    logging.INFO("A signature has been successfully uploaded: %s", signature)

    # Pull Image and Verify Signature Flow
    repo_name = "repo-name"
    trusted_keys = ["ocid1.key.oc1..keyId1", "ocid1.key.oc1..keyId2"]
    image_digest = "sha256:12345"

    verified = get_and_verify_image_signature_metadata(artifacts_client,
                                                       compartment_id, False,
                                                       repo_name, image_digest,
                                                       trusted_keys)
    if verified:
        logging.INFO("At least one of the signatures is verified")
    else:
        logging.WARN("None of the signatures is verified")
Example No. 5
def connect(ip, username, password, port, prompt=']#'):
    try:
        ssh_newkey='Are you sure you want to continue connecting'
        child = pexpect.spawn('ssh ' + username + '@' + ip + ' -p ' + port, maxread=5000)
        child.logfile=fout
        i=child.expect([prompt,'assword:*',ssh_newkey,'refused',pexpect.TIMEOUT,'key.*? failed'])
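        # i is the index of the expect() pattern that matched: 0=shell prompt, 1=password prompt,
        # 2=first-time host key confirmation, 3=connection refused, 4=timeout, 5=host key check failure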
        print(i)
        if i == 0:
            pass
        elif i == 1:
            child.send(password + '\r')
        elif i == 2:
            child.sendline('yes')
        elif i == 3:
            print('Connection refused')
        elif i == 4:
            raise Exception('Error TIMEOUT!')
        elif i == 5:
            print(child.before, child.after)
            os.remove(os.path.expanduser('~') + '/.ssh/known_hosts')
                os.remove(os.path.expanduser('~')+'/.ssh/known_hosts')
        child.expect('#')
        child.sendline(options.cmd)
        child.expect(']#')
        logging.info('The command %s result is:' % options.cmd)
        logging.info(child.before)
Example No. 6
def tesa(a):
    logging.info("into the function tesa ")
    #logging.critical("the function got the value "+str(a))
    logging.critical("the function got the value " + str(a))
    logging.error("the function got the value " + str(a))
    logging.warning("the function got the value " + str(a))
    pdb.set_trace()
    logging.info("the function got the value " + str(a))
    logging.debug("the function got the value " + str(a))

    try:
        b = a / 2
    except Exception as ex:
        logging.DEBUG("an exception has occured DEBUG")

        logging.INFO("an exception has occuredINFO")

        logging.WARNING("an exception has occuredWARNING")

        logging.ERROR("an exception has occuredERROR")

        logging.CRITICAL("an exception has occuredCRITICAL")
        logging.CRITICAL(str(ex))
        logging.ERROR(str(ex))
        logging.WARNING(str(ex))
        logging.INFO(str(ex))
        logging.DEBUG(str(ex))
    def encode(self, xs, training=True):
        logging.info("encode ...")
        with tf.compat.v1.variable_scope("encoder",reuse=tf.compat.v1.AUTO_REUSE):
            x,seqlens,sents1 = xs
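            # Positions where the input id is 0 are padding; this mask lets attention ignore them.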
            src_masks = tf.math.equal(x,0)

            enc = tf.compat.v1.nn.embedding_lookup(self.embeddings,x)
            enc *= self.hp.d_model ** 0.5 # scale

            enc += positional_encoding(enc,self.hp.maxlen1)
            enc = tf.compat.v1.layers.dropout(enc,self.dropout_rate,training=training)

            for i in range(self.hp.num_blocks):
                with tf.compat.v1.variable_scope("num_blocks_{}".format(i),reuse=tf.compat.v1.AUTO_REUSE):
                    enc = multihead_attention(queries=enc,
                                              keys=enc,
                                              values=enc,
                                              key_masks=src_masks,
                                              num_heads=self.hp.num_heads,
                                              dropout_rate=self.hp.dropout_rate,
                                              training=training,
                                              causality=False)
                    enc = ff(enc,num_units=[self.hp.d_ff,self.hp.d_model])
        memory = enc
        logging.INFO("Done")
        return memory,sents1,src_masks
Example No. 8
 def find_and_gettext(self, locator, value: str = None):
     logging.info(locator)
     logging.info(value)
     element: WebElement
     try:
         element_text = self._driver.find_element(*locator).text if isinstance(locator,tuple) \
             else self._driver.find_element(locator, value).text
         # Restore the implicit wait time
         self._driver.implicitly_wait(10)
         # Reset _error_num to 0 once the element is found
         self._error_num = 0
         return element_text
     # Handle pop-ups that are on the black list
     except Exception as e:
         # On exception, shorten the implicit wait so pop-ups can be handled quickly
         self._driver.implicitly_wait(1)
         # Check how many times the exception has already been handled
         if self._error_num > self._max_errnum:
             raise e
         self._error_num += 1
         # Handle pop-up boxes from the black list
         for ele in self._black_list:
             elelist = self._driver.find_elements(*ele)
             if len(elelist) > 0:
                 elelist[0].click()
                 # After handling the pop-up, retry finding the target element
                 return self.find_and_gettext(locator, value)
         raise e
Example No. 9
def handle(command, channel, user):
    """
        If the parser function finds a correct bot call or !sticky command,
        firehose() will pass the link and channel ID to this function, where
        the link will be validated and then stickied if it passes the check.
    """
    success = 'Roger that! I\'ve stickied your post successfully :)'
    unknown = 'Uh oh...something went wrong. You\'ll need to alert my master!'
    not_safe = 'Whoops...Looks like there\'s already two stickies up, or you\'re less than 6 hours from a scheduled sticky going live :('
    val_failed = 'Look, dammit. I can\'t sticky something if you f**k up the URL. Get it right and then come talk to me.'
    default = 'Uhh...who tf is this guy?'
    unstickied = 'You got it! The post has been unstickied.'
    unsticky_val_failed = 'Hmm...I couldn\'t find a current sticky matching the link you gave me.'
    unsticky_failed = 'For some reason, I just...couldn\'t figure out how to unsticky this post...please forgive me? :\'('

    if user in settings['users'].values():
        if command.startswith(BOT_CMD_UNSTICKY):
            link = command.split(BOT_CMD_UNSTICKY)[1].strip().lower()
            url = link.strip('<>')
            unsticky_id = sticky_bot.validate_unsticky(url)
            if unsticky_id:
                if sticky_bot.unsticky(unsticky_id):
                    logging.info('Unstickied!')
                    post_message(channel, unstickied)
                    return
                else:
                    logging.warning('Un-sticky failed!')
                    post_message(channel, unsticky_failed)
                    return
            else:
                logging.warning('Un-sticky validation failed!')
                post_message(channel, unsticky_val_failed)
                return

        link = command.strip('<>')
        post_id = sticky_bot.validate(link)
        if post_id:
            if sticky_bot.is_sticky_safe():
                if sticky_bot.sticky(post_id):
                    logging.info('Stickied!')
                    post_message(channel, success)
                    if settings['call_home']:
                        call_home.sendAlert(link)
                    return
                else:
                    logging.warning('Sticky failed!')
                    post_message(channel, unknown)
                    return
            else:
                logging.warning('Not safe to sticky.')
                post_message(channel, not_safe)
                return
        else:
            logging.warning('Validation failed.')
            post_message(channel, val_failed)
            return
    else:
        post_message(channel, default)
        return
Example No. 10
 def openingconnection(self):
     try:
         if self.conn is None:
             self.conn = pymysql.connect('127.0.0.1', 'root', '', 'watu')
     except pymysql.MySQLError as e:
         logging.INFO("Error in connection")
     finally:
         logging.INFO("Connection is open!")
Example No. 11
def get_disk_partitions():
    disk = psutil.disk_partitions()
    for i in disk:
        logging.INFO("磁盘:%s   分区格式:%s" % (i.device, i.fstype))
        disk_use = psutil.disk_usage(i.device)
        logging.INFO("使用了:%sM,空闲:%sM,总共:%sM,使用率\033[1;31;42m%s%%\033[0m," %
                     (disk_use.used / 1024 / 1024, disk_use.free / 1024 / 1024,
                      disk_use.total / 1024 / 1024, disk_use.percent))
Example No. 12
def main():
    """
    the main entry point of the application
    :return: 
    """
    logging.basicConfig(filename="mySnake.log", level=logging.INFO)
    logging.INFO("Program started")
    result = otherMod.add(7, 8)
    logging.INFO("Done")
Example No. 13
def update_test_files(model, multiple_image_columns=False):
    """
    This function takes a model string as the main argument, initializes the appropriate
    ImageFeaturizer model, and uses it to predict on the test array and CSV. It logs
    whether the predictions have changed, and then updates the arrays and CSVs accordingly.

    Parameters
    ----------
    model : str
        The name of one of pic2vec's supported models

    multiple_image_columns : bool
        A boolean that determines whether to update the csvs and arrays for single or multiple
        image columns

    Returns
    -------
    None
    """
    # Only autosample if updating the csvs and arrays for multiple image columns
    f = ImageFeaturizer(model=model, autosample=multiple_image_columns)

    # Load and featurize the data corresponding to either the single or multiple image columns
    load_data = LOAD_DATA_ARGS_MULT if multiple_image_columns else LOAD_DATA_ARGS_SINGLE
    f.featurize(**load_data)

    # Updating test CSVs
    features = f.features
    test_csv = CHECK_CSV_MULT if multiple_image_columns else CHECK_CSV_SINGLE

    # Have to convert to float32
    current_csv = pd.read_csv(test_csv.format(model))
    cols = current_csv.select_dtypes(include='float64').columns
    current_csv = current_csv.astype({col: 'float32' for col in cols})

    # Check prediction consistency and update files for test CSVs if necessary
    test_csv_identical = features.equals(current_csv)
    logging.INFO("Test csv identical for {}?".format(model))
    logging.INFO(test_csv_identical)

    if not test_csv_identical:
        features.to_csv(test_csv.format(model), index=False)

    # Updating test arrays
    features = f.features.astype(float).values
    test_array = CHECK_ARRAY_MULT if multiple_image_columns else CHECK_ARRAY_SINGLE

    # Check prediction consistency and update files for test arrays if necessary
    test_array_identical = np.array_equal(features,
                                          np.load(test_array.format(model)))

    logging.INFO("Test array identical for {}?".format(model))
    logging.INFO(test_array_identical)

    if not test_array_identical:
        np.save(test_array.format(model), features)
Example No. 14
    def save_model(self):
        for model in self.get_w2v_model():
            flnm = "{}{}.txt".format(self.basepath, model['filename'])
            logging.INFO("the new filename is: {}".format(flnm))
            model['gensimmodel'].wv.save_word2vec_format("{}".format(flnm))
            logging.info('Saved model')

            logging.info(
                "reopen it with gensim.models.KeyedVectors.load_word2vec_format ('{}')"
                .format(flnm))
Example No. 15
def get_procedurce():
    # Read each process's pid, name, and executable path
    pid = psutil.pids()
    for k, i in enumerate(pid):
        try:
            proc = psutil.Process(i)
            if proc.name() == 'sipadprg':
                logging.info("%s %s %.2f%% %s %s", k, i, proc.memory_percent(),
                             proc.name(), proc.exe())
        except psutil.AccessDenied:
            logging.INFO("psutil.AccessDenied")
Example No. 16
def main():

	logging.getLogger().setLevel(logging.INFO)

	if FLAGS.train:
		logging.info('Training a new model')
		train.

	if FLAGS.evaluate:
		logging.info('Evaluating the trained model')

	if FLAGS.predict:
Example No. 17
    def generate_multi_image(self, v, r, no_of_samples):
        '''used to generate predicted images from z values sampled from a Gaussian Dist.'''
        writer = SummaryWriter('/GQN/chainer-gqn/tensor-log')
        xp = cuda.get_array_module(v)

        batch_size = v.shape[0]
        h_t_gen, c_t_gen, u_t, _, _ = self.generate_initial_state(
            batch_size, xp)
        v = cf.reshape(v, v.shape[:2] + (1, 1))

        # no_of_samples = 100
        reconstructed_images = []
        
        # ht_list = []
        # ut_list = []
        for i in range(no_of_samples):
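            # Re-initialise the generator state for each sample so the draws are independent.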
            h_t_gen, c_t_gen, u_t, _, _ = self.generate_initial_state(
            batch_size, xp)
            for t in range(self.num_layers):
                generation_core = self.get_generation_core(t)

                mean_z_p, ln_var_z_p = self.z_prior_distribution.compute_parameter(
                    h_t_gen)
                z_t = cf.gaussian(mean_z_p, ln_var_z_p)
                writer.add_histogram('Variance of Z',cp.mean(cp.var(z_t,axis=0)).data,t)
                logging.INFO("logged variance of Z")

                h_next_gen, c_next_gen, u_next = generation_core(
                    h_t_gen, c_t_gen, z_t, v, r, u_t)

                u_t = u_next
                h_t_gen = h_next_gen
                c_t_gen = c_next_gen
                writer.add_histogram('Variance of Predicted images',u_t.data,t)
                logging.INFO("logged variance of predicted image")
                writer.close()
            mean_x = self.map_u_x(u_t)
            # reconstructed_images.append(mean_x.data)
            if i == 0:
                shapes_c = [no_of_samples] + [x for x in c_t_gen.shape]
                shapes_z = [no_of_samples] + [x for x in z_t.shape]
                shapes_images = [no_of_samples] + [x for x in mean_x.shape]
                ct_list = cupy.zeros(shapes_c)
                zt_list = cupy.zeros(shapes_z)
                reconstructed_images = cupy.zeros(shapes_images)
            zt_list[i] = z_t.data
            ct_list[i] = c_t_gen.data
            reconstructed_images[i] = mean_x.data 
            # ht_list.append(h_t_gen.data)
            # ut_list.append(u_t.data)
            
        return reconstructed_images, ln_var_z_p.data, zt_list, ct_list
Example No. 18
def get_network():
    count = psutil.net_io_counters()
    logging.INFO("发送字节数:\033[1;31;42m%s\033[0mbytes,接收字节数:" \
          "\033[1;31;42m%s\033[0mbytes,发送包数:%s,接收包数%s" \
          % (count.bytes_sent, count.bytes_recv, count.packets_sent, \
             count.packets_recv))
    users = psutil.users()
    logging.INFO("当前登录用户:%s" % users[0].name)
    # Boot time
    curent_time = psutil.boot_time()
    curent_time_1 = time.strftime("%Y-%m-%d %H:%M:%S",
                                  time.localtime(curent_time))
    print(curent_time_1)
def log(dict_config='N/A', log_lines=['N/A'], _date=datetime.datetime.now()):
    if dict_config == 'N/A':
        #make_log_file(filename='telemetry.log')
        logging.basicConfig(filename='logs\\telemetry.log', level='INFO')
        logging.info('New log session, {}{}{}:'.format(str(_date.day),
                                                       str(_date.month),
                                                       str(_date.year)))
        logging.info('Starting log without advanced dictionary config...')
    else:
        #File name from config dictionary
        #make_log_file(dict_config['handlers']['fileHandler']['filename'])
        logging.config.dictConfig(dict_config)
    rec_logs(line_info=log_lines)
Example No. 20
 def edit_delivery_lines(self):
     # confirms moves in DO to finish SO
     delivery_id = helper.find_random_delivery(self.client)
     if not delivery_id:
         logging.INFO("Failed to finish DO -- none found")
         return ()
     delivery_id.action_assign()
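     # Reserve stock for the delivery; the transfer is only confirmed once every move is assigned.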
     if all(move.state == "assigned" for move in delivery_id.move_lines):
         for operation in delivery_id.pack_operation_product_ids:
             qty_to_do = operation.product_qty
             operation.write({"qty_done": qty_to_do})
         logging.INFO("Confirmed DO: " + delivery_id.name)
         return delivery_id.do_new_transfer()
Example No. 21
def noaa_api_connector(countries, start_date, end_date=None, metrics=None):
    """Get data from NOAA API.

    Arguments:
        countries(list[str]): List of FIPS country codes to retrieve.
        start_date(datetime)
        end_date(datetime)
        metrics(list[str]): Optional. List of metrics to retrieve; valid values are:
            TMIN: Minimum temperature.
            TMAX: Maximum temperature.
            TAVG: Average of temperature.
            SNOW: Snowfall (mm).
            SNWD: Snow depth (mm).

    Returns:
        tuple[list[dict], list[Exception]]
    """
    if not os.path.isfile(f'{DATA_DIRECTORY}/stations_metadata.txt'):
        download_noaa_files(large_files=False)

    result = list()
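    # Query the NOAA API one country at a time and accumulate the parsed records.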
    for country in countries:
        logging.info('Requesting data for %s', country)
        urls = get_request_urls(country, start_date, end_date, metrics)
        country_results, errors = get_parse_response(urls)

        if errors:
            logging.info(
                'The following errors were found during the operation:')
            for error in errors:
                logging.info(error)

        result.extend(country_results)

    data = pd.DataFrame(result)
    stations = load_dataset('stations')
    data = data.merge(stations, how='left', left_on='STATION', right_on='ID')

    del data['ID']
    del data['STATE']

    columns = [
        'DATE', 'STATION', 'LATITUDE', 'LONGITUDE', 'ELEVATION', 'NAME',
        'GSN FLAG', 'HCN/CRN FLAG', 'WMO ID'
    ]

    if metrics is None:
        metrics = DEFAULT_METRICS

    columns.extend([metric for metric in metrics if metric in data.columns])
    return data[columns]
Example No. 22
def new_user():
    username = request.json.get('username')
    password = request.json.get('password')
    if username is None or password is None:
        logging.info('missing arguments')
        abort(400)  # missing arguments
    if User.query.filter_by(username=username).first() is not None:
        logging.info('user already exists')
        abort(400)  # existing user

    user = User(username=username)
    user.hash_password(password)
    db.session.add(user)
    db.session.commit()
    return jsonify({'username': user.username})
 def coherence_score(self):
     self.coherence_model_lda = gensim.models.coherencemodel.CoherenceModel(
         model=self.lda_model,
         texts=self.corpus,
         dictionary=self.id2word,
         coherence='c_v')
     logging.info(self.coherence_model_lda.get_coherence())
Example No. 24
 def check(self):
     output = self.status()
     logging.info(self.status())
     if output != self.state:
         self.state = output
         return True
     return False
def __load_rules(rules_definition_files):
    """Parse the input rules definition XML files

    :param rules_definition_files: the files containing rules definitions
    :type rules_definition_files: list[str]
    :rtype: ada_default_rules: list[{str, str}]
    """

    for filename in rules_definition_files:
        try:
            tree = ElementTree.parse(filename).getroot()

            # Fetch all rules and fill default rule information
            for rule in tree.findall('./rule'):
                default_rule = {
                    'key': None,
                    'repositoryKey': None
                }

                for child in rule:
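                    # A <key> child supplies the rule key; a <tag> child naming a known tool supplies the repository key.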
                    if child.tag == 'key':
                        default_rule['key'] = child.text
                    else:
                        if child.tag == 'tag' and child.text in Tools:
                            default_rule['repositoryKey'] = child.text
                __print_rule(default_rule)

        except ParseError:
            logging.info('failed to parse XML file %s' % filename)
Example No. 26
 def print_picking_wave(self):
     self.Task.action_picking_wave()
     picking_wave_id = helper.find_random_picking_wave(self.client)
     if not picking_wave_id:
         logging.INFO("Failed to print Picking Wave -- none found")
         return ()
     return picking_wave_id.print_picking()
Example No. 27
 def save(self, vocab_path):
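     # Bundle the source and target vocabularies into a single dict and persist it with torch.save.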
     vocab = {
         "src": self._src_word2idx,
         "tgt": self._tgt_word2idx
     }
     logging.INFO(f"Vocab save {opt.vocab} ...")
     torch.save(vocab, vocab_path)
Example No. 28
def get_cpu():
    count = 0
    while count < 1:
        count = count + 1
        time.sleep(1)
        cpu_liyonglv = psutil.cpu_percent()
        logging.INFO("当前cpu利用率:\033[1;31;42m%s%%\033[0m" % cpu_liyonglv)
Example No. 29
	def parse(self, q, ans_item):
		self.question_id = q.post_id

		if('answer_id' in ans_item):
			self.post_id = ans_item['answer_id']
		else:
			logging.info('Malformed Answer: No item[answer_id] in json:\n{0}'.format(ans_item))
			return

		if('link' in ans_item):
			self.link = ans_item['link']

		if('score' in ans_item):
			self.score = ans_item['score']

		if('body_markdown' in ans_item):
			self.body = ans_item['body_markdown']

		if('owner' in ans_item):
			dsp = ans_item['owner']
			if('display_name' in dsp):
				self.owner = dsp['display_name']

		if('is_accepted' in ans_item):
			self.is_accepted = ans_item['is_accepted']

		self.title = q.title
		self.tags = q.tags
		self.is_closed = q.is_closed
		self.close_reason = q.close_reason
Example No. 30
 def create_lead(self):
     partner_rec = helper.find_random_customer(self.client)
     team_rec = helper.search_browse(self.client,
                                     "crm.team",
                                     [("name", "=", "Direct Sales")],
                                     random_pick=True)
     stage_rec = helper.search_browse(
         self.client,
         "crm.stage",
         [("team_id", "=", False), ("name", "=", "New")],
         random_pick=True,
     )
     vals = {
         "probability": 10,
         "team_id": team_rec.id,
         "partner_id": partner_rec.id,
         "planned_revenue": random.randrange(
             1000, 100001,
             1000),  # random revenue from 1k to 100k by 1k increments
         "priority": str(random.randrange(0, 4,
                                          1)),  # random priority from 0-3
         "type": "opportunity",
         "name": partner_rec.name,
         "stage_id": stage_rec.id,
     }
     lead_id = self.Lead.create(vals)
     logging.INFO("Created Lead: " + partner_rec.name)
     return lead_id