def ping(self, payload=""):
        """
        send ping data.

        payload: data payload to send server.
        """
        logging.degug("Got <- PING")
        self.send(payload, ABNF.OPCODE_PING)
    def ping(self, payload = ""):
        """
        send ping data.

        payload: data payload to send server.
        """
        logging.degug("Got <- PING")
        self.send(payload, ABNF.OPCODE_PING)
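A minimal usage sketch for the ping method above, assuming it lives on websocket-client's WebSocket class; the endpoint URL is only an example, not from the original.

import websocket  # websocket-client package

ws = websocket.WebSocket()
ws.connect("ws://echo.websocket.events")  # example endpoint
ws.ping("keepalive")                      # sends an ABNF.OPCODE_PING frame via send()
ws.close()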
def writeBackConfig(pSection=None, pUpdateLastAccess=True):
    if pSection and pUpdateLastAccess:
        logging.info('Updating Last Access in Section')
        pSection['Last Update'] = datetime.now().strftime('%Y/%m/%d %H:%M:%S')

    logging.debug('Writing back Config File!')
    with open(gConfigFile, 'w') as configfile:
        gConfig.write(configfile)
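The helper above relies on the module-level globals gConfig and gConfigFile; a minimal sketch of how they might be set up with configparser (the file path and the use of the DEFAULT section are assumptions).

import configparser
import logging
from datetime import datetime  # imports the helper above also needs

gConfigFile = 'settings.ini'            # assumed path
gConfig = configparser.ConfigParser()
gConfig.read(gConfigFile)

# Stamp a section's timestamp and persist the file
writeBackConfig(pSection=gConfig['DEFAULT'], pUpdateLastAccess=True)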
Example #4
def fetchValue(soup):
    value = None
    table = soup.find('table', class_='infobox')
    if table is not None:
        for tr in table.find_all('tr'):
            if tr.th and tr.th.string == 'Box office':
                # tr.td is a Tag, so take its text before parsing
                value_parse = tr.td.text.split('[')[0].split(' ')
                # strip the leading currency symbol and thousands separators
                value = value_parse[0][1:].replace(',', '')
                if len(value_parse) > 1:
                    if 'billion' in value_parse[1]:
                        value = float(value) * 1e9
                    elif 'million' in value_parse[1]:
                        value = float(value) * 1e6
                    logging.debug("Fetched gross value is: %s", value)
    else:
        logging.warning("Empty gross value. Information fetching failed!")
    return value
Example #5
    def execute(self, args):
        """
        Execute the plugin functionality.

        This method is a plugin requirement from the toolbox module; it
        orchestrates the logic contained within the function, which in this
        case means preparing a collection of objects, cache-file pickles
        and other intermediates from the BaseModifications workflow.

        Parameters
        ----------
        args: argparse derived object
            The requirement is an argparse object containing the minimally
            required parameters and optional parameters for a run-through of
            the workflow.

        Returns
        -------
        Nothing is returned; output is written to file and progress
        information may be printed to screen.

        """
        warnings.simplefilter(action='ignore', category=FutureWarning)
        os.environ["NUMEXPR_MAX_THREADS"] = str(multiprocessing.cpu_count())
        fast5 = args.fast5
        bam = BamHandler(args.bam, args)
        reference = ReferenceGenome(args.fasta)
        base_mods = BaseModifications(
            fast5, bam, reference, modification=args.modification,
            threshold=args.probability, context=args.context, args=args)
        if args.index:
            logging.debug(
                f"saving base-mod coordinates to CSV file [{args.output}]")
            base_mods.fast5s_to_basemods().to_csv(args.output, sep="\t")
        else:
            logging.debug(f"saving data as CSV file [{args.output}]")
            base_mods.reduce_mapped_methylation_signal().to_csv(
                args.output, sep="\t", index=False, chunksize=1000000)
            # changing chunksize from the default (None) to 1e6 reduces
            # run time by ~15X
        logging.debug("fin ...")
Example #6
def compute_gmm_post(seq_file, file_list, model_file, preproc_file,
                     output_path, num_comp, **kwargs):

    sr_args = SR.filter_eval_args(**kwargs)

    if preproc_file is not None:
        preproc = TransformList.load(preproc_file)
    else:
        preproc = None

    gmm = DiagGMM.load_from_kaldi(model_file)

    sr = SR(seq_file,
            file_list,
            batch_size=1,
            shuffle_seqs=False,
            preproc=preproc,
            **sr_args)

    t1 = time.time()

    logging.info(time.time() - t1)
    index = np.zeros((sr.num_seqs, num_comp), dtype=int)

    hw = HypDataWriter(output_path)
    for i in range(sr.num_seqs):
        x, key = sr.read_next_seq()
        logging.info('Extracting i-vector %d/%d for %s, num_frames: %d' %
                     (i, sr.num_seqs, key, x.shape[0]))
        r = gmm.compute_z(x)
        r_s, index = to_sparse(r, num_comp)
        if i == 0:
            r2 = to_dense(r_s, index, r.shape[1])
            logging.debug(np.sort(r[0, :])[-12:])
            logging.debug(np.sort(r2[0, :])[-12:])
            logging.debug(np.argsort(r[0, :])[-12:])
            logging.debug(np.argsort(r2[0, :])[-12:])

        hw.write([key], '.r', [r_s])
        hw.write([key], '.index', [index])

    logging.info('Extract elapsed time: %.2f' % (time.time() - t1))
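The to_sparse/to_dense helpers called above keep only the num_comp largest posteriors per frame and scatter them back for the first-iteration sanity check; a sketch of what they might look like (an assumed implementation, not necessarily the project's own).

import numpy as np

def to_sparse(r, num_comp):
    # indices of the num_comp largest posteriors in each row
    index = np.argsort(r, axis=1)[:, -num_comp:]
    r_s = np.take_along_axis(r, index, axis=1)
    return r_s, index

def to_dense(r_s, index, num_cols):
    # scatter the kept posteriors back into a full (num_frames, num_cols) matrix
    r = np.zeros((r_s.shape[0], num_cols), dtype=r_s.dtype)
    np.put_along_axis(r, index, r_s, axis=1)
    return r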
Example #7
    def select(self):
        #
        # Analysis of what the opponent has played so far
        #
        logging.debug("    Entering select()")

        threshold1 = 0.9

        safe_prob_indices = [
            np.sum((np.cumsum(self.proba[k]) <= threshold1).astype(int))
            for k in range(self.k)
        ]
        #
        m = np.array(
            [
                np.dot(
                    np.array(
                        [(t / self.resolution) for t in range(self.resolution + 1)]
                    ),
                    self.proba[k],
                )
                for k in range(self.k)
            ]
        )
        # compute sigma for each bandit
        s = np.array(
            [
                sqrt(
                    np.dot(
                        np.array(
                            [
                                (t / self.resolution) ** 2
                                for t in range(self.resolution + 1)
                            ]
                        ),
                        self.proba[k],
                    )
                    - m[k] ** 2
                )
                for k in range(self.k)
            ]
        )
        #
        # display_msg("values1: {}".format(values1), self.debug)

        logging.debug("---- Inputs ----")
        # logging.debug(m.__class__)
        # logging.debug(m[:5])
        # logging.debug(s[:5])
        # logging.debug(self.f)
        # logging.debug(dir(self.f))

        u = np.vstack([m, s]).T
        logging.debug(u.shape)

        v = self.f(u)

        logging.debug("---- Outputs ----")
        logging.debug(v.__class__)
        logging.debug(v.shape)
        logging.debug(dir(v))
        logging.debug("   {}".format(v.argmax()))
        logging.debug("- {} -".format(v[:5]))
        # logging.debug("- {} -".format(int(np.argmax(v))))

        action = int(v.argmax())
        logging.debug("   {}".format(action))
        logging.debug("    Exiting select()")

        return action
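The m and s arrays above are just the first and second moments of each arm's discretized value distribution; a quick check with a hypothetical uniform distribution over resolution + 1 bins:

import numpy as np

resolution = 10
support = np.array([t / resolution for t in range(resolution + 1)])
proba_k = np.full(resolution + 1, 1 / (resolution + 1))    # hypothetical uniform arm
m_k = np.dot(support, proba_k)                             # mean = 0.5
s_k = np.sqrt(np.dot(support ** 2, proba_k) - m_k ** 2)    # std ~ 0.316
print(m_k, s_k)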
Example #8
        begin_date += date_increment


logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"),
                    format='%(asctime)s - %(message)s')
mkto_instance = mktoAPIClient(munchkin_id, launchpoint_service)
first_extract_date = get_first_date(mkto_instance)
last_extract_date = datetime.now(tz=timezone.utc)
logging.info(f'Last date: {last_extract_date}')
all_fields = get_all_fields(mkto_instance)
file_name = f'{munchkin_id}_every_person.csv'
with open(file_name, 'w', newline='', encoding='UTF-8') as csv_file:
    logging.debug(f'Opened output CSV file: {file_name}')
    csv_writer = csv.DictWriter(csv_file, fieldnames=all_fields)
    csv_writer.writeheader()
    all_lead_ids = set()
    for start_at, end_at in \
        all_31day_ranges_between(first_extract_date, last_extract_date):
        leads_dict = \
            get_leads_created_between(mkto_instance, start_at, end_at)
        for row in leads_dict:
            logging.debug(f'Processing: {row}')
            lead_id = row['id']
            if lead_id not in all_lead_ids:
                logging.debug(f'Adding: {lead_id}')
                all_lead_ids.add(lead_id)
                csv_writer.writerow(row)
            else:
                logging.debug(f'Skipping duplicate ID: {lead_id}')
    logging.info(f'Number of leads extracted: {len(all_lead_ids)}')
logging.debug(f'Closed output CSV file: {file_name}')
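The dangling begin_date += date_increment line at the top of this snippet is the tail of the range generator used in the main loop; a sketch of what all_31day_ranges_between likely looks like (an assumed reconstruction, not the original helper).

from datetime import timedelta

def all_31day_ranges_between(begin_date, end_date):
    # yield consecutive, at-most-31-day windows covering [begin_date, end_date)
    date_increment = timedelta(days=31)
    while begin_date < end_date:
        yield begin_date, min(begin_date + date_increment, end_date)
        begin_date += date_increment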