Example #1
def get_marketId():
    log_info('get_marketId')
    def chk(result):
        entries = {}
        for success, r in result:
            if not success:
                log_error('download contracts failed {}'.format(r))
            else:
                prodID, hubID, d = r
                log_info('prodID {}, hubID {}'.format(prodID, hubID))
                for i in d:
                    i['productId'] = prodID
                    i['hubId'] = hubID
                    i['marketId'] = int(i['marketId'])
                    i['change'] = int(i['change'])
                    i['endDate'] = to_utctimestamp(i['endDate'])
                    i['lastTime'] = to_utctimestamp(i['lastTime'])
                    entries[i['marketId']] = i
        log_info('num of marketID downloaded: {}'.format(len(entries)))
        return entries

    dl = []
    for prodID, hubID in contract_list:
        url = f'https://www.theice.com/marketdata/DelayedMarkets.shtml?getContractsAsJson=&productId={prodID}&hubId={hubID}'
        log_info(url)
        d = download_contracts(url, prodID, hubID)
        dl.append(d)

    dlist = defer.DeferredList(dl)
    dlist.addCallback(chk)
    return dlist
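The example assumes a to_utctimestamp helper that is not shown. A minimal
sketch of such a conversion (the date format string is an assumption and may
differ from what the ICE feed actually returns):

from datetime import datetime, timezone

def to_utctimestamp(value):
    # Parse an assumed datetime format and return a UTC Unix timestamp.
    dt = datetime.strptime(value, '%m/%d/%Y %I:%M %p')
    return int(dt.replace(tzinfo=timezone.utc).timestamp())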
Example #2
    def add_disassembly(self, filter_lib):
        """ Collect disassembly information:
            1. Use objdump to collect disassembly for each function in FunctionSet.
            2. Set flag to dump addr_hit_map when generating record info.
        """
        objdump = Objdump(self.ndk_path, self.binary_cache_path)
        cur_lib_name = None
        dso_info = None
        for function in sorted(self.functions.id_to_func.values(),
                               key=lambda a: a.lib_id):
            if function.func_name == 'unknown':
                continue
            lib_name = self.libs.get_lib_name(function.lib_id)
            if lib_name != cur_lib_name:
                cur_lib_name = lib_name
                if filter_lib(lib_name):
                    dso_info = objdump.get_dso_info(lib_name)
                else:
                    dso_info = None
                if dso_info:
                    log_info('Disassemble %s' % dso_info[0])
            if dso_info:
                code = objdump.disassemble_code(dso_info, function.start_addr,
                                                function.addr_len)
                function.disassembly = code

        self.gen_addr_hit_map_in_record_info = True
Example #3
    def decode(self):
        """
        Verifies that the receipt can be decoded and that the initial
        contents of the receipt are correct.

        If it's invalid, then just return invalid rather than give out any
        information.
        """
        try:
            receipt = decode_receipt(self.receipt)
        except:
            log_exception({
                'receipt': '%s...' % self.receipt[:10],
                'app': self.get_app_id(raise_exception=False)
            })
            log_info('Error decoding receipt')
            raise InvalidReceipt('ERROR_DECODING')

        try:
            assert receipt['user']['type'] == 'directed-identifier'
        except (AssertionError, KeyError):
            log_info('No directed-identifier supplied')
            raise InvalidReceipt('NO_DIRECTED_IDENTIFIER')

        return receipt
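Pieced together from the checks in these receipt examples (check_type,
check_db, check_url, get_storedata, ok_or_expired), a decoded receipt is a
dict shaped roughly as follows; every value here is illustrative:

receipt = {
    'typ': 'purchase-receipt',
    'user': {'type': 'directed-identifier', 'value': 'some-uuid'},
    'product': {'storedata': 'id=337141&inapp_id=1'},
    'verify': 'https://receiptcheck.example.com/verify/',
    'exp': 1356998400,
}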
Example #4
def tba_request(api_url):
    """Sends a single web request to the TBA API v3 api_url is the suffix of the API request URL

    (the part after '/api/v3').
    """
    utils.log_info(f'tba request from {api_url} started')
    full_url = f'https://www.thebluealliance.com/api/v3/{api_url}'
    request_headers = {'X-TBA-Auth-Key': API_KEY}
    cached = local_database_communicator.select_tba_cache(api_url)
    # Check if cache exists
    if cached != {}:
        cached = cached[api_url]
        request_headers['If-Modified-Since'] = cached['timestamp']
    print(f'Retrieving data from {full_url}')
    try:
        request = requests.get(full_url, headers=request_headers)
    except requests.exceptions.ConnectionError:
        utils.log_warning('Error: No internet connection.')
        return None
    utils.log_info(f'tba request from {api_url} finished')
    # A 200 status code means the request was successful
    # 304 means that data was not modified since the last timestamp
    # specified in request_headers['If-Modified-Since']
    if request.status_code == 304:
        return cached['data']
    if request.status_code == 200:
        formatted_data = {
            'timestamp': request.headers['Last-Modified'],
            'data': request.json()
        }
        local_database_communicator.overwrite_tba_data(formatted_data, api_url)
        return request.json()
    raise Warning(f'Request failed with status code {request.status_code}')
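A usage sketch for tba_request; the endpoint suffix is a placeholder and the
call assumes API_KEY and local_database_communicator are configured as in the
example:

teams = tba_request('event/2020mndu/teams/simple')
if teams is not None:
    print(f'fetched {len(teams)} teams')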
Example #5
    def _second_parse(self):
        log_info('second_parse begin')
        parser = self.parser
        while parser.has_more_commands():
            line = ''
            parser.advance()
            command_type = parser.command_type()
            if command_type == COMMAND.A_COMMAND:
                symbol = parser.symbol()
                # log_debug("A_COMMAND, symbol - {}".format(symbol))

                address = self._parse_address(symbol)
                line = "0{:015b}\n".format(address)
            elif command_type == COMMAND.L_COMMAND:
                symbol = parser.symbol()
                # log_debug("L_COMMAND, symbol - {}".format(symbol))
            elif command_type == COMMAND.C_COMMAND:
                # dest_str = parser.dest()
                # comp_str = parser.comp()
                # jump_str = parser.jump()
                dest_str, comp_str, jump_str = parser.parse_c_command()
                # log_debug("C_COMMAND, dest - {!r}, comp - {!r}, jump - {!r}".format(dest_str, comp_str, jump_str))

                dest = self.code.dest(dest_str)
                comp = self.code.comp(comp_str)
                jump = self.code.jump(jump_str)

                line = '111{:07b}{:03b}{:03b}\n'.format(comp, dest, jump)
            else:
                raise Exception("Unexpected command - {}".format(command_type))

            # log_debug("line - {}".format(line))
            if line != '':
                self.output.append(line)
        log_info('second_parse end')
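For reference, the '111{:07b}{:03b}{:03b}' format above packs a Hack
C-instruction as the '111' prefix followed by 7 comp bits, 3 dest bits and
3 jump bits. A quick illustration using the standard encodings for D=D+A
(comp 0000010, dest 010, no jump):

line = '111{:07b}{:03b}{:03b}'.format(0b0000010, 0b010, 0b000)
assert line == '1110000010010000'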
Example #6
def export_tim_data(path):
    """Takes team in match data and writes to CSV.

    Puts team in match export files into their own directory to separate them
    from team export files.
    """
    # Get the lists of column headers and dictionaries to use in export
    all_tim_data = load_data(TIM_DATA_DB_PATHS)
    column_headers = format_header(all_tim_data, ['match_number', 'team_number'])

    with open(path, 'w') as file:
        # Write headers using the column_headers list
        csv_writer = csv.DictWriter(file, fieldnames=column_headers)
        csv_writer.writeheader()

        for team in TEAMS_LIST:
            # Write rows using data in dictionary
            team_data = []
            for document in all_tim_data:
                if document['team_number'] == team:
                    team_data.append(document)
            tim_data = {}
            for document in team_data:
                if document['match_number'] in tim_data:
                    tim_data[document['match_number']].update(document)
                else:
                    tim_data[document['match_number']] = document
            for document in tim_data.values():
                csv_writer.writerow(document)
    utils.log_info('Exported TIM to CSV')
Example #7
    def add_disassembly(self, filter_lib):
        """ Collect disassembly information:
            1. Use objdump to collect disassembly for each function in FunctionSet.
            2. Set flag to dump addr_hit_map when generating record info.
        """
        objdump = Objdump(self.ndk_path, self.binary_cache_path)
        cur_lib_name = None
        dso_info = None
        for function in sorted(self.functions.id_to_func.values(), key=lambda a: a.lib_id):
            if function.func_name == 'unknown':
                continue
            lib_name = self.libs.get_lib_name(function.lib_id)
            if lib_name != cur_lib_name:
                cur_lib_name = lib_name
                if filter_lib(lib_name):
                    dso_info = objdump.get_dso_info(lib_name)
                else:
                    dso_info = None
                if dso_info:
                    log_info('Disassemble %s' % dso_info[0])
            if dso_info:
                code = objdump.disassemble_code(dso_info, function.start_addr, function.addr_len)
                function.disassembly = code

        self.gen_addr_hit_map_in_record_info = True
Example #8
    def check_db(self):
        """
        Verifies the decoded receipt against the database.

        Requires that decode is run first.
        """
        if not self.decoded:
            raise ValueError('decode not run')

        self.setup_db()
        # Get the addon and user information from the installed table.
        try:
            self.uuid = self.decoded['user']['value']
        except KeyError:
            # If somehow we got a valid receipt without a uuid
            # that's a problem. Log here.
            log_info('No user in receipt')
            raise InvalidReceipt('NO_USER')

        try:
            storedata = self.decoded['product']['storedata']
            self.addon_id = int(dict(parse_qsl(storedata)).get('id', ''))
        except:
            # There was some value for storedata but it was invalid.
            log_info('Invalid store data')
            raise InvalidReceipt('WRONG_STOREDATA')
Example #9
def decompress_single_qr(qr_data, qr_type):
    """Decompress a full QR."""
    # Split into generic data and objective/subjective data
    qr_data = qr_data.split(SCHEMA['generic_data']['_section_separator'])
    # Generic QR is first section of QR
    decompressed_data = decompress_generic_qr(qr_data[0])
    # Decompress subjective QR
    if qr_type == QRType.SUBJECTIVE:
        subjective_data = qr_data[1].split(
            SCHEMA['subjective_aim']['_separator'])
        decompressed_data.update(
            decompress_data(subjective_data, 'subjective_aim'))
        if set(decompressed_data.keys()) != SUBJECTIVE_QR_FIELDS:
            raise ValueError('QR missing data fields')
    elif qr_type == QRType.OBJECTIVE:  # Decompress objective QR
        objective_data = qr_data[1].split(
            SCHEMA['objective_tim']['_separator'])
        decompressed_data.update(
            decompress_data(objective_data, 'objective_tim'))
        if set(decompressed_data.keys()) != OBJECTIVE_QR_FIELDS:
            raise ValueError('QR missing data fields')
        utils.log_info(f'Match: {decompressed_data["match_number"]} '
                       f'Team: {decompressed_data["team_number"]} '
                       f'Scout_ID: {decompressed_data["scout_id"]}')
    return decompressed_data
Example #10
def collect_data(args):
    """ Run app_profiler.py to generate record file. """
    app_profiler_args = [sys.executable, os.path.join(scripts_path, "app_profiler.py"), "-nb"]
    if args.app:
        app_profiler_args += ["-p", args.app]
    elif args.native_program:
        app_profiler_args += ["-np", args.native_program]
    else:
        log_exit("Please set profiling target with -p or -np option.")
    if args.compile_java_code:
        app_profiler_args.append("--compile_java_code")
    if args.disable_adb_root:
        app_profiler_args.append("--disable_adb_root")
    record_arg_str = ""
    if args.dwarf_unwinding:
        record_arg_str += "-g "
    else:
        record_arg_str += "--call-graph fp "
    if args.events:
        tokens = args.events.split()
        if len(tokens) == 2:
            num_events = tokens[0]
            event_name = tokens[1]
            record_arg_str += "-c %s -e %s " % (num_events, event_name)
        else:
            log_exit("Event format string of -e option cann't be recognized.")
        log_info("Using event sampling (-c %s -e %s)." % (num_events, event_name))
    else:
        record_arg_str += "-f %d " % args.sample_frequency
        log_info("Using frequency sampling (-f %d)." % args.sample_frequency)
    record_arg_str += "--duration %d " % args.capture_duration
    app_profiler_args += ["-r", record_arg_str]
    returncode = subprocess.call(app_profiler_args)
    return returncode == 0
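For instance, with -p com.example.app and default sampling options, the
assembled command resembles the following (package name, frequency, and
duration are illustrative):

python app_profiler.py -nb -p com.example.app -r "--call-graph fp -f 4000 --duration 10 "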
Example #11
def parse_samples(process, args, sample_filter_fn):
    """Read samples from record file.
        process: Process object
        args: arguments
        sample_filter_fn: if not None, is used to modify and filter samples.
                          It returns False for samples that should be filtered out.
    """

    record_file = args.record_file
    symfs_dir = args.symfs
    kallsyms_file = args.kallsyms

    lib = ReportLib()

    lib.ShowIpForUnknownSymbol()
    if symfs_dir:
        lib.SetSymfs(symfs_dir)
    if record_file:
        lib.SetRecordFile(record_file)
    if kallsyms_file:
        lib.SetKallsymsFile(kallsyms_file)
    if args.show_art_frames:
        lib.ShowArtFrames(True)
    process.cmd = lib.GetRecordCmd()
    product_props = lib.MetaInfo().get("product_props")
    if product_props:
        manufacturer, model, name = product_props.split(':')
        process.props['ro.product.manufacturer'] = manufacturer
        process.props['ro.product.model'] = model
        process.props['ro.product.name'] = name
    if lib.MetaInfo().get('trace_offcpu') == 'true':
        process.props['trace_offcpu'] = True
        if args.one_flamegraph:
            log_exit("It doesn't make sense to report with --one-flamegraph for perf.data " +
                     "recorded with --trace-offcpu.""")
    else:
        process.props['trace_offcpu'] = False

    while True:
        sample = lib.GetNextSample()
        if sample is None:
            lib.Close()
            break
        symbol = lib.GetSymbolOfCurrentSample()
        callchain = lib.GetCallChainOfCurrentSample()
        if sample_filter_fn and not sample_filter_fn(sample, symbol, callchain):
            continue
        process.add_sample(sample, symbol, callchain)

    if process.pid == 0:
        main_threads = [thread for thread in process.threads.values() if thread.tid == thread.pid]
        if main_threads:
            process.name = main_threads[0].name
            process.pid = main_threads[0].pid

    for thread in process.threads.values():
        min_event_count = thread.num_events * args.min_callchain_percentage * 0.01
        thread.flamegraph.trim_callchain(min_event_count)

    log_info("Parsed %s callchains." % process.num_samples)
Example #12
def application(environ, start_response):
    status = '200 OK'
    with statsd.timer('services.verify'):

        data = environ['wsgi.input'].read()
        try:
            addon_id = id_re.search(environ['PATH_INFO']).group('addon_id')
        except AttributeError:
            output = ''
            log_info({'receipt': '%s...' % data[:10], 'addon': 'empty'},
                     'Wrong url %s' % environ['PATH_INFO'][:20])
            start_response('500 Internal Server Error', [])
            return [output]

        try:
            verify = Verify(addon_id, data, environ)
            output = verify()
            start_response(status, verify.get_headers(len(output)))
            receipt_cef.log(environ, addon_id, 'verify',
                            'Receipt verification')
        except:
            output = ''
            log_exception({'receipt': '%s...' % data[:10], 'addon': addon_id})
            receipt_cef.log(environ, addon_id, 'verify',
                            'Receipt verification error')
            start_response('500 Internal Server Error', [])

    return [output]
Example #13
def main():
    parser = argparse.ArgumentParser(description="""
        Annotate source files based on profiling data. It reads line information from binary_cache
        generated by app_profiler.py or binary_cache_builder.py, and generates annotated source
        files in the annotated_files directory.""")
    parser.add_argument('-i', '--perf_data_list', nargs='+', action='append', help="""
        The paths of profiling data. Default is perf.data.""")
    parser.add_argument('-s', '--source_dirs', type=extant_dir, nargs='+', action='append', help="""
        Directories to find source files.""")
    parser.add_argument('--comm', nargs='+', action='append', help="""
        Use samples only in threads with selected names.""")
    parser.add_argument('--pid', nargs='+', action='append', help="""
        Use samples only in processes with selected process ids.""")
    parser.add_argument('--tid', nargs='+', action='append', help="""
        Use samples only in threads with selected thread ids.""")
    parser.add_argument('--dso', nargs='+', action='append', help="""
        Use samples only in selected binaries.""")
    parser.add_argument('--ndk_path', type=extant_dir, help='Set the path of an ndk release.')

    args = parser.parse_args()
    config = {}
    config['perf_data_list'] = flatten_arg_list(args.perf_data_list)
    if not config['perf_data_list']:
        config['perf_data_list'].append('perf.data')
    config['source_dirs'] = flatten_arg_list(args.source_dirs)
    config['comm_filters'] = flatten_arg_list(args.comm)
    config['pid_filters'] = flatten_arg_list(args.pid)
    config['tid_filters'] = flatten_arg_list(args.tid)
    config['dso_filters'] = flatten_arg_list(args.dso)
    config['ndk_path'] = args.ndk_path

    annotator = SourceFileAnnotator(config)
    annotator.annotate()
    log_info('annotate finished successfully, please check results in annotated_files/.')
Example #14
    def mint_new_block_and_mine(self):

        if not self.mining_paused:
            # Put transaction from waiting list into block
            try:
                chosen_transactions = self.transactions_pool[
                    0:min(self.block_limit, len(self.transactions_pool))]
                if isinstance(chosen_transactions, Transaction):
                    chosen_transactions = [chosen_transactions]
            except IndexError:
                chosen_transactions = []

            # Create the block
            prev_block = self.chain[-1]
            new_block = Block(previous_block_hash=prev_block.block_hash,
                              transactions=[
                                  t.export_transaction_to_dict()
                                  for t in chosen_transactions
                              ],
                              height=prev_block.height + 1,
                              start_nounce=self.chain_id * NOUNCE_DISTANCE)
            log_info('[node..Blockchain.mint_new_block_and_mine] Minted block has ({}) transactions. Mining...'\
                     .format(len(chosen_transactions)))
            # dynamic_log_level.set_log_level(0)
            new_block.mine()
            # dynamic_log_level.reset_user_log_level()
            log_info(
                '[node..Blockchain.mint_new_block_and_mine] Block mined @ {}'.
                format(new_block.block_hash))
            return new_block
Example #15
def run_checks(file, data_dict, log, commit_range, checks):
    """Checks rule compliance for any given dataset file."""

    committed_lines = get_committed_lines(file, commit_range)
    uncommitted_lines = get_uncommitted_lines(file)
    with open(file, newline="") as csvfile:
        info = csv.DictReader(csvfile, data_dict["columns"])
        header = next(info)
        if "1" in uncommitted_lines or "1" in committed_lines:
            check_header(list(header.values()), data_dict, file, log)
        if uncommitted_lines != [] or committed_lines != []:
            for i, row in enumerate(info):
                i += 2
                line = str(i)
                # The line is either:
                # (1) only uncommitted (needs to always be checked locally),
                # (2) only committed (needs to always be checked in CI), or
                # (3) both in the unpushed commits and uncommitted (which in
                # practice is the same as (1), since the committed one is
                # deprecated)
                if (line in uncommitted_lines) or (line in committed_lines):
                    params = [file, row, line, log]
                    for check_rule in checks:
                        if check_rule.__name__ == check_row_length.__name__:
                            check_rule(len(header), *params)
                            continue
                        check_rule(*params)
        else:
            log_info(file, log, "There are no changes to be checked")

    with open(file, 'rb') as fp:
        for line in fp:
            if line.endswith(b'\r\n'):
                log_esp_error(file, log, "Incorrect End of Line encoding")
                break
Example #16
    def check_type(self, *types):
        """
        Verifies that the type of receipt is what we expect.
        """
        if self.decoded.get('typ', '') not in types:
            log_info('Receipt type not in %s' % ','.join(types))
            raise InvalidReceipt('WRONG_TYPE')
Example #17
    def decode(self):
        """
        Verifies that the receipt can be decoded and that the initial
        contents of the receipt are correct.

        If it's invalid, then just return invalid rather than give out any
        information.
        """
        try:
            receipt = decode_receipt(self.receipt)
        except:
            log_exception({
                'receipt': '%s...' % self.receipt[:10],
                'addon': self.addon_id
            })
            log_info('Error decoding receipt')
            raise InvalidReceipt

        try:
            assert receipt['user']['type'] == 'directed-identifier'
        except (AssertionError, KeyError):
            log_info('No directed-identifier supplied')
            raise InvalidReceipt

        return receipt
Example #18
    def check_db(self):
        """
        Verifies the decoded receipt against the database.

        Requires that decode is run first.
        """
        if not self.decoded:
            raise ValueError('decode not run')

        self.setup_db()
        # Get the addon and user information from the installed table.
        try:
            self.uuid = self.decoded['user']['value']
        except KeyError:
            # If somehow we got a valid receipt without a uuid
            # that's a problem. Log here.
            log_info('No user in receipt')
            raise InvalidReceipt('NO_USER')

        try:
            storedata = self.decoded['product']['storedata']
            self.addon_id = int(dict(parse_qsl(storedata)).get('id', ''))
        except:
            # There was some value for storedata but it was invalid.
            log_info('Invalid store data')
            raise InvalidReceipt('WRONG_STOREDATA')
Example #19
    def check_full(self):
        """
        This is the default that verify will use, this will
        do the entire stack of checks.
        """
        receipt_domain = urlparse(settings.WEBAPPS_RECEIPT_URL).netloc
        try:
            self.decoded = self.decode()
            self.check_type('purchase-receipt')
            self.check_db()
            self.check_url(receipt_domain)
        except InvalidReceipt:
            return self.invalid()

        if self.premium != ADDON_PREMIUM:
            log_info('Valid receipt, not premium')
            return self.ok_or_expired()

        try:
            self.check_purchase()
        except InvalidReceipt:
            return self.invalid()
        except RefundedReceipt:
            return self.refund()

        return self.ok_or_expired()
Example #20
def export_team_data(path):
    """Takes team data and writes to CSV.

    Merges raw and processed team data into one dictionary. Puts team export
    files into their own directory to separate them from team in match export
    files.
    """
    # Get the lists of column headers and dictionaries to use in export
    team_data = load_data(TEAM_DATA_DB_PATHS)
    column_headers = format_header(team_data, ['team_number'])
    # The list of teams, used to merge raw and processed team data

    with open(path, 'w') as file:
        # Write headers using the column_headers list
        csv_writer = csv.DictWriter(file, fieldnames=column_headers)
        csv_writer.writeheader()

        for team in TEAMS_LIST:
            # The dictionary that will hold the combined team data, reset for each team
            merged_team = {}
            # Go through all dictionaries and check if their team number matches the team's
            for document in team_data:
                if document.get('team_number') == team:
                    # Update data from the same team to the merged_team dict
                    merged_team.update(document)
            # Use each team's merged data to write a row
            csv_writer.writerow(merged_team)
    utils.log_info('Exported team data to CSV')
Example #21
def application(environ, start_response):
    status = "200 OK"
    with statsd.timer("services.verify"):

        data = environ["wsgi.input"].read()
        try:
            addon_id = id_re.search(environ["PATH_INFO"]).group("addon_id")
        except AttributeError:
            output = ""
            log_info({"receipt": "%s..." % data[:10], "addon": "empty"}, "Wrong url %s" % environ["PATH_INFO"][:20])
            start_response("500 Internal Server Error", [])
            return [output]

        try:
            verify = Verify(addon_id, data, environ)
            output = verify()
            start_response(status, verify.get_headers(len(output)))
            receipt_cef.log(environ, addon_id, "verify", "Receipt verification")
        except:
            output = ""
            log_exception({"receipt": "%s..." % data[:10], "addon": addon_id})
            receipt_cef.log(environ, addon_id, "verify", "Receipt verification error")
            start_response("500 Internal Server Error", [])

    return [output]
Example #22
def application(environ, start_response):
    status = '200 OK'
    with statsd.timer('services.verify'):

        data = environ['wsgi.input'].read()
        try:
            addon_id = id_re.search(environ['PATH_INFO']).group('addon_id')
        except AttributeError:
            output = ''
            log_info({
                'receipt': '%s...' % data[:10],
                'addon': 'empty'
            }, 'Wrong url %s' % environ['PATH_INFO'][:20])
            start_response('500 Internal Server Error', [])
            return [output]

        try:
            verify = Verify(addon_id, data, environ)
            output = verify()
            start_response(status, verify.get_headers(len(output)))
            receipt_cef.log(environ, addon_id, 'verify',
                            'Receipt verification')
        except:
            output = ''
            log_exception({'receipt': '%s...' % data[:10], 'addon': addon_id})
            receipt_cef.log(environ, addon_id, 'verify',
                            'Receipt verification error')
            start_response('500 Internal Server Error', [])

    return [output]
Example #23
    def check_full(self):
        """
        This is the default that verify will use, this will
        do the entire stack of checks.
        """
        try:
            self.decoded = self.decode()
            self.check_type('purchase-receipt')
            self.check_db()
            self.check_url()
        except InvalidReceipt:
            return self.invalid()

        if self.premium != ADDON_PREMIUM:
            log_info('Valid receipt, not premium')
            return self.ok_or_expired()

        try:
            self.check_purchase()
        except InvalidReceipt:
            return self.invalid()
        except RefundedReceipt:
            return self.refund()

        return self.ok_or_expired()
Example #24
    def persist_marketId(self, data):
        log_info('persist_marketId')
        if self.marketIdData == data:
            return
        else:
            self.marketIdData = data

        def cb(conn):
            records = []
            for entry in data.values():
                records.append('''("{}", "{}", "{}", "{}", "{}", "{}", "{}", "{}", "{}")'''.format(
                    entry['productId'],
                    entry['hubId'],
                    entry['marketId'],
                    entry['lastTime'],
                    entry['endDate'],
                    entry['lastPrice'],
                    entry['marketStrip'],
                    entry['volume'],
                    entry['change'],
                ))
            v = ','.join(records)
            cmd = 'INSERT OR IGNORE INTO Contracts(productId, hubId, marketId, lastTime, endDate, lastPrice, marketStrip, volume, change) values {}'.format(v)
            d = conn.runOperation(cmd)

            def eb(e):
                log_error('Persist data failed {}'.format(e))
                log_error(cmd)
            d.addErrback(eb)
            return d

        d = defer.maybeDeferred(self.load)
        d.addCallback(cb)
        return d
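Interpolating values into the INSERT statement works for this controlled
feed, but a parameterized variant avoids quoting pitfalls. A sketch of the
inner callback, assuming conn exposes twisted adbapi's
runOperation(statement, params):

def cb(conn):
    cmd = ('INSERT OR IGNORE INTO Contracts'
           '(productId, hubId, marketId, lastTime, endDate,'
           ' lastPrice, marketStrip, volume, change)'
           ' VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)')
    ds = []
    for entry in data.values():
        params = tuple(entry[k] for k in (
            'productId', 'hubId', 'marketId', 'lastTime', 'endDate',
            'lastPrice', 'marketStrip', 'volume', 'change'))
        ds.append(conn.runOperation(cmd, params))
    return defer.DeferredList(ds)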
Example #25
    def check_type(self, *types):
        """
        Verifies that the type of receipt is what we expect.
        """
        if self.decoded.get('typ', '') not in types:
            log_info('Receipt type not in %s' % ','.join(types))
            raise InvalidReceipt('WRONG_TYPE')
Example #26
def main():
    parser = argparse.ArgumentParser(description="""
        Annotate source files based on profiling data. It reads line information from binary_cache
        generated by app_profiler.py or binary_cache_builder.py, and generates annotated source
        files in the annotated_files directory.""")
    parser.add_argument('-i',
                        '--perf_data_list',
                        nargs='+',
                        action='append',
                        help="""
        The paths of profiling data. Default is perf.data.""")
    parser.add_argument('-s',
                        '--source_dirs',
                        type=extant_dir,
                        nargs='+',
                        action='append',
                        help="""
        Directories to find source files.""")
    parser.add_argument('--comm',
                        nargs='+',
                        action='append',
                        help="""
        Use samples only in threads with selected names.""")
    parser.add_argument('--pid',
                        nargs='+',
                        action='append',
                        help="""
        Use samples only in processes with selected process ids.""")
    parser.add_argument('--tid',
                        nargs='+',
                        action='append',
                        help="""
        Use samples only in threads with selected thread ids.""")
    parser.add_argument('--dso',
                        nargs='+',
                        action='append',
                        help="""
        Use samples only in selected binaries.""")
    parser.add_argument('--ndk_path',
                        type=extant_dir,
                        help='Set the path of an ndk release.')

    args = parser.parse_args()
    config = {}
    config['perf_data_list'] = flatten_arg_list(args.perf_data_list)
    if not config['perf_data_list']:
        config['perf_data_list'].append('perf.data')
    config['source_dirs'] = flatten_arg_list(args.source_dirs)
    config['comm_filters'] = flatten_arg_list(args.comm)
    config['pid_filters'] = flatten_arg_list(args.pid)
    config['tid_filters'] = flatten_arg_list(args.tid)
    config['dso_filters'] = flatten_arg_list(args.dso)
    config['ndk_path'] = args.ndk_path

    annotator = SourceFileAnnotator(config)
    annotator.annotate()
    log_info(
        'annotate finished successfully, please check results in annotated_files/.'
    )
Example #27
    def disconnect(self):
        ret_val = self.sem_api.ClosingControl()
        if ret_val == 0:
            utils.log_info('SEM', 'Disconnected from SmartSEM.')
            return True
        else:
            utils.log_error('SEM', f'ERROR disconnecting from SmartSEM (ret_val: {ret_val}).')
            return False
Example #28
def SCHED_mine_for_block(blockchain, sched):
    random_id = random.randint(0, 1000)
    log_info(
        "[SCHED_mine_for_block]({}) Starting routine... ".format(random_id))
    return {
        'sched': sched,
        'blockchain': blockchain,
        'new_block': blockchain.mint_new_block_and_mine()
    }
Example #29
def unzip_recording_data(args):
    zip_file_path = os.path.join(args.out_dir, 'simpleperf_data.zip')
    with zipfile.ZipFile(zip_file_path, 'r') as zip_fh:
        names = zip_fh.namelist()
        log_info('There are %d recording data files.' % len(names))
        for name in names:
            log_info('recording file: %s' % os.path.join(args.out_dir, name))
            zip_fh.extract(name, args.out_dir)
    remove(zip_file_path)
Example #30
def unzip_recording_data(args):
    zip_file_path = os.path.join(args.out_dir, 'simpleperf_data.zip')
    with zipfile.ZipFile(zip_file_path, 'r') as zip_fh:
        names = zip_fh.namelist()
        log_info('There are %d recording data files.' % len(names))
        for name in names:
            log_info('recording file: %s' % os.path.join(args.out_dir, name))
            zip_fh.extract(name, args.out_dir)
    remove(zip_file_path)
Example #31
    def gen_source_lines(self):
        # 1. Create Addr2line instance
        if not self.config.get('binary_cache_dir'):
            log_info("Can't generate line information because binary_cache is missing.")
            return
        if not find_tool_path('addr2line', self.config['ndk_path']):
            log_info("Can't generate line information because can't find addr2line.")
            return
        addr2line = Addr2Nearestline(self.config['ndk_path'], self.config['binary_cache_dir'], True)

        # 2. Put all needed addresses to it.
        for location in self.location_list:
            mapping = self.get_mapping(location.mapping_id)
            dso_name = self.get_string(mapping.filename_id)
            if location.lines:
                function = self.get_function(location.lines[0].function_id)
                addr2line.add_addr(dso_name, function.vaddr_in_dso, location.vaddr_in_dso)
        for function in self.function_list:
            dso_name = self.get_string(function.dso_name_id)
            addr2line.add_addr(dso_name, function.vaddr_in_dso, function.vaddr_in_dso)

        # 3. Generate source lines.
        addr2line.convert_addrs_to_lines()

        # 4. Annotate locations and functions.
        for location in self.location_list:
            if not location.lines:
                continue
            mapping = self.get_mapping(location.mapping_id)
            dso_name = self.get_string(mapping.filename_id)
            dso = addr2line.get_dso(dso_name)
            if not dso:
                continue
            sources = addr2line.get_addr_source(dso, location.vaddr_in_dso)
            if not sources:
                continue
            for (source_id, source) in enumerate(sources):
                source_file, source_line, function_name = source
                function_id = self.get_function_id(function_name, dso_name, 0)
                if function_id == 0:
                    continue
                if source_id == 0:
                    # Clear default line info
                    location.lines = []
                location.lines.append(self.add_line(source_file, source_line, function_id))

        for function in self.function_list:
            dso_name = self.get_string(function.dso_name_id)
            if function.vaddr_in_dso:
                dso = addr2line.get_dso(dso_name)
                if not dso:
                    continue
                sources = addr2line.get_addr_source(dso, function.vaddr_in_dso)
                if sources:
                    source_file, source_line, _ = sources[0]
                    function.source_filename_id = self.get_string_id(source_file)
                    function.start_line = source_line
Example #32
    def make_wallets(amount):
        data = []
        for i in range(amount):
            progress(i, amount - 1, 'making wallets...')
            pub, pri = ppk_keygen()
            wallet = {'public': pub, 'private': pri}
            data.append(wallet)
        log_info('[client.Client.make_wallets] writing ({}) wallets to pickle...'.format(amount))
        utils.save_pickle(data, WALLETS_DIR)
Example #33
    def log_info_perf(self, epoch):
        test_perf = self.frac_err(self.W, self.test_images, self.test_labels)
        train_perf = self.frac_err(self.W, self.train_images, self.train_labels)
        if test_perf > self.prev_test_perf:
            self.learning_rate = 0.1 * self.learning_rate
        self.prev_test_perf = test_perf
        utils.log_info(
            "Epoch {0}, TrainErr {1:5}, TestErr {2:5}, LR {3:2}".format(
                self.epoch, train_perf, test_perf, self.learning_rate))
Example #34
    def get_storedata(self):
        """
        Attempt to retrieve the storedata information from the receipt.
        """
        try:
            storedata = self.decoded['product']['storedata']
            return dict(parse_qsl(storedata))
        except Exception as e:
            log_info('Invalid store data: {err}'.format(err=e))
            raise InvalidReceipt('WRONG_STOREDATA')
Example #35
    def get_storedata(self):
        """
        Attempt to retrieve the storedata information from the receipt.
        """
        try:
            storedata = self.decoded['product']['storedata']
            return dict(parse_qsl(storedata))
        except Exception as e:
            log_info('Invalid store data: {err}'.format(err=e))
            raise InvalidReceipt('WRONG_STOREDATA')
Example #36
def delete_tablet_downloads():
    """Deletes all data from the Download folder of tablets"""
    devices = get_attached_devices()
    # Wait for USB connection to initialize
    time.sleep(.1)
    for device in devices:
        utils.run_command(
            f'adb -s {device} shell rm -r /storage/sdcard0/Download/*')
        utils.log_info(
            f'Removed Downloads on {DEVICE_SERIAL_NUMBERS[device]}, ({device})'
        )
Example #37
    def start_profiling(self, target_args):
        """Start simpleperf record process on device."""
        args = ['/data/local/tmp/simpleperf', 'record', '-o', '/data/local/tmp/perf.data',
                self.args.record_options]
        if self.adb.run(['shell', 'ls', NATIVE_LIBS_DIR_ON_DEVICE, '>/dev/null', '2>&1']):
            args += ['--symfs', NATIVE_LIBS_DIR_ON_DEVICE]
        args += ['--log', self.args.log]
        args += target_args
        adb_args = [self.adb.adb_path, 'shell'] + args
        log_info('run adb cmd: %s' % adb_args)
        self.record_subproc = subprocess.Popen(adb_args)
Example #38
    def create_genesis_block(self):
        genesis_block = Block(previous_block_hash=0,
                              transactions=[],
                              height=0,
                              mining_difficulty=1,
                              start_nounce=self.chain_id * self.nounce_distance)
        genesis_block = genesis_block.mine()
        self.add_block(genesis_block)
        log_info("Genesis Block Hash: {}".format(genesis_block.block_hash))
        return
Example #39
    def get_user(self):
        """
        Attempt to retrieve the user information from the receipt.
        """
        try:
            return self.decoded['user']['value']
        except KeyError:
            # If somehow we got a valid receipt without a uuid
            # that's a problem. Log here.
            log_info('No user in receipt')
            raise InvalidReceipt('NO_USER')
Example #40
def run():
    def done(_):
        log_info('Job done at {}'.format(datetime.now().strftime("%Y%m%d %H:%M:%S")))

    log_info('Job start at {}'.format(datetime.now().strftime("%Y%m%d %H:%M:%S")))
    d = get_marketId()
    d.addCallback(get_urls)
    d.addCallback(get_prices)
    d.addBoth(done)

    reactor.callLater(interval, run)
Example #41
    def get_user(self):
        """
        Attempt to retrieve the user information from the receipt.
        """
        try:
            return self.decoded['user']['value']
        except KeyError:
            # If somehow we got a valid receipt without a uuid
            # that's a problem. Log here.
            log_info('No user in receipt')
            raise InvalidReceipt('NO_USER')
Example #42
    def get_inapp_id(self):
        """
        Attempt to retrieve the inapp id
        from the storedata in the receipt.
        """
        try:
            return int(self.get_storedata()['inapp_id'])
        except Exception as e:
            # There was some value for storedata but it was invalid.
            log_info('Invalid store data for inapp id: {err}'.format(err=e))
            raise InvalidReceipt('WRONG_STOREDATA')
Example #43
    def _copy_to_binary_cache(self, from_path, expected_build_id, target_file):
        if target_file[0] == '/':
            target_file = target_file[1:]
        target_file = target_file.replace('/', os.sep)
        target_file = os.path.join(self.binary_cache_dir, target_file)
        if not self._need_to_copy(from_path, target_file, expected_build_id):
            # The existing file in binary_cache can provide more information, so no need to copy.
            return
        target_dir = os.path.dirname(target_file)
        if not os.path.isdir(target_dir):
            os.makedirs(target_dir)
        log_info('copy to binary_cache: %s to %s' % (from_path, target_file))
        shutil.copy(from_path, target_file)
Example #44
    def check_purchase(self):
        """
        Verifies that the app has been purchased.
        """
        sql = """SELECT id, type FROM addon_purchase
                 WHERE addon_id = %(addon_id)s
                 AND uuid = %(uuid)s LIMIT 1;"""
        self.cursor.execute(sql, {'addon_id': self.addon_id,
                                  'uuid': self.uuid})
        result = self.cursor.fetchone()
        if not result:
            log_info('Invalid receipt, no purchase')
            raise InvalidReceipt('NO_PURCHASE')

        if result[-1] in (CONTRIB_REFUND, CONTRIB_CHARGEBACK):
            log_info('Valid receipt, but refunded')
            raise RefundedReceipt

        elif result[-1] in (CONTRIB_PURCHASE, CONTRIB_NO_CHARGE):
            log_info('Valid receipt')
            return

        else:
            log_info('Valid receipt, but invalid contribution')
            raise InvalidReceipt('WRONG_PURCHASE')
Example #45
    def check_url(self):
        """
        Verifies that the URL of the verification is what we expect.
        """
        path = self.environ['PATH_INFO']
        parsed = urlparse(self.decoded.get('verify', ''))

        if parsed.netloc not in settings.DOMAIN:
            log_info('Receipt had invalid domain')
            raise InvalidReceipt

        if parsed.path != path:
            log_info('Receipt had the wrong path')
            raise InvalidReceipt
Example #46
    def check_purchase_app(self):
        """
        Verifies that the app has been purchased by the user.
        """
        self.setup_db()
        sql = """SELECT type FROM addon_purchase
                 WHERE addon_id = %(app_id)s
                 AND uuid = %(uuid)s LIMIT 1;"""
        self.cursor.execute(sql, {'app_id': self.get_app_id(),
                                  'uuid': self.get_user()})
        result = self.cursor.fetchone()
        if not result:
            log_info('Invalid receipt, no purchase')
            raise InvalidReceipt('NO_PURCHASE')

        self.check_purchase_type(result[0])
Example #47
    def profile(self):
        log_info('prepare profiling')
        self.prepare()
        log_info('start profiling')
        self.start()
        self.wait_profiling()
        log_info('collect profiling data')
        self.collect_profiling_data()
        log_info('profiling is finished.')
Example #48
    def _annotate_file(self, from_path, to_path, file_period, is_java):
        """Annotate a source file.

        Annotate a source file in three steps:
          1. In the first line, show periods of this file.
          2. For each function, show periods of this function.
          3. For each line not hitting the same line as functions, show
             line periods.
        """
        log_info('annotate file %s' % from_path)
        with open(from_path, 'r') as rf:
            lines = rf.readlines()

        annotates = {}
        for line in file_period.line_dict.keys():
            annotates[line] = self._get_percentage_str(file_period.line_dict[line], True)
        for func_name in file_period.function_dict.keys():
            func_start_line, period = file_period.function_dict[func_name]
            if func_start_line == -1:
                continue
            line = func_start_line - 1 if is_java else func_start_line
            annotates[line] = '[func] ' + self._get_percentage_str(period, True)
        annotates[1] = '[file] ' + self._get_percentage_str(file_period.period, True)

        max_annotate_cols = 0
        for key in annotates:
            max_annotate_cols = max(max_annotate_cols, len(annotates[key]))

        empty_annotate = ' ' * (max_annotate_cols + 6)

        dirname = os.path.dirname(to_path)
        if not os.path.isdir(dirname):
            os.makedirs(dirname)
        with open(to_path, 'w') as wf:
            for line in range(1, len(lines) + 1):
                annotate = annotates.get(line)
                if annotate is None:
                    if not lines[line-1].strip():
                        annotate = ''
                    else:
                        annotate = empty_annotate
                else:
                    annotate = '/* ' + annotate + (
                        ' ' * (max_annotate_cols - len(annotate))) + ' */'
                wf.write(annotate)
                wf.write(lines[line-1])
Example #49
    def ok_or_expired(self):
        # This receipt is ok, now let's check its expiry.
        # If it's expired, we'll have to return a new receipt.
        try:
            expire = int(self.decoded.get('exp', 0))
        except ValueError:
            log_info('Error with expiry in the receipt')
            return self.expired()

        now = calendar.timegm(gmtime()) + 10  # For any clock skew.
        if now > expire:
            log_info('This receipt has expired: %s UTC < %s UTC'
                     % (datetime.utcfromtimestamp(expire),
                        datetime.utcfromtimestamp(now)))
            return self.expired()

        return self.ok()
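Note the +10 skew allowance above: a receipt whose exp falls within the next
10 seconds is already treated as expired. A standalone sketch of the same
comparison (function name hypothetical):

import calendar
from time import gmtime

def is_expired(exp, skew=10):
    # Mirror ok_or_expired: shift "now" forward to absorb clock skew.
    return calendar.timegm(gmtime()) + skew > exp

# A receipt expiring 5 seconds from now already counts as expired:
print(is_expired(calendar.timegm(gmtime()) + 5))  # True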
Example #50
    def check_url(self, domain):
        """
        Verifies that the URL of the verification is what we expect.

        :param domain: the domain you expect the receipt to be verified at,
            note that "real" receipts are verified at a different domain
            from the main marketplace domain.
        """
        path = self.environ['PATH_INFO']
        parsed = urlparse(self.decoded.get('verify', ''))

        if parsed.netloc != domain:
            log_info('Receipt had invalid domain')
            raise InvalidReceipt('WRONG_DOMAIN')

        if parsed.path != path:
            log_info('Receipt had the wrong path')
            raise InvalidReceipt('WRONG_PATH')
Example #51
    def check_purchase_inapp(self):
        """
        Verifies that the inapp has been purchased.
        """
        self.setup_db()
        sql = """SELECT inapp_product_id, type FROM stats_contributions
                 WHERE id = %(contribution_id)s LIMIT 1;"""
        self.cursor.execute(
            sql,
            {'contribution_id': self.get_contribution_id()}
        )
        result = self.cursor.fetchone()
        if not result:
            log_info('Invalid receipt, no purchase')
            raise InvalidReceipt('NO_PURCHASE')

        contribution_inapp_id, purchase_type = result
        self.check_purchase_type(purchase_type)
        self.check_inapp_product(contribution_inapp_id)
Example #52
    def _check_and_pull_binary(self, binary, expected_build_id, binary_cache_file):
        """If the binary_cache_file exists and has the expected_build_id, there
           is no need to pull the binary from device. Otherwise, pull it.
        """
        need_pull = True
        if os.path.isfile(binary_cache_file):
            need_pull = False
            if expected_build_id:
                build_id = self._read_build_id(binary_cache_file)
                if expected_build_id != build_id:
                    need_pull = True
        if need_pull:
            target_dir = os.path.dirname(binary_cache_file)
            if not os.path.isdir(target_dir):
                os.makedirs(target_dir)
            if os.path.isfile(binary_cache_file):
                os.remove(binary_cache_file)
            log_info('pull file to binary_cache: %s to %s' % (binary, binary_cache_file))
            self._pull_file_from_device(binary, binary_cache_file)
        else:
            log_info('use current file in binary_cache: %s' % binary_cache_file)
Example #53
    def decode(self):
        """
        Verifies that the receipt can be decoded and that the initial
        contents of the receipt are correct.

        If it's invalid, then just return invalid rather than give out any
        information.
        """
        try:
            receipt = decode_receipt(self.receipt)
        except:
            log_exception({'receipt': '%s...' % self.receipt[:10],
                           'addon': self.addon_id})
            log_info('Error decoding receipt')
            raise InvalidReceipt('ERROR_DECODING')

        try:
            assert receipt['user']['type'] == 'directed-identifier'
        except (AssertionError, KeyError):
            log_info('No directed-identifier supplied')
            raise InvalidReceipt('NO_DIRECTED_IDENTIFIER')

        return receipt
Example #54
    def check_purchase_type(self, purchase_type):
        """
        Verifies that the purchase type is of a valid type.
        """
        if purchase_type in (CONTRIB_REFUND, CONTRIB_CHARGEBACK):
            log_info('Valid receipt, but refunded')
            raise RefundedReceipt

        elif purchase_type in (CONTRIB_PURCHASE, CONTRIB_NO_CHARGE):
            log_info('Valid receipt')
            return

        else:
            log_info('Valid receipt, but invalid contribution')
            raise InvalidReceipt('WRONG_PURCHASE')
Example #55
    def check_db(self):
        """
        Verifies the decoded receipt against the database.

        Requires that decode is run first.
        """
        if not self.decoded:
            raise ValueError('decode not run')

        self.setup_db()
        # Get the addon and user information from the installed table.
        try:
            uuid = self.decoded['user']['value']
        except KeyError:
            # If somehow we got a valid receipt without a uuid
            # that's a problem. Log here.
            log_info('No user in receipt')
            raise InvalidReceipt('NO_USER')

        try:
            storedata = self.decoded['product']['storedata']
            self.addon_id = int(dict(parse_qsl(storedata)).get('id', ''))
        except:
            # There was some value for storedata but it was invalid.
            log_info('Invalid store data')
            raise InvalidReceipt('WRONG_STOREDATA')

        sql = """SELECT id, user_id, premium_type FROM users_install
                 WHERE addon_id = %(addon_id)s
                 AND uuid = %(uuid)s LIMIT 1;"""
        self.cursor.execute(sql, {'addon_id': self.addon_id,
                                  'uuid': uuid})
        result = self.cursor.fetchone()
        if not result:
            # We've got no record of this receipt being created.
            log_info('No entry in users_install for uuid: %s' % uuid)
            raise InvalidReceipt('WRONG_USER')

        pk, self.user_id, self.premium = result
Example #56
    def check_inapp_product(self, contribution_inapp_id):
        if int(contribution_inapp_id) != self.get_inapp_id():
            log_info('Invalid receipt, inapp_id does not match')
            raise InvalidReceipt('NO_PURCHASE')
Example #57
    def __init__(self):
        """
        Headers:
        ========
        Header-Name     Type    Required                Description
        --------------------------------------------------------
        Referer         String  N                       Referential url. Not required if
                                                        seed-file is given.
        X-Progress      String  N                       The rate of progress of the target
                                                        download file.
        X-Client-ID     String  Y                       Thunder client id.
        X-File-Name     String  given url -> Y,         The file name of the target download
                                given seed-file -> N    file (UTF-8 encoded). Not required if
                                                        seed-file is given.
        X-File-Size     String  given url -> Y,         The size of the target download file.
                                given seed-file -> N    Not required if seed-file is given.
        X-Mime-Type     String  N                       The mime-type of the target download
                                                        file. Not required if seed-file is
                                                        given.
        X-URL           String  Y                       url or seed-file-path.

        GetParams:
        ==========
        Parameter        Type    Required   Description
        --------------------------------------------------------
        key              String  Y          apikey given by mysite.cn; keep it secret.
        hash             String  N          file_private_id (hash value generated by the
                                            Thunder client).
        digest           String  Y          The digest of the url or seed-file.
        digest-algorithm String  Y          Digest algorithm.

        Steps:
        ======
          * Parse query_string
          * Auth apikey
          * Parse HTTP headers
          * Check parameters
          * Parse MIME type

        """

        self.uuid = uuid.uuid1().hex

        env = web.ctx.env

        self.error_code = None
        self.error_msg = ''
        self.error_data = []

        params = web.input()
        self.key       = params.get('key', None)
        self.hash      = params.get('hash', None)
        self.digest    = params.get('digest', None)
        self.algorithm = params.get('digest-algorithm', None)

        if not self.authenticated:
            log_normal(logger, {
                'action': 'request-unauthorized',
                'info': {
                    'message': 'unauthorized....',
                    'apikey': self.key
                }
            }, LOG_INFO, uuid=self.uuid)
            raise Unauthorized(wrap_error(ERROR_APIKEY, 'APIKEY Error', ['key']))

        self.referer     = env.get(HEADER_REFERER, None)
        self.last_query  = env.get(HEADER_LAST_QUERY, None)
        self.cur_query   = env.get(HEADER_CUR_QUERY, None)
        self.progress    = env.get(HEADER_PROGRESS, None)
        self.client_addr = env.get(HEADER_CLIENT_ADDR, None)
        self.client_id   = env.get(HEADER_CLIENT_ID, None)
        self.file_name   = env.get(HEADER_FILE_NAME, None)
        self.file_size   = env.get(HEADER_FILE_SIZE, None)
        self.mime_type   = env.get(HEADER_MIME_TYPE, None)
        self.url         = env.get(HEADER_URL, None)
        self.user_agent  = env.get(HEADER_USER_AGENT, None)
        self.request_time = None
        # PRD Section 3.4 requirement.
        try:
            log_info(logger_info, {
                'action': 'show-request-env',
                'info': {
                    # From: Query String
                    'key'          : self.key,
                    'hash'         : self.hash,
                    'digest'       : self.digest,
                    'algorithm'    : self.algorithm,
                    # From: Headers
                    'referer'      : self.referer,
                    'client_id'    : self.client_id,
                    'client_addr'  : self.client_addr,
                    'progress'     : self.progress,
                    'file_name'    : self.file_name,
                    'file_size'    : self.file_size,
                    'mime_type'    : self.mime_type,
                    'url'          : self.url,
                    'user_agent'   : self.user_agent,
                    'request_time' : datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                }
            }, uuid=self.uuid)
            self.request_time = datetime.now()
        except UnicodeDecodeError:
            self.error_code = ERROR_PARAMS
            self.error_msg = 'Unexpected parameter encoding!'
            self.error_data.extend(['Header:Referer',
                                    'Header:X-File-Name',
                                    'Header:X-URL'])
            message = wrap_error(self.error_code, self.error_msg, self.error_data)
            log_normal(logger, {
                'action': 'bad-request',
                'info': message
            }, LOG_WARN, uuid=self.uuid)
            raise web.BadRequest(message)

        try:
            for k in ('last_query', 'cur_query'):
                field = getattr(self, k)
                if field:
                    setattr(self, k, datetime(*rfc822.parsedate(field)[:-3]))
        except TypeError:
            self.error_code = ERROR_PARAMS
            self.error_msg = 'If-Modified-Since or Date error!'
            self.error_data.append('Header:If-Modified-Since')
            self.error_data.append('Header:Date')

        if self.file_size:
            try:
                int(self.file_size)
            except ValueError:
                self.error_code = ERROR_PARAMS
                self.error_msg = 'X-File-Size is not integer!'
                self.error_data.append('X-File-Size')

        if (not self.client_id
            or not self.digest
            or not self.algorithm):
            # Get parameter or header missing
            self.error_code = ERROR_PARAMS
            self.error_msg = 'Parameter not enough!'

        for field, tag in [
                (self.client_id, 'X-Client-ID'),
                (self.digest, 'digest'),
                (self.algorithm, 'digest-algorithm')]:
            if not field:
                self.error_data.append(tag)

        self.is_seed = not self.url or self.url.strip().startswith("file://")
        self.ext = None
        if self.file_name:
            parts = self.file_name.rsplit('.', 1)
            if len(parts) >= 2:
                self.ext = parts[-1]

        self.scheme = url_scheme(self.url)

        # If config matched transform the digest(hashinfo) to lower case
        if (DIGEST_TRANSFORM
            and self.algorithm not in IGNORE_ALGORITHMS
            and self.digest):
            self.digest = self.digest.lower()
Example #58
def main():
    sys.setrecursionlimit(MAX_CALLSTACK_LENGTH * 2 + 50)
    parser = argparse.ArgumentParser(description='report profiling data')
    parser.add_argument('-i', '--record_file', nargs='+', default=['perf.data'], help="""
                        Set profiling data file to report. Default is perf.data.""")
    parser.add_argument('-o', '--report_path', default='report.html', help="""
                        Set output html file. Default is report.html.""")
    parser.add_argument('--min_func_percent', default=0.01, type=float, help="""
                        Set min percentage of functions shown in the report.
                        For example, when set to 0.01, only functions taking >= 0.01%% of total
                        event count are collected in the report. Default is 0.01.""")
    parser.add_argument('--min_callchain_percent', default=0.01, type=float, help="""
                        Set min percentage of callchains shown in the report.
                        It is used to limit nodes shown in the function flamegraph. For example,
                        when set to 0.01, only callchains taking >= 0.01%% of the event count of
                        the starting function are collected in the report. Default is 0.01.""")
    parser.add_argument('--add_source_code', action='store_true', help='Add source code.')
    parser.add_argument('--source_dirs', nargs='+', help='Source code directories.')
    parser.add_argument('--add_disassembly', action='store_true', help='Add disassembled code.')
    parser.add_argument('--binary_filter', nargs='+', help="""Annotate source code and disassembly
                        only for selected binaries.""")
    parser.add_argument('--ndk_path', nargs=1, help='Find tools in the ndk path.')
    parser.add_argument('--no_browser', action='store_true', help="Don't open report in browser.")
    parser.add_argument('--show_art_frames', action='store_true',
                        help='Show frames of internal methods in the ART Java interpreter.')
    args = parser.parse_args()

    # 1. Process args.
    binary_cache_path = 'binary_cache'
    if not os.path.isdir(binary_cache_path):
        if args.add_source_code or args.add_disassembly:
            log_exit("""binary_cache/ doesn't exist. Can't add source code or disassembled code
                        without collected binaries. Please run binary_cache_builder.py to
                        collect binaries for current profiling data, or run app_profiler.py
                        without -nb option.""")
        binary_cache_path = None

    if args.add_source_code and not args.source_dirs:
        log_exit('--source_dirs is needed to add source code.')
    build_addr_hit_map = args.add_source_code or args.add_disassembly
    ndk_path = None if not args.ndk_path else args.ndk_path[0]

    # 2. Produce record data.
    record_data = RecordData(binary_cache_path, ndk_path, build_addr_hit_map)
    for record_file in args.record_file:
        record_data.load_record_file(record_file, args.show_art_frames)
    record_data.limit_percents(args.min_func_percent, args.min_callchain_percent)

    def filter_lib(lib_name):
        if not args.binary_filter:
            return True
        for binary in args.binary_filter:
            if binary in lib_name:
                return True
        return False
    if args.add_source_code:
        record_data.add_source_code(args.source_dirs, filter_lib)
    if args.add_disassembly:
        record_data.add_disassembly(filter_lib)

    # 3. Generate report html.
    report_generator = ReportGenerator(args.report_path)
    report_generator.write_script()
    report_generator.write_content_div()
    report_generator.write_record_data(record_data.gen_record_info())
    report_generator.finish()

    if not args.no_browser:
        open_report_in_browser(args.report_path)
    log_info("Report generated at '%s'." % args.report_path)