Example #1
def read_config_file(args):
    # No config file was provided as an argument
    if args.config_file is None:
        # Check whether the default config file is present
        if os.path.exists("slither.config.json"):
            # The default file exists, use it
            args.config_file = "slither.config.json"
        else:
            return

    if os.path.isfile(args.config_file):
        try:
            with open(args.config_file, encoding="utf8") as f:
                config = json.load(f)
                for key, elem in config.items():
                    if key not in defaults_flag_in_config:
                        logger.info(
                            yellow(
                                f"{args.config_file} has an unknown key: {key} : {elem}"
                            ))
                        continue
                    if getattr(args, key) == defaults_flag_in_config[key]:
                        setattr(args, key, elem)
        except json.decoder.JSONDecodeError as e:
            logger.error(
                red(f"Impossible to read {args.config_file}, please check the file {e}"
                    ))
    else:
        logger.error(
            red(f"File {args.config_file} is not a file or does not exist"))
        logger.error(yellow("Falling back to the default settings..."))
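read_config_file only applies a key when it exists in defaults_flag_in_config and the corresponding command-line value is still at its default. A minimal sketch of producing such a config file; the key names below mirror argparse destinations that appear later in this listing ('detectors_to_run', 'filter_paths') and are illustrative assumptions, not an exhaustive list:

import json

# Illustrative keys only; the authoritative set is whatever
# defaults_flag_in_config contains in the Slither source.
sample_config = {
    "detectors_to_run": "all",
    "filter_paths": "node_modules",
}

with open("slither.config.json", "w", encoding="utf8") as f:
    json.dump(sample_config, f, indent=2)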
Example #2
def write_file(output_dir: Path,
               filename: str,
               content: str,
               allow_overwrite: bool = True,
               discard_if_exist: bool = False):
    """
    Write the content into output_dir/filename
    :param output_dir:
    :param filename:
    :param content:
    :param allow_overwrite: If True, allow overwriting an existing file (default: True); a warning is emitted when overwriting
    :param discard_if_exist: If True, do not overwrite the file or emit a warning if it already exists (default: False)
    :return:
    """
    file_to_write = Path(output_dir, filename)
    if file_to_write.exists():
        if discard_if_exist:
            return
        if not allow_overwrite:
            logger.info(
                yellow(
                    f'{file_to_write} already exists and will not be overwritten'
                ))
            return
        logger.info(yellow(f'Overwrite {file_to_write}'))
    else:
        logger.info(green(f'Write {file_to_write}'))
    with open(file_to_write, 'w') as f:
        f.write(content)
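A minimal usage sketch for write_file, assuming the module-level logger and the yellow/green color helpers used above are already set up (hypothetical call site, not part of the original code):

from pathlib import Path

out_dir = Path("crytic-export")
out_dir.mkdir(parents=True, exist_ok=True)  # write_file expects the directory to exist

# Overwrites report.md and logs a warning if it is already present
# (allow_overwrite defaults to True).
write_file(out_dir, "report.md", "# Report\n")

# Silently skips the write because the file now exists.
write_file(out_dir, "report.md", "# Report\n", discard_if_exist=True)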
Example #3
    def get_summary_erc20(self, contract):
        txt = ''

        pause, mint_limited, race_condition_mitigated = self._get_summary_erc20(
            contract)

        if pause:
            txt += "\t\t Can be paused? : {}\n".format(yellow('Yes'))
        else:
            txt += "\t\t Can be paused? : {}\n".format(green('No'))

        if mint_limited is None:
            txt += "\t\t Minting restriction? : {}\n".format(
                green('No Minting'))
        else:
            if mint_limited:
                txt += "\t\t Minting restriction? : {}\n".format(red('Yes'))
            else:
                txt += "\t\t Minting restriction? : {}\n".format(yellow('No'))

        if race_condition_mitigated:
            txt += "\t\t ERC20 race condition mitigation: {}\n".format(
                green('Yes'))
        else:
            txt += "\t\t ERC20 race condition mitigation: {}\n".format(
                red('No'))

        return txt
Example #4
def output_to_json(filename: Optional[str], error, results: Dict) -> None:
    """

    :param filename: Filename where the JSON will be written. If None or "-", write to stdout
    :param error: Error to report
    :param results: Results to report
    :return:
    """
    # Create our encapsulated JSON result.
    json_result = {
        "success": error is None,
        "error": error,
        "results": results
    }

    if filename == "-":
        filename = None

    # Determine if we should output to stdout
    if filename is None:
        # Write json to console
        print(json.dumps(json_result))
    else:
        # Write json to file
        if os.path.isfile(filename):
            logger.info(
                yellow(
                    f"{filename} exists already, the overwrite is prevented"))
        else:
            with open(filename, "w", encoding="utf8") as f:
                json.dump(json_result, f, indent=2)
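A usage sketch, assuming output_to_json is imported from the module above; None and "-" both send the JSON to stdout, while an existing file path is left untouched and only a warning is logged:

results = {"detectors": [], "printers": []}

# Print the wrapped JSON to stdout.
output_to_json("-", None, results)

# Write to a file (skipped with a warning if report.json already exists).
output_to_json("report.json", None, results)

# Report an error instead of results.
output_to_json(None, "compilation failed", {})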
Example #5
def output_to_zip(filename: str,
                  error: Optional[str],
                  results: Dict,
                  zip_type: str = "lzma"):
    """
    Output the results to a zip
    The file in the zip is named slither_results.json
    Note: the JSON file is written without indentation, so the resulting file is smaller
    :param zip_type:
    :param filename:
    :param error:
    :param results:
    :return:
    """
    json_result = {
        "success": error is None,
        "error": error,
        "results": results
    }
    if os.path.isfile(filename):
        logger.info(
            yellow(f"{filename} exists already, the overwrite is prevented"))
    else:
        with ZipFile(
                filename,
                "w",
                compression=ZIP_TYPES_ACCEPTED.get(zip_type, zipfile.ZIP_LZMA),
        ) as file_desc:
            file_desc.writestr("slither_results.json",
                               json.dumps(json_result).encode("utf8"))
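The ZIP_TYPES_ACCEPTED mapping referenced above is not shown in this example. A plausible definition, assuming it simply maps the accepted zip_type strings onto the compression constants of the standard zipfile module; check the Slither source for the authoritative set:

import zipfile

# Assumed shape of the mapping used by output_to_zip above.
ZIP_TYPES_ACCEPTED = {
    "lzma": zipfile.ZIP_LZMA,
    "stored": zipfile.ZIP_STORED,
    "deflated": zipfile.ZIP_DEFLATED,
    "bzip2": zipfile.ZIP_BZIP2,
}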
Example #6
    def detect(self):
        all_results = self._detect()
        results = []
        # only keep valid results, and remove duplicates
        [results.append(r) for r in all_results if self.slither.valid_result(r) and r not in results]
        if results:
            if self.logger:
                info = '\n'
                for idx, result in enumerate(results):
                    if self.slither.triage_mode:
                        info += '{}: '.format(idx)
                    info += result['description']
                info += 'Reference: {}'.format(self.WIKI)
                self._log(info)
        if results and self.slither.triage_mode:
            while True:
                indexes = input('Results to hide during next runs: "0,1,...,{}" or "All" (enter to not hide results): '.format(len(results)))
                if indexes == 'All':
                    self.slither.save_results_to_hide(results)
                    return []
                if indexes == '':
                    return results
                if indexes.startswith('['):
                    indexes = indexes[1:]
                if indexes.endswith(']'):
                    indexes = indexes[:-1]
                try:
                    indexes = [int(i) for i in indexes.split(',')]
                    self.slither.save_results_to_hide([r for (idx, r) in enumerate(results) if idx in indexes])
                    return [r for (idx, r) in enumerate(results) if idx not in indexes]
                except ValueError:
                    self.logger.error(yellow('Malformed input. Example of valid input: 0,1,2,3'))
        return results
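The triage prompt above accepts either bare comma-separated indexes or a bracketed list such as "[0, 1]". A standalone sketch of that parsing step, using a hypothetical helper name that is not part of Slither:

def parse_triage_indexes(raw: str) -> list:
    """Parse input such as '0,2,3' or '[0, 2, 3]' into a list of ints."""
    raw = raw.strip()
    if raw.startswith('['):
        raw = raw[1:]
    if raw.endswith(']'):
        raw = raw[:-1]
    # Raises ValueError on malformed input, mirroring the handling above.
    return [int(i) for i in raw.split(',')]


assert parse_triage_indexes('[0, 2]') == [0, 2]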
Example #7
def output_json(results, filename):
    if os.path.isfile(filename):
        logger.info(
            yellow(f'{filename} exists already, the overwrite is prevented'))
    else:
        with open(filename, 'w', encoding='utf8') as f:
            json.dump(results, f)
Example #8
    def get_detectors_result(self):
        issues_informational, issues_low, issues_medium, issues_high = self._get_detectors_result()
        txt = "Number of informational issues: {}\n".format(green(issues_informational))
        txt += "Number of low issues: {}\n".format(green(issues_low))
        txt += "Number of medium issues: {}\n".format(yellow(issues_medium))
        txt += "Number of high issues: {}\n".format(red(issues_high))

        return txt
Example #9
def slither_format(slither, **kwargs):  # pylint: disable=too-many-locals
    """'
    Keyword Args:
        detectors_to_run (str): Comma-separated list of detectors, defaults to all
    """

    detectors_to_run = choose_detectors(kwargs.get("detectors_to_run", "all"),
                                        kwargs.get("detectors_to_exclude", ""))

    for detector in detectors_to_run:
        slither.register_detector(detector)

    slither.generate_patches = True

    detector_results = slither.run_detectors()
    detector_results = [x for x in detector_results
                        if x]  # remove empty results
    detector_results = [
        item for sublist in detector_results for item in sublist
    ]  # flatten

    export = Path("crytic-export", "patches")

    export.mkdir(parents=True, exist_ok=True)

    counter_result = 0

    logger.info(
        yellow(
            "slither-format is in beta, carefully review each patch before merging it."
        ))

    for result in detector_results:
        if not "patches" in result:
            continue
        one_line_description = result["description"].split("\n")[0]

        export_result = Path(export, f"{counter_result}")
        export_result.mkdir(parents=True, exist_ok=True)
        counter_result += 1
        counter = 0

        logger.info(f"Issue: {one_line_description}")
        logger.info(f"Generated: ({export_result})")

        for (
                _,
                diff,
        ) in result["patches_diff"].items():
            filename = f"fix_{counter}.patch"
            path = Path(export_result, filename)
            logger.info(f"\t- {filename}")
            with open(path, "w") as f:
                f.write(diff)
            counter += 1
Example #10
    def get_summary_erc20(self, contract):
        txt = ""

        pause, mint_unlimited, race_condition_mitigated = self._get_summary_erc20(contract)

        if pause:
            txt += yellow("Pausable") + "\n"

        if mint_unlimited is None:
            txt += green("No Minting") + "\n"
        else:
            if mint_unlimited:
                txt += red("∞ Minting") + "\n"
            else:
                txt += yellow("Minting") + "\n"

        if not race_condition_mitigated:
            txt += red("Approve Race Cond.") + "\n"

        return txt
Example #11
def output_json(results, filename):
    json_result = wrap_json_detectors_results(True, None, results)
    if filename is None:
        # Write json to console
        print(json.dumps(json_result))
    else:
        # Write json to file
        if os.path.isfile(filename):
            logger.info(yellow(f'{filename} exists already, the overwrite is prevented'))
        else:
            with open(filename, 'w', encoding='utf8') as f:
                json.dump(json_result, f, indent=2)
Example #12
def compare_variables_order_proxy(implem, implem_name, proxy, proxy_name):

    contract_implem = implem.get_contract_from_name(implem_name)
    if contract_implem is None:
        logger.info(
            red('Contract {} not found in {}'.format(implem_name,
                                                     implem.filename)))
        exit(-1)

    contract_proxy = proxy.get_contract_from_name(proxy_name)
    if contract_proxy is None:
        logger.info(
            red('Contract {} not found in {}'.format(proxy_name,
                                                     proxy.filename)))
        exit(-1)

    order_implem = [(variable.name, variable.type)
                    for variable in contract_implem.state_variables
                    if not variable.is_constant]
    order_proxy = [(variable.name, variable.type)
                   for variable in contract_proxy.state_variables
                   if not variable.is_constant]

    found = False
    for idx in range(0, len(order_proxy)):
        (proxy_name, proxy_type) = order_proxy[idx]
        if len(order_implem) <= idx:
            logger.info(
                red('Extra variable in the proxy: {} {}'.format(
                    proxy_name, proxy_type)))
            continue
        (implem_name, implem_type) = order_implem[idx]

        if (proxy_name != implem_name) or (proxy_type != implem_type):
            found = True
            logger.info(
                red('Different variables between proxy and implem: {} {} -> {} {}'
                    .format(proxy_name, proxy_type, implem_name, implem_type)))
        else:
            logger.info(
                yellow('Variable in the proxy: {} {}'.format(
                    proxy_name, proxy_type)))

    #if len(order_implem) > len(order_proxy):
    #    new_variables = order_implem[len(order_proxy):]
    #    for (name, t) in new_variables:
    #        logger.info(green('Variable only in implem: {} {}'.format(name, t)))

    if not found:
        logger.info(
            green(
                'No error found (variables ordering proxy <-> implementation)')
        )
Example #13
def slither_format(slither, **kwargs):
    '''
    Keyword Args:
        detectors_to_run (str): Comma-separated list of detectors, defaults to all
    '''

    detectors_to_run = choose_detectors(kwargs.get('detectors_to_run', 'all'),
                                        kwargs.get('detectors_to_exclude', ''))

    for detector in detectors_to_run:
        slither.register_detector(detector)

    slither.generate_patches = True

    detector_results = slither.run_detectors()
    detector_results = [x for x in detector_results
                        if x]  # remove empty results
    detector_results = [
        item for sublist in detector_results for item in sublist
    ]  # flatten

    export = Path('crytic-export', 'patches')

    export.mkdir(parents=True, exist_ok=True)

    counter_result = 0

    logger.info(
        yellow(
            'slither-format is in beta, carefully review each patch before merging it.'
        ))

    for result in detector_results:
        if not 'patches' in result:
            continue
        one_line_description = result["description"].split("\n")[0]

        export_result = Path(export, f'{counter_result}')
        export_result.mkdir(parents=True, exist_ok=True)
        counter_result += 1
        counter = 0

        logger.info(f'Issue: {one_line_description}')
        logger.info(f'Generated: ({export_result})')

        for file, diff, in result['patches_diff'].items():
            filename = f'fix_{counter}.patch'
            path = Path(export_result, filename)
            logger.info(f'\t- {filename}')
            with open(path, 'w') as f:
                f.write(diff)
            counter += 1
Example #14
def check_initialization(s):

    initializable = s.get_contract_from_name('Initializable')

    logger.info(green('Run initialization checks... (see https://github.com/crytic/slither/wiki/Upgradeability-Checks#initialization-checks)'))

    if initializable is None:
        logger.info(yellow('Initializable contract not found, the contract does not follow a standard initialization schema.'))
        return

    init_info = ''

    double_calls_found = False
    missing_call = False
    initializer_modifier_missing = False

    for contract in s.contracts:
        if initializable in contract.inheritance:
            initializer = contract.get_modifier_from_canonical_name('Initializable.initializer()')
            all_init_functions = _get_initialize_functions(contract)
            for f in all_init_functions:
                if not initializer in f.modifiers:
                    initializer_modifier_missing = True
                    logger.info(red(f'{f.canonical_name} does not call initializer'))
            most_derived_init = _get_most_derived_init(contract)
            if most_derived_init is None:
                init_info += f'{contract.name} has no initialize function\n'
                continue
            else:
                init_info += f'{contract.name} needs to be initialized by {most_derived_init.full_name}\n'
            all_init_functions_called = _get_all_internal_calls(most_derived_init) + [most_derived_init]
            missing_calls = [f for f in all_init_functions if not f in all_init_functions_called]
            for f in missing_calls:
                logger.info(red(f'Missing call to {f.canonical_name} in {contract.name}'))
                missing_call = True
            double_calls = list(set([f for f in all_init_functions_called if all_init_functions_called.count(f) > 1]))
            for f in double_calls:
                logger.info(red(f'{f.canonical_name} is called multiple times in {contract.name}'))
                double_calls_found = True

    if not initializer_modifier_missing:
        logger.info(green('All the init functions have the initializer modifier'))

    if not double_calls_found:
        logger.info(green('No double call to init functions found'))

    if not missing_call:
        logger.info(green('No missing call to an init function found'))

    logger.info(green('Check the deployment script to ensure that these functions are called:\n' + init_info))
Example #15
def output_to_sarif(filename: Optional[str], results: Dict,
                    detectors_classes: List["AbstractDetector"]) -> None:
    """

    :param filename:
    :type filename:
    :param results:
    :type results:
    :return:
    :rtype:
    """

    sarif: Dict[str, Any] = {
        "$schema":
        "https://raw.githubusercontent.com/oasis-tcs/sarif-spec/master/Schemata/sarif-schema-2.1.0.json",
        "version":
        "2.1.0",
        "runs": [{
            "tool": {
                "driver": {
                    "name": "Slither",
                    "informationUri": "https://github.com/crytic/slither",
                    "version": require("slither-analyzer")[0].version,
                    "rules": [],
                }
            },
            "results": [],
        }],
    }

    for detector in results["detectors"]:
        _output_result_to_sarif(detector, detectors_classes, sarif)

    if filename == "-":
        filename = None

    # Determine if we should output to stdout
    if filename is None:
        # Write json to console
        print(json.dumps(sarif))
    else:
        # Write json to file
        if os.path.isfile(filename):
            logger.info(
                yellow(
                    f"{filename} exists already, the overwrite is prevented"))
        else:
            with open(filename, "w", encoding="utf8") as f:
                json.dump(sarif, f, indent=2)
Example #16
    def get_detectors_result(self) -> Tuple[str, List[Dict], int, int, int, int, int]:
        all_results, optimization, informational, low, medium, high = self._get_detectors_result()
        txt = "Number of optimization issues: {}\n".format(green(optimization))
        txt += "Number of informational issues: {}\n".format(green(informational))
        txt += "Number of low issues: {}\n".format(green(low))
        if medium > 0:
            txt += "Number of medium issues: {}\n".format(yellow(medium))
        else:
            txt += "Number of medium issues: {}\n".format(green(medium))
        if high > 0:
            txt += "Number of high issues: {}\n".format(red(high))
        else:
            txt += "Number of high issues: {}\n\n".format(green(high))

        return txt, all_results, optimization, informational, low, medium, high
Example #17
def read_config_file(args):
    if os.path.isfile(args.config_file):
        try:
            with open(args.config_file) as f:
                config = json.load(f)
                for key, elem in config.items():
                    if key not in defaults_flag_in_config:
                        logger.info(
                            yellow("{} has an unknown key: {} : {}".format(
                                args.config_file, key, elem)))
                        continue
                    if getattr(args, key) == defaults_flag_in_config[key]:
                        setattr(args, key, elem)
        except json.decoder.JSONDecodeError as e:
            logger.error(
                red("Impossible to read {}, please check the file {}".format(
                    args.config_file, e)))
Example #18
def compare_variables_order_proxy(implem, implem_name, proxy, proxy_name):

    logger.info(green('Run variables order checks between the implementation and the proxy... (see https://github.com/crytic/slither/wiki/Upgradeability-Checks#variables-order-checks)'))

    contract_implem = implem.get_contract_from_name(implem_name)
    if contract_implem is None:
        logger.info(red('Contract {} not found in {}'.format(implem_name, implem.filename)))
        exit(-1)

    contract_proxy = proxy.get_contract_from_name(proxy_name)
    if contract_proxy is None:
        logger.info(red('Contract {} not found in {}'.format(proxy_name, proxy.filename)))
        exit(-1)


    order_implem = [(variable.name, variable.type) for variable in contract_implem.state_variables if not variable.is_constant]
    order_proxy = [(variable.name, variable.type) for variable in contract_proxy.state_variables if not variable.is_constant]


    found = False
    for idx in range(0, len(order_proxy)):
        (proxy_name, proxy_type) =  order_proxy[idx]
        if len(order_implem) <= idx:
            logger.info(red('Extra variable in the proxy: {} {}'.format(proxy_name, proxy_type)))
            continue
        (implem_name, implem_type) =  order_implem[idx]

        if (proxy_name != implem_name) or (proxy_type != implem_type):
            found = True
            logger.info(red('Different variables between proxy and implem: {} {} -> {} {}'.format(proxy_name,
                                                                        proxy_type,
                                                                        implem_name,
                                                                        implem_type)))
        else:
            logger.info(yellow('Variable in the proxy: {} {}'.format(proxy_name,
                                                                     proxy_type)))


    #if len(order_implem) > len(order_proxy):
    #    new_variables = order_implem[len(order_proxy):]
    #    for (name, t) in new_variables:
    #        logger.info(green('Variable only in implem: {} {}'.format(name, t)))

    if not found:
        logger.info(green('No variables ordering error found between implementation and the proxy'))
Example #19
def output_json(filename, error, results):
    # Create our encapsulated JSON result.
    json_result = {
        "success": error is None,
        "error": error,
        "results": results
    }

    # Determine if we should output to stdout
    if filename is None:
        # Write json to console
        print(json.dumps(json_result))
    else:
        # Write json to file
        if os.path.isfile(filename):
            logger.info(
                yellow(
                    f'{filename} exists already, the overwrite is prevented'))
        else:
            with open(filename, 'w', encoding='utf8') as f:
                json.dump(json_result, f, indent=2)
Example #20
    def detect(self):
        all_results = self._detect()
        # Keep only dictionaries
        all_results = [r.data for r in all_results]
        results = []
        # only keep valid results, and remove duplicates
        # pylint: disable=expression-not-assigned
        [
            results.append(r) for r in all_results
            if self.slither.valid_result(r) and r not in results
        ]
        if results:
            if self.logger:
                info = "\n"
                for idx, result in enumerate(results):
                    if self.slither.triage_mode:
                        info += "{}: ".format(idx)
                    info += result["description"]
                info += "Reference: {}".format(self.WIKI)
                self._log(info)
        if self.slither.generate_patches:
            for result in results:
                try:
                    self._format(self.slither, result)
                    if not "patches" in result:
                        continue
                    result["patches_diff"] = dict()
                    for file in result["patches"]:
                        original_txt = self.slither.source_code[file].encode(
                            "utf8")
                        patched_txt = original_txt
                        offset = 0
                        patches = result["patches"][file]
                        patches.sort(key=lambda x: x["start"])
                        if not all(patches[i]["end"] <= patches[i + 1]["end"]
                                   for i in range(len(patches) - 1)):
                            self._log(
                                f"Impossible to generate patch; patches collisions: {patches}"
                            )
                            continue
                        for patch in patches:
                            patched_txt, offset = apply_patch(
                                patched_txt, patch, offset)
                        diff = create_diff(self.slither, original_txt,
                                           patched_txt, file)
                        if not diff:
                            self._log(
                                f"Impossible to generate patch; empty {result}"
                            )
                        else:
                            result["patches_diff"][file] = diff

                except FormatImpossible as exception:
                    self._log(
                        f'\nImpossible to patch:\n\t{result["description"]}\t{exception}'
                    )

        if results and self.slither.triage_mode:
            while True:
                indexes = input(
                    'Results to hide during next runs: "0,1,...,{}" or "All" (enter to not hide results): '
                    .format(len(results)))
                if indexes == "All":
                    self.slither.save_results_to_hide(results)
                    return []
                if indexes == "":
                    return results
                if indexes.startswith("["):
                    indexes = indexes[1:]
                if indexes.endswith("]"):
                    indexes = indexes[:-1]
                try:
                    indexes = [int(i) for i in indexes.split(",")]
                    self.slither.save_results_to_hide([
                        r for (idx, r) in enumerate(results) if idx in indexes
                    ])
                    return [
                        r for (idx, r) in enumerate(results)
                        if idx not in indexes
                    ]
                except ValueError:
                    self.logger.error(
                        yellow(
                            "Malformed input. Example of valid input: 0,1,2,3")
                    )
        return results
Example #21
    def detect(self) -> List[Dict]:
        results: List[Dict] = []
        # only keep valid results, and remove duplicates
        # Keep only dictionaries
        for r in [output.data for output in self._detect()]:
            if self.compilation_unit.core.valid_result(r) and r not in results:
                results.append(r)
        if results and self.logger:
            self._log_result(results)
        if self.compilation_unit.core.generate_patches:
            for result in results:
                try:
                    self._format(self.compilation_unit, result)
                    if not "patches" in result:
                        continue
                    result["patches_diff"] = {}
                    for file in result["patches"]:
                        original_txt = self.compilation_unit.core.source_code[
                            file].encode("utf8")
                        patched_txt = original_txt
                        offset = 0
                        patches = result["patches"][file]
                        patches.sort(key=lambda x: x["start"])
                        if not all(patches[i]["end"] <= patches[i + 1]["end"]
                                   for i in range(len(patches) - 1)):
                            self._log(
                                f"Impossible to generate patch; patches collisions: {patches}"
                            )
                            continue
                        for patch in patches:
                            patched_txt, offset = apply_patch(
                                patched_txt, patch, offset)
                        diff = create_diff(self.compilation_unit, original_txt,
                                           patched_txt, file)
                        if not diff:
                            self._log(
                                f"Impossible to generate patch; empty {result}"
                            )
                        else:
                            result["patches_diff"][file] = diff

                except FormatImpossible as exception:
                    self._log(
                        f'\nImpossible to patch:\n\t{result["description"]}\t{exception}'
                    )

        if results and self.slither.triage_mode:
            while True:
                indexes = input(
                    f'Results to hide during next runs: "0,1,...,{len(results)}" or "All" (enter to not hide results): '
                )
                if indexes == "All":
                    self.slither.save_results_to_hide(results)
                    return []
                if indexes == "":
                    return results
                if indexes.startswith("["):
                    indexes = indexes[1:]
                if indexes.endswith("]"):
                    indexes = indexes[:-1]
                try:
                    indexes_converted = [int(i) for i in indexes.split(",")]
                    self.slither.save_results_to_hide([
                        r for (idx, r) in enumerate(results)
                        if idx in indexes_converted
                    ])
                    return [
                        r for (idx, r) in enumerate(results)
                        if idx not in indexes_converted
                    ]
                except ValueError:
                    self.logger.error(
                        yellow(
                            "Malformed input. Example of valid input: 0,1,2,3")
                    )
        results = sorted(results, key=lambda x: x["id"])

        return results
Example #22
def compare_variables_order(contract1, contract2, missing_variable_check=True):

    results = {
        'missing_variables': [],
        'different-variables': [],
        'extra-variables': []
    }

    logger.info(
        green(
            f'\n## Run variables ordering checks between {contract1.name} and {contract2.name}... (see https://github.com/crytic/slither/wiki/Upgradeability-Checks#variables-order-checks)'
        ))

    order1 = [
        variable for variable in contract1.state_variables
        if not variable.is_constant
    ]
    order2 = [
        variable for variable in contract2.state_variables
        if not variable.is_constant
    ]

    error_found = False
    idx = 0
    for idx in range(0, len(order1)):
        variable1 = order1[idx]
        if len(order2) <= idx:
            if missing_variable_check:
                info = f'Variable only in {contract1.name}: {variable1.name} ({variable1.source_mapping_str})'
                logger.info(yellow(info))

                res = Output(info)
                res.add(variable1)
                results['missing_variables'].append(res.data)

                error_found = True
            continue

        variable2 = order2[idx]

        if (variable1.name != variable2.name) or (variable1.type !=
                                                  variable2.type):
            info = f'Different variables between {contract1.name} and {contract2.name}:\n'
            info += f'\t Variable {idx} in {contract1.name}: {variable1.name} {variable1.type} ({variable1.source_mapping_str})\n'
            info += f'\t Variable {idx} in {contract2.name}: {variable2.name} {variable2.type} ({variable2.source_mapping_str})\n'
            logger.info(red(info))

            res = Output(info, additional_fields={'index': idx})
            res.add(variable1)
            res.add(variable2)
            results['different-variables'].append(res.data)

            error_found = True

    idx = idx + 1

    while idx < len(order2):
        variable2 = order2[idx]

        info = f'Extra variables in {contract2.name}: {variable2.name} ({variable2.source_mapping_str})\n'
        logger.info(yellow(info))
        res = Output(info, additional_fields={'index': idx})
        res.add(variable2)
        results['extra-variables'].append(res.data)
        idx = idx + 1

    if not error_found:
        logger.info(green('No error found'))

    return results
Example #23
def check_initialization(contract):

    results = {
        'Initializable-present': False,
        'Initializable-inherited': False,
        'Initializable.initializer()-present': False,
        'missing-initializer-modifier': [],
        'initialize_target': {},
        'missing-calls': [],
        'multiple-calls': []
    }

    error_found = False

    logger.info(
        green(
            '\n## Run initialization checks... (see https://github.com/crytic/slither/wiki/Upgradeability-Checks#initialization-checks)'
        ))

    # Check if the Initializable contract is present
    initializable = contract.slither.get_contract_from_name('Initializable')
    if initializable is None:
        logger.info(
            yellow(
                'Initializable contract not found, the contract does not follow a standard initialization schema.'
            ))
        return results
    results['Initializable-present'] = True

    # Check if the Initializable contract is inherited
    if initializable not in contract.inheritance:
        logger.info(
            yellow('The logic contract does not call the initializer.'))
        return results
    results['Initializable-inherited'] = True

    # Check if the initializer modifier is present
    initializer = contract.get_modifier_from_canonical_name(
        'Initializable.initializer()')
    if initializer is None:
        logger.info(yellow('Initializable.initializer() does not exist'))
        return results
    results['Initializable.initializer()-present'] = True

    # Check if a init function lacks the initializer modifier
    initializer_modifier_missing = False
    all_init_functions = _get_initialize_functions(contract)
    for f in all_init_functions:
        if not initializer in f.modifiers:
            initializer_modifier_missing = True
            info = f'{f.canonical_name} does not call the initializer modifier'
            logger.info(red(info))
            res = Output(info)
            res.add(f)
            results['missing-initializer-modifier'].append(res.data)

    if not initializer_modifier_missing:
        logger.info(
            green('All the init functions have the initializer modifier'))

    # Check if we can determine the initialize function that will be called
    # TODO: handle MultipleInitTarget
    try:
        most_derived_init = _get_most_derived_init(contract)
    except MultipleInitTarget:
        logger.info(red('Too many init targets'))
        return results

    if most_derived_init is None:
        init_info = f'{contract.name} has no initialize function\n'
        logger.info(green(init_info))
        results['initialize_target'] = {}
        return results
    # results['initialize_target'] is set at the end, as we want to print it last

    # Check if an initialize function is not called from the most_derived_init function
    missing_call = False
    all_init_functions_called = _get_all_internal_calls(most_derived_init) + [
        most_derived_init
    ]
    missing_calls = [
        f for f in all_init_functions if not f in all_init_functions_called
    ]
    for f in missing_calls:
        info = f'Missing call to {f.canonical_name} in {most_derived_init.canonical_name}'
        logger.info(red(info))
        res = Output(info)
        res.add(f, {"is_most_derived_init_function": False})
        res.add(most_derived_init, {"is_most_derived_init_function": True})
        results['missing-calls'].append(res.data)
        missing_call = True
    if not missing_call:
        logger.info(green('No missing call to an init function found'))

    # Check if an init function is called multiple times
    double_calls = list(
        set([
            f for f in all_init_functions_called
            if all_init_functions_called.count(f) > 1
        ]))
    double_calls_found = False
    for f in double_calls:
        info = f'{f.canonical_name} is called multiple times in {most_derived_init.full_name}'
        logger.info(red(info))
        res = Output(info)
        res.add(f)
        results['multiple-calls'].append(res.data)
        double_calls_found = True
    if not double_calls_found:
        logger.info(green('No double call to init functions found'))

    # Print the initialize_target info

    init_info = f'{contract.name} needs to be initialized by {most_derived_init.full_name}\n'
    logger.info(
        green(
            'Check the deployment script to ensure that these functions are called:\n'
            + init_info))
    res = Output(init_info)
    res.add(most_derived_init)
    results['initialize_target'] = res.data

    if not error_found:
        logger.info(green('No error found'))

    return results
Example #24
def constant_conformance_check(contract_v1, contract_v2):

    results = {
        "became_constants": [],
        "were_constants": [],
        "not_found_in_v2": [],
    }

    logger.info(
        green(
            '\n## Run variable constants conformance check... (see https://github.com/crytic/slither/wiki/Upgradeability-Checks)'
        ))
    error_found = False

    state_variables_v1 = contract_v1.state_variables
    state_variables_v2 = contract_v2.state_variables

    v2_additional_variables = len(state_variables_v2) - len(state_variables_v1)
    if v2_additional_variables < 0:
        v2_additional_variables = 0

    # We keep two index, because we need to have them out of sync if v2
    # has additional non constant variables
    idx_v1 = 0
    idx_v2 = 0
    while idx_v1 < len(state_variables_v1):

        state_v1 = contract_v1.state_variables[idx_v1]
        if len(state_variables_v2) <= idx_v2:
            break

        state_v2 = contract_v2.state_variables[idx_v2]

        if state_v2:
            if state_v1.is_constant:
                if not state_v2.is_constant:

                    # If v2 has additional non constant variables, we need to skip them
                    if (state_v1.name != state_v2.name or state_v1.type !=
                            state_v2.type) and v2_additional_variables > 0:
                        v2_additional_variables -= 1
                        idx_v2 += 1
                        continue

                    info = f'{state_v1.canonical_name} ({state_v1.source_mapping_str}) was constant and {state_v2.canonical_name} is not ({state_v2.source_mapping_str})'
                    logger.info(red(info))

                    res = Output(info)
                    res.add(state_v1)
                    res.add(state_v2)
                    results['were_constants'].append(res.data)
                    error_found = True

            elif state_v2.is_constant:
                info = f'{state_v1.canonical_name} ({state_v1.source_mapping_str}) was not constant but {state_v2.canonical_name} is ({state_v2.source_mapping_str})'
                logger.info(red(info))

                res = Output(info)
                res.add(state_v1)
                res.add(state_v2)
                results['became_constants'].append(res.data)
                error_found = True

        else:
            info = f'{state_v1.canonical_name} not found in {contract_v2.name}, no check was done'
            logger.info(yellow(info))

            res = Output(info)
            res.add(state_v1)
            res.add(contract_v2)
            results['not_found_in_v2'].append(res.data)

            error_found = True

        idx_v1 += 1
        idx_v2 += 1

    if not error_found:
        logger.info(green('No error found'))

    return results
Example #25
def parse_args(detector_classes, printer_classes):
    parser = argparse.ArgumentParser(
        description=
        'Slither. For usage information, see https://github.com/crytic/slither/wiki/Usage',
        usage="slither.py contract.sol [flag]")

    parser.add_argument('filename', help='contract.sol')

    parser.add_argument('--version',
                        help='displays the current version',
                        version=require('slither-analyzer')[0].version,
                        action='version')

    group_detector = parser.add_argument_group('Detectors')
    group_printer = parser.add_argument_group('Printers')
    group_solc = parser.add_argument_group('Solc options')
    group_truffle = parser.add_argument_group('Truffle options')
    group_embark = parser.add_argument_group('Embark options')
    group_misc = parser.add_argument_group('Additional option')

    group_detector.add_argument(
        '--detect',
        help='Comma-separated list of detectors, defaults to all, '
        'available detectors: {}'.format(', '.join(d.ARGUMENT
                                                   for d in detector_classes)),
        action='store',
        dest='detectors_to_run',
        default=defaults_flag_in_config['detectors_to_run'])

    group_printer.add_argument(
        '--print',
        help='Comma-separated list of contract information printers, '
        'available printers: {}'.format(', '.join(d.ARGUMENT
                                                  for d in printer_classes)),
        action='store',
        dest='printers_to_run',
        default=defaults_flag_in_config['printers_to_run'])

    group_detector.add_argument('--list-detectors',
                                help='List available detectors',
                                action=ListDetectors,
                                nargs=0,
                                default=False)

    group_printer.add_argument('--list-printers',
                               help='List available printers',
                               action=ListPrinters,
                               nargs=0,
                               default=False)

    group_detector.add_argument(
        '--exclude',
        help='Comma-separated list of detectors that should be excluded',
        action='store',
        dest='detectors_to_exclude',
        default=defaults_flag_in_config['detectors_to_exclude'])

    group_detector.add_argument(
        '--exclude-informational',
        help='Exclude informational impact analyses',
        action='store_true',
        default=defaults_flag_in_config['exclude_informational'])

    group_detector.add_argument('--exclude-low',
                                help='Exclude low impact analyses',
                                action='store_true',
                                default=defaults_flag_in_config['exclude_low'])

    group_detector.add_argument(
        '--exclude-medium',
        help='Exclude medium impact analyses',
        action='store_true',
        default=defaults_flag_in_config['exclude_medium'])

    group_detector.add_argument(
        '--exclude-high',
        help='Exclude high impact analyses',
        action='store_true',
        default=defaults_flag_in_config['exclude_high'])

    group_solc.add_argument('--solc',
                            help='solc path',
                            action='store',
                            default=defaults_flag_in_config['solc'])

    group_solc.add_argument(
        '--solc-args',
        help=
        'Add custom solc arguments. Example: --solc-args "--allow-path /tmp --evm-version byzantium".',
        action='store',
        default=defaults_flag_in_config['solc_args'])

    group_solc.add_argument(
        '--disable-solc-warnings',
        help='Disable solc warnings',
        action='store_true',
        default=defaults_flag_in_config['disable_solc_warnings'])

    group_solc.add_argument('--solc-ast',
                            help='Provide the ast solc file',
                            action='store_true',
                            default=False)

    group_truffle.add_argument(
        '--truffle-ignore-compile',
        help='Do not run truffle compile',
        action='store_true',
        dest='truffle_ignore_compile',
        default=defaults_flag_in_config['truffle_ignore_compile'])

    group_truffle.add_argument(
        '--truffle-build-directory',
        help='Use an alternative truffle build directory',
        action='store',
        dest='truffle_build_directory',
        default=defaults_flag_in_config['truffle_build_directory'])

    group_truffle.add_argument(
        '--truffle-version',
        help='Use a local Truffle version (with npx)',
        action='store',
        default=defaults_flag_in_config['truffle_version'])

    group_embark.add_argument(
        '--embark-ignore-compile',
        help='Do not run embark build',
        action='store_true',
        dest='embark_ignore_compile',
        default=defaults_flag_in_config['embark_ignore_compile'])

    group_embark.add_argument(
        '--embark-overwrite-config',
        help=
        'Install @trailofbits/embark-contract-export and add it to embark.json',
        action='store_true',
        default=defaults_flag_in_config['embark_overwrite_config'])

    group_misc.add_argument('--json',
                            help='Export results as JSON',
                            action='store',
                            default=defaults_flag_in_config['json'])

    group_misc.add_argument('--disable-color',
                            help='Disable output colorization',
                            action='store_true',
                            default=defaults_flag_in_config['disable_color'])

    group_misc.add_argument(
        '--filter-paths',
        help='Comma-separated list of paths for which results will be excluded',
        action='store',
        dest='filter_paths',
        default=defaults_flag_in_config['filter_paths'])

    group_misc.add_argument(
        '--triage-mode',
        help='Run triage mode (save results in slither.db.json)',
        action='store_true',
        dest='triage_mode',
        default=False)

    group_misc.add_argument(
        '--config-file',
        help='Provide a config file (default: slither.config.json)',
        action='store',
        dest='config_file',
        default='slither.config.json')

    # debugger command
    parser.add_argument('--debug',
                        help=argparse.SUPPRESS,
                        action="store_true",
                        default=False)

    parser.add_argument('--markdown',
                        help=argparse.SUPPRESS,
                        action=OutputMarkdown,
                        default=False)

    group_misc.add_argument('--checklist',
                            help=argparse.SUPPRESS,
                            action='store_true',
                            default=False)

    parser.add_argument('--wiki-detectors',
                        help=argparse.SUPPRESS,
                        action=OutputWiki,
                        default=False)

    parser.add_argument('--list-detectors-json',
                        help=argparse.SUPPRESS,
                        action=ListDetectorsJson,
                        nargs=0,
                        default=False)

    parser.add_argument('--legacy-ast',
                        help=argparse.SUPPRESS,
                        action='store_true',
                        default=defaults_flag_in_config['legacy_ast'])

    parser.add_argument('--ignore-return-value',
                        help=argparse.SUPPRESS,
                        action='store_true',
                        default=False)

    # if the json is split into different files
    parser.add_argument('--splitted',
                        help=argparse.SUPPRESS,
                        action='store_true',
                        default=False)

    if len(sys.argv) == 1:
        parser.print_help(sys.stderr)
        sys.exit(1)

    args = parser.parse_args()

    if os.path.isfile(args.config_file):
        try:
            with open(args.config_file) as f:
                config = json.load(f)
                for key, elem in config.items():
                    if key not in defaults_flag_in_config:
                        logger.info(
                            yellow('{} has an unknown key: {} : {}'.format(
                                args.config_file, key, elem)))
                        continue
                    if getattr(args, key) == defaults_flag_in_config[key]:
                        setattr(args, key, elem)
        except json.decoder.JSONDecodeError as e:
            logger.error(
                red('Impossible to read {}, please check the file {}'.format(
                    args.config_file, e)))

    return args
Example #26
def _output_result_to_sarif(detector: Dict,
                            detectors_classes: List["AbstractDetector"],
                            sarif: Dict) -> None:
    confidence = "very-high"
    if detector["confidence"] == "Medium":
        confidence = "high"
    elif detector["confidence"] == "Low":
        confidence = "medium"
    elif detector["confidence"] == "Informational":
        confidence = "low"

    risk = "0.0"
    if detector["impact"] == "High":
        risk = "8.0"
    elif detector["impact"] == "Medium":
        risk = "4.0"
    elif detector["impact"] == "Low":
        risk = "3.0"

    detector_class = next(
        (d for d in detectors_classes if d.ARGUMENT == detector["check"]))
    check_id = (str(detector_class.IMPACT.value) + "-" +
                str(detector_class.CONFIDENCE.value) + "-" + detector["check"])

    rule = {
        "id": check_id,
        "name": detector["check"],
        "properties": {
            "precision": confidence,
            "security-severity": risk
        },
        "shortDescription": {
            "text": detector_class.WIKI_TITLE
        },
        "help": {
            "text": detector_class.WIKI_RECOMMENDATION
        },
    }
    # Add the rule if it does not exist yet
    if len([
            x for x in sarif["runs"][0]["tool"]["driver"]["rules"]
            if x["id"] == check_id
    ]) == 0:
        sarif["runs"][0]["tool"]["driver"]["rules"].append(rule)

    if not detector["elements"]:
        logger.info(
            yellow(
                "Cannot generate Github security alert for finding without location"
            ))
        logger.info(yellow(detector["description"]))
        logger.info(
            yellow("This will be supported in a future Slither release"))
        return

    # From 3.19.10 (http://docs.oasis-open.org/sarif/sarif/v2.0/csprd01/sarif-v2.0-csprd01.html)
    # The locations array SHALL NOT contain more than one element unless the condition indicated by the result,
    # if any, can only be corrected by making a change at every location specified in the array.
    finding = detector["elements"][0]
    path = finding["source_mapping"]["filename_relative"]
    start_line = finding["source_mapping"]["lines"][0]
    end_line = finding["source_mapping"]["lines"][-1]

    sarif["runs"][0]["results"].append({
        "ruleId":
        check_id,
        "message": {
            "text": detector["description"],
            "markdown": detector["markdown"]
        },
        "level":
        "warning",
        "locations": [{
            "physicalLocation": {
                "artifactLocation": {
                    "uri": path
                },
                "region": {
                    "startLine": start_line,
                    "endLine": end_line
                },
            }
        }],
        "partialFingerprints": {
            "id": detector["id"]
        },
    })
Example #27
def parse_args(detector_classes, printer_classes):
    parser = argparse.ArgumentParser(
        description=
        'Slither. For usage information, see https://github.com/crytic/slither/wiki/Usage',
        usage="slither.py contract.sol [flag]")

    parser.add_argument('filename', help='contract.sol')

    cryticparser.init(parser)

    parser.add_argument('--version',
                        help='displays the current version',
                        version=require('slither-analyzer')[0].version,
                        action='version')

    group_detector = parser.add_argument_group('Detectors')
    group_printer = parser.add_argument_group('Printers')
    group_misc = parser.add_argument_group('Additional option')

    group_detector.add_argument(
        '--detect',
        help='Comma-separated list of detectors, defaults to all, '
        'available detectors: {}'.format(', '.join(d.ARGUMENT
                                                   for d in detector_classes)),
        action='store',
        dest='detectors_to_run',
        default=defaults_flag_in_config['detectors_to_run'])

    group_printer.add_argument(
        '--print',
        help='Comma-separated list of contract information printers, '
        'available printers: {}'.format(', '.join(d.ARGUMENT
                                                  for d in printer_classes)),
        action='store',
        dest='printers_to_run',
        default=defaults_flag_in_config['printers_to_run'])

    group_detector.add_argument('--list-detectors',
                                help='List available detectors',
                                action=ListDetectors,
                                nargs=0,
                                default=False)

    group_printer.add_argument('--list-printers',
                               help='List available printers',
                               action=ListPrinters,
                               nargs=0,
                               default=False)

    group_detector.add_argument(
        '--exclude',
        help='Comma-separated list of detectors that should be excluded',
        action='store',
        dest='detectors_to_exclude',
        default=defaults_flag_in_config['detectors_to_exclude'])

    group_detector.add_argument(
        '--exclude-informational',
        help='Exclude informational impact analyses',
        action='store_true',
        default=defaults_flag_in_config['exclude_informational'])

    group_detector.add_argument('--exclude-low',
                                help='Exclude low impact analyses',
                                action='store_true',
                                default=defaults_flag_in_config['exclude_low'])

    group_detector.add_argument(
        '--exclude-medium',
        help='Exclude medium impact analyses',
        action='store_true',
        default=defaults_flag_in_config['exclude_medium'])

    group_detector.add_argument(
        '--exclude-high',
        help='Exclude high impact analyses',
        action='store_true',
        default=defaults_flag_in_config['exclude_high'])

    group_misc.add_argument(
        '--json',
        help=
        'Export the results as a JSON file ("--json -" to export to stdout)',
        action='store',
        default=defaults_flag_in_config['json'])

    group_misc.add_argument('--disable-color',
                            help='Disable output colorization',
                            action='store_true',
                            default=defaults_flag_in_config['disable_color'])

    group_misc.add_argument(
        '--filter-paths',
        help='Comma-separated list of paths for which results will be excluded',
        action='store',
        dest='filter_paths',
        default=defaults_flag_in_config['filter_paths'])

    group_misc.add_argument(
        '--triage-mode',
        help='Run triage mode (save results in slither.db.json)',
        action='store_true',
        dest='triage_mode',
        default=False)

    group_misc.add_argument(
        '--config-file',
        help='Provide a config file (default: slither.config.json)',
        action='store',
        dest='config_file',
        default='slither.config.json')

    group_misc.add_argument('--solc-ast',
                            help='Provide the contract as a json AST',
                            action='store_true',
                            default=False)

    # debugger command
    parser.add_argument('--debug',
                        help=argparse.SUPPRESS,
                        action="store_true",
                        default=False)

    parser.add_argument('--markdown',
                        help=argparse.SUPPRESS,
                        action=OutputMarkdown,
                        default=False)

    group_misc.add_argument('--checklist',
                            help=argparse.SUPPRESS,
                            action='store_true',
                            default=False)

    parser.add_argument('--wiki-detectors',
                        help=argparse.SUPPRESS,
                        action=OutputWiki,
                        default=False)

    parser.add_argument('--list-detectors-json',
                        help=argparse.SUPPRESS,
                        action=ListDetectorsJson,
                        nargs=0,
                        default=False)

    parser.add_argument('--legacy-ast',
                        help=argparse.SUPPRESS,
                        action='store_true',
                        default=defaults_flag_in_config['legacy_ast'])

    parser.add_argument('--ignore-return-value',
                        help=argparse.SUPPRESS,
                        action='store_true',
                        default=defaults_flag_in_config['ignore_return_value'])

    # if the json is split into different files
    parser.add_argument('--splitted',
                        help=argparse.SUPPRESS,
                        action='store_true',
                        default=False)

    if len(sys.argv) == 1:
        parser.print_help(sys.stderr)
        sys.exit(1)

    args = parser.parse_args()

    if os.path.isfile(args.config_file):
        try:
            with open(args.config_file) as f:
                config = json.load(f)
                for key, elem in config.items():
                    if key not in defaults_flag_in_config:
                        logger.info(
                            yellow('{} has an unknown key: {} : {}'.format(
                                args.config_file, key, elem)))
                        continue
                    if getattr(args, key) == defaults_flag_in_config[key]:
                        setattr(args, key, elem)
        except json.decoder.JSONDecodeError as e:
            logger.error(
                red('Impossible to read {}, please check the file {}'.format(
                    args.config_file, e)))

    return args
Example #28
    def detect(self):
        all_results = self._detect()
        results = []
        # only keep valid results, and remove duplicates
        [
            results.append(r) for r in all_results
            if self.slither.valid_result(r) and r not in results
        ]
        if results:
            if self.logger:
                info = '\n'
                for idx, result in enumerate(results):
                    if self.slither.triage_mode:
                        info += '{}: '.format(idx)
                    info += result['description']
                info += 'Reference: {}'.format(self.WIKI)
                self._log(info)
        if self.slither.generate_patches:
            for result in results:
                try:
                    self._format(self.slither, result)
                    if not 'patches' in result:
                        continue
                    result['patches_diff'] = dict()
                    for file in result['patches']:
                        original_txt = self.slither.source_code[file].encode(
                            'utf8')
                        patched_txt = original_txt
                        offset = 0
                        patches = result['patches'][file]
                        patches.sort(key=lambda x: x['start'])
                        if not all(patches[i]['end'] <= patches[i + 1]['end']
                                   for i in range(len(patches) - 1)):
                            self._log(
                                f'Impossible to generate patch; patches collisions: {patches}'
                            )
                            continue
                        for patch in patches:
                            patched_txt, offset = apply_patch(
                                patched_txt, patch, offset)
                        diff = create_diff(self.slither, original_txt,
                                           patched_txt, file)
                        if not diff:
                            self._log(
                                f'Impossible to generate patch; empty {result}'
                            )
                        else:
                            result['patches_diff'][file] = diff

                except FormatImpossible as e:
                    self._log(
                        f'\nImpossible to patch:\n\t{result["description"]}\t{e}'
                    )

        if results and self.slither.triage_mode:
            while True:
                indexes = input(
                    'Results to hide during next runs: "0,1,...,{}" or "All" (enter to not hide results): '
                    .format(len(results)))
                if indexes == 'All':
                    self.slither.save_results_to_hide(results)
                    return []
                if indexes == '':
                    return results
                if indexes.startswith('['):
                    indexes = indexes[1:]
                if indexes.endswith(']'):
                    indexes = indexes[:-1]
                try:
                    indexes = [int(i) for i in indexes.split(',')]
                    self.slither.save_results_to_hide([
                        r for (idx, r) in enumerate(results) if idx in indexes
                    ])
                    return [
                        r for (idx, r) in enumerate(results)
                        if idx not in indexes
                    ]
                except ValueError:
                    self.logger.error(
                        yellow(
                            'Malformed input. Example of valid input: 0,1,2,3')
                    )
        return results