logger = logging.getLogger(__name__)

# Note: very often, the rejection threshold affects the obtained distance even
# when it does not affect the evasion success rate. This is because changing the
# threshold changes the loss, and therefore the behaviour of the attack.
# (See the illustrative sketch after the option definitions below.)
# Support for a different metric?


@click.command()
@click.argument('domain', type=click.Choice(parsing.domains))
@click.argument('architecture', type=click.Choice(parsing.architectures))
@click.argument('dataset')
@click.argument('counter_attacks', callback=parsing.ParameterList(parsing.supported_attacks))
@click.argument('evasion_attacks', callback=parsing.ParameterList(parsing.supported_attacks))
@click.argument('p', type=click.Choice(parsing.distances), callback=parsing.validate_lp_distance)
@click.argument('rejection_threshold', type=float)
@click.argument('substitute_state_dict_paths', callback=parsing.ParameterList())
@click.option(
    '--state-dict-path',
    type=click.Path(exists=True, file_okay=True, dir_okay=False),
    default=None,
    help='The path to the state-dict file of the model. If None, a pretrained model will be used (if available).')
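# Illustrative sketch (an assumption, not this repository's actual attack loss):
# when the defended model can reject inputs, the rejection threshold typically
# appears inside the objective that the evasion attack optimises, for example as
# a penalty on the detector score. Changing the threshold therefore changes the
# gradients the attack follows, and hence the distance of the adversarial it
# finds, even when the final success rate stays the same.
def example_rejection_aware_loss(classification_loss, detector_score, rejection_threshold):
    # Hypothetical helper: penalise samples whose detector score still exceeds
    # the rejection threshold.
    return classification_loss + max(detector_score - rejection_threshold, 0.0)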
import logging

import click

import detectors
import parsing
import tests
import utils

logger = logging.getLogger(__name__)

# Note: in this test, the rejection threshold means "if you remove the
# corresponding attack, how high does the detector pool have to score in order
# to reject?"


@click.command()
@click.argument('domain', type=click.Choice(parsing.domains))
@click.argument('architecture', type=click.Choice(parsing.architectures))
@click.argument('dataset')
@click.argument('attacks', callback=parsing.ParameterList(parsing.supported_attacks))
@click.argument('p', type=click.Choice(parsing.distances), callback=parsing.validate_lp_distance)
@click.argument('rejection_thresholds', callback=parsing.ParameterList(cast_to=float))
@click.argument('substitute_state_dict_paths', callback=parsing.ParameterList())
@click.option(
    '--state-dict-path',
    type=click.Path(exists=True, file_okay=True, dir_okay=False),
    default=None,
    help='The path to the state-dict file of the model. If None, a pretrained model will be used (if available).')
@click.option(
import logging

import click
import torch

import parsing
import tests
import utils

logger = logging.getLogger(__name__)


@click.command()
@click.argument('domain', type=click.Choice(parsing.domains))
@click.argument('architecture', type=click.Choice(parsing.architectures))
@click.argument('dataset')
@click.argument('attacks', callback=parsing.ParameterList(parsing.supported_attacks))
@click.argument('p', type=click.Choice(parsing.distances), callback=parsing.validate_lp_distance)
@click.option('--state-dict-path', type=click.Path(exists=True, file_okay=True, dir_okay=False),
              default=None, help='The path to the state-dict file of the model. If None, a pretrained model will be used (if available).')
@click.option('--masked-relu', is_flag=True,
              help='If passed, all ReLU layers will be converted to MaskedReLU layers.')
@click.option('--batch-size', type=click.IntRange(1, None), default=50, show_default=True,
              help='The batch size of the dataset.')
@click.option('--device', default='cuda', show_default=True,
              help='The device where the model will be executed.')
@click.option('--cpu-threads', type=click.IntRange(1, None, False), default=None,
              help='The number of PyTorch CPU threads. If unspecified, the default '
              'number is used (usually the number of cores).')
@click.option('--attack-config-file', type=click.Path(exists=True, file_okay=True, dir_okay=False),
              default='default_attack_configuration.cfg', show_default=True,
              help='The path to the file containing the attack configuration.')
@click.option('--misclassification-policy', type=click.Choice(parsing.misclassification_policies),
import logging

import click

import detectors
import parsing
import tests
import utils

logger = logging.getLogger(__name__)

# TODO: the CLI is awkward when you need to pass negative values.


@click.command()
@click.argument('domain', type=click.Choice(parsing.domains))
@click.argument('architecture', type=click.Choice(parsing.architectures))
@click.argument('dataset')
@click.argument('attacks', callback=parsing.ParameterList(parsing.supported_attacks))
@click.argument('p', type=click.Choice(parsing.distances), callback=parsing.validate_lp_distance)
@click.argument('rejection_thresholds', callback=parsing.ParameterList(cast_to=float))
@click.argument('substitute_state_dict_paths', callback=parsing.ParameterList())
@click.option('--state-dict-path', type=click.Path(exists=True, file_okay=True, dir_okay=False),
              default=None, help='The path to the state-dict file of the model. If None, a pretrained model will be used (if available).')
@click.option('--masked-relu', is_flag=True,
              help='If passed, all ReLU layers will be converted to MaskedReLU layers.')
@click.option('--batch-size', type=click.IntRange(1, None), default=50, show_default=True,
              help='The batch size of the dataset.')
@click.option('--device', default='cuda', show_default=True,
              help='The device where the model will be executed.')
@click.option('--cpu-threads', type=click.IntRange(1, None, False), default=None,
              help='The number of PyTorch CPU threads. If unspecified, the default '
              'number is used (usually the number of cores).')
@click.option('--attack-config-file', type=click.Path(exists=True, file_okay=True, dir_okay=False),
              default='default_attack_configuration.cfg', show_default=True,
              help='The path to the file containing the '
        all_success_rates.append(success_rate)
        all_differences_between_averages.append(difference_between_averages)
        all_r2s.append(r_value**2)

    # Return (mean, std) pairs for the success rate, the difference between
    # averages, and the R^2 values.
    return (np.mean(all_success_rates), np.std(all_success_rates)), \
           (np.mean(all_differences_between_averages), np.std(all_differences_between_averages)), \
           (np.mean(all_r2s), np.std(all_r2s))


def pool_selector(pool):
    # Rank pools by mean success rate, breaking ties by mean R^2.
    (success_mean, _), (_, _), (r2_mean, _) = pool
    return (success_mean, r2_mean)


@click.command()
@click.argument('domain')
@click.argument('parameter_set')
@click.argument('atol', type=float)
@click.argument('rtol', type=float)
@click.argument('attacks', callback=parsing.ParameterList())
@click.option('--test-override', type=str, default=None)
def main(domain, parameter_set, atol, rtol, attacks, test_override):
    pool_result = get_r2(domain, parameter_set, atol, rtol, test_override, attacks)
    print(pool_result)


if __name__ == '__main__':
    main()
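# Hypothetical usage sketch (not part of the original script): since
# pool_selector returns (success_mean, r2_mean), it can be used directly as a
# sort key to pick the candidate pool with the highest mean success rate,
# breaking ties by mean R^2, e.g.:
#
#     best_pool = max(pool_results, key=pool_selector)
#
# where pool_results would be a list of get_r2 outputs (an assumed variable).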
@click.option('--rotation', type=float, default=0, show_default=True,
              help='Random rotation (in degrees) in range (-value, +value).')
@click.option(
    '--translation',
    type=float,
    default=0,
    show_default=True,
    help='Random horizontal and vertical translation in range (-value * image_size, +value * image_size).')
@click.option(
    '--adversarial-training',
    callback=parsing.ParameterList(parsing.epsilon_attacks),
    default=None,
    help='The adversarial attack that will be used to compute the adversarials. '
    'If unspecified, disables adversarial training. Requires specifying --adversarial-ratio, --adversarial-p '
    'and --adversarial-eps.')
@click.option('--adversarial-ratio', type=float, default=None,
              help='The ratio of samples that are replaced with adversarials. '
              'Ignored if adversarial training is disabled. '
              'Required if adversarial training is enabled.')
@click.option('--adversarial-p', type=click.Choice(parsing.distances),
              callback=parsing.validate_lp_distance, default=None,
              help='The Lp norm used for adversarial training. '