def start_gpu_thread(part_csv):
    """Run the FoolBox attack for every source/target image pair in part_csv.

    part_csv is a CSV with `source_imgs` and `target_imgs` columns, each
    holding '|'-separated image paths.
    """
    args['datalist'] = part_csv
    attacker = Attacker(transform, img2tensor, args)
    pairs_df = pd.read_csv(part_csv)

    for row_idx in tqdm(pairs_df.index.values):
        row = pairs_df.loc[row_idx]
        attacker.attack_method = attacker.FoolBox
        attacker.attack({'source': row.source_imgs.split('|'),
                         'target': row.target_imgs.split('|')})
def start_gpu_thread(part_csv, logdir):
    """Run the M-DI-2-FGSM attack for every pair listed in part_csv.

    Results/logs go to `logdir`; a pre-existing logdir is removed first so
    each run starts from a clean directory.
    """
    args['logdir'] = logdir
    args['datalist'] = part_csv
    if os.path.exists(logdir):
        shutil.rmtree(logdir)
    attacker = Attacker(transform, img2tensor, args)
    pairs_df = pd.read_csv(part_csv)

    for row_idx in tqdm(pairs_df.index.values):
        row = pairs_df.loc[row_idx]
        attacker.attack_method = attacker.M_DI_2_FGSM
        attacker.attack({'source': row.source_imgs.split('|'),
                         'target': row.target_imgs.split('|')})
Ejemplo n.º 3
0
def trainAndAttackRaw(classifier=None,
                      train_X=None,
                      train_y=None,
                      audioPath=None):
    """Fit `classifier` on (train_X, train_y), then attack it with raw audio.

    The audio at `audioPath` is processed with attack/peak options enabled to
    build the test set. Any failure is logged with a full traceback instead
    of propagating to the caller.
    """
    try:
        print("Imported data " + str(len(train_X)) + " and " +
              str(len(train_y)))
        testData = Data()
        test_X, test_y = testData.process(audioPath, attack=True, peak=True)
        model_creator = Model(classifier=classifier)
        model_creator.fit_data(train_X, train_y)
        attacker = Attacker()
        attacker.attack(X=test_X, y=test_y, model=model_creator)
    except Exception as e:
        # Print the exception itself: `e.__cause__` is almost always None
        # (it is only set by `raise ... from ...`) and hid the real error.
        print("Exception in train and attack raw: ", e)
        traceback.print_exc(file=sys.stdout)
Ejemplo n.º 4
0
def trainAndAttack(classifier=None,
                   train_X=None,
                   train_y=None,
                   test_X=None,
                   test_y=None):
    """Fit `classifier` on the training split, print its clean test accuracy,
    then run the adversarial attack against the fitted model.

    Exceptions are caught and reported rather than propagated.
    """
    try:
        creator = Model(classifier=classifier)
        creator.fit_data(train_X, train_y)
        preds = creator.predict(test_X)
        acc = accuracy_score(test_y, preds)
        print(f"Accuracy for {classifier} : {acc}")
        attacker = Attacker()
        attacker.attack(X=test_X, y=test_y, model=creator)
    except Exception as err:
        print("Exception in train and attack: ", err)
Ejemplo n.º 5
0
class Trainer:
    """Training/evaluation harness for image classifiers (ODE- or ResNet-style).

    Wires together data loaders, the model, optimizer, TensorBoard writer and
    an adversarial Attacker; supports periodic checkpointing and resume.
    """

    def __init__(self, config):
        # Keep the whole config object; individual fields are read throughout.
        self.config = config
        self.ckpt_dir = config.ckpt_dir
        if not os.path.exists(self.ckpt_dir):
            os.makedirs(self.ckpt_dir)

        # Persist the run configuration next to the checkpoints.
        self.save_config(config)
        self.timer = Timer()

        self.lr = config.lr
        self.datasets, self.loaders = get_loader(config.batch_size,
                                                 config.aug,
                                                 data=config.data)
        self.epochs = config.epochs
        self.start_epoch = 0

        # Any model whose name lacks 'resnet' is treated as a neural-ODE model;
        # those expose an `nfe` (number-of-function-evaluations) counter.
        self.is_ode = 'resnet' not in self.config.model
        self.model = ModelSelector[config.model](**{
            'channel': config.channel,
            'num_blocks': config.nb,
            'strides': config.strides,
            'solver': config.solver,
            'adjoint': config.adj,
            'tol': config.tol,
            'num_classes': config.nc
        })
        if self.config.use_gpu:
            self.model = nn.DataParallel(self.model).cuda()

        # Attacker evaluates adversarial robustness on the test loader.
        self.attacker = Attacker(self.model, self.loaders['test'])

        self.criterion = nn.CrossEntropyLoss()
        self.optimizer = optim.SGD(self.model.parameters(),
                                   lr=config.lr,
                                   momentum=0.9,
                                   weight_decay=config.wd)

        if config.resume:
            success = self.load(config.resume_path)
            if success:
                # purge_step discards TensorBoard events logged after the
                # epoch we are resuming from.
                self.writer = SummaryWriter(log_dir=config.ckpt_dir,
                                            purge_step=self.start_epoch)
            else:
                self.writer = SummaryWriter(log_dir=config.ckpt_dir)
                logger.info(self.model)
                logger.info(
                    f'Number of Total Params: {sum(p.numel() for p in self.model.parameters() if p.requires_grad)}'
                )
        else:
            self.writer = SummaryWriter(log_dir=config.ckpt_dir)
            logger.info(self.model)
            logger.info(
                f'Number of Total Params: {sum(p.numel() for p in self.model.parameters() if p.requires_grad)}'
            )

    def train_and_test(self):
        """Run the full train/test loop, checkpointing every 5th epoch."""
        for epoch in range(self.start_epoch, self.epochs):
            logger.info(f"Epoch :[{epoch}/{self.epochs}]")
            self.train(epoch)
            test_acc = self.test(epoch)
            if epoch % 5 == 0:
                self.save(
                    {
                        'net': self.model.state_dict(),
                        'test_acc': test_acc,
                        'epoch': epoch,
                        'optim': self.optimizer.state_dict()
                    }, epoch)

        # self.writer.add_hparams(self.config.__dict__, {'hparam/test_best_acc': test_acc})
        self.writer.close()

    def train(self, epoch):
        """Train one epoch; logs loss/accuracy and (for ODE models) NFE stats."""
        lr = adjust_learning_rate(self.lr, self.optimizer, self.config.lr_d,
                                  epoch)
        self.model.train()
        losses = AveMeter()
        f_nfe_meter = AveMeter()
        b_nfe_meter = AveMeter()
        correct = 0
        for i, (inputs, targets) in enumerate(self.loaders['train']):
            # Additive Gaussian input noise, scaled by config.noise.
            inputs = inputs + self.config.noise * torch.randn_like(inputs)
            if self.config.use_gpu:
                inputs, targets = inputs.cuda(), targets.cuda()
            self.optimizer.zero_grad()

            # Reset the ODE solver's evaluation counter before the forward pass
            # so it measures this batch only.
            if self.is_ode:
                self.model.module.nfe = 0

            outputs = self.model(inputs)

            if self.is_ode:
                nfe_forward = self.model.module.nfe.item()

            # NOTE(review): if config.loss is none of 'ce'/'tr'/'ma', `loss`
            # is unbound and loss.backward() below raises — confirm upstream
            # validation of config.loss.
            if self.config.loss == 'ce':
                loss = self.criterion(outputs, targets)
            elif self.config.loss == 'tr':
                loss = self.criterion(outputs, targets) + trades_loss(
                    self.model,
                    inputs,
                    self.optimizer,
                    distance=self.config.attack_type)
            elif self.config.loss == 'ma':
                loss = madry_loss(self.model, inputs, targets, self.optimizer)

            # Reset again so the backward pass is counted separately.
            if self.is_ode:
                self.model.module.nfe = 0
            loss.backward()
            self.optimizer.step()

            if self.is_ode:
                nfe_backward = self.model.module.nfe.item()
                self.model.module.nfe = 0

            if self.is_ode:
                f_nfe_meter.update(nfe_forward)
                b_nfe_meter.update(nfe_backward)
            losses.update(loss.item(), inputs.size()[0])
            correct += outputs.max(1)[1].eq(targets).sum().item()

        acc = 100 * correct / len(self.datasets['train'])
        logger.info(
            f"Train: [{i + 1}/{len(self.loaders['train'])}] | "
            f"Time: {self.timer.timeSince()} | "
            f"loss: {losses.avg:.4f} | "
            f"acc: {acc:.2f}% | NFE-F: {f_nfe_meter.avg:.2f} | NFE-B: {b_nfe_meter.avg:.2f}"
        )

        self.writer.add_scalar('train/lr', lr, epoch)
        self.writer.add_scalar('train/loss', losses.avg, epoch)
        self.writer.add_scalar('train/acc', acc, epoch)
        self.writer.add_scalar('train/nfe-f', f_nfe_meter.avg, epoch)
        self.writer.add_scalar('train/nfe-b', b_nfe_meter.avg, epoch)
        # Histogram every parameter and its gradient for TensorBoard.
        for name, param in self.model.named_parameters():
            if param.grad is not None:
                self.writer.add_histogram(name,
                                          param.clone().cpu().data.numpy(),
                                          epoch)
                self.writer.add_histogram(
                    name + '/grad',
                    param.grad.clone().cpu().data.numpy(), epoch)

    def test(self, epoch):
        """Evaluate on the test set; returns clean accuracy (percent).

        Every `config.save_freq` epochs (except epoch 0) it additionally runs
        the adversarial attack and logs adversarial accuracy.
        """
        self.model.eval()
        losses = AveMeter()
        correct = 0
        with torch.no_grad():
            for i, (inputs, targets) in enumerate(self.loaders['test']):
                if self.config.use_gpu:
                    inputs, targets = inputs.cuda(), targets.cuda()
                outputs = self.model(inputs)
                loss = self.criterion(outputs, targets)

                losses.update(loss.item(), inputs.size()[0])
                correct += outputs.max(1)[1].eq(targets).sum().item()

            acc = 100 * correct / len(self.datasets['test'])

            out = f"Test: [{i + 1}/{len(self.loaders['test'])}] | " \
                  f"Time: {self.timer.timeSince()} | " \
                  f"loss: {losses.avg:.4f} | " \
                  f"acc: {acc:.2f}%"
            if epoch % self.config.save_freq == 0 and epoch != 0:
                acc_adv, grad_norm = self.attacker.attack(
                    self.config.attack_type, self.config.num_steps,
                    self.config.step_size, self.config.epsilon)

                # Extend the log line with adversarial metrics.
                out = f"Test: [{i + 1}/{len(self.loaders['test'])}] | " \
                      f"Time: {self.timer.timeSince()} | " \
                      f"loss: {losses.avg:.4f} | " \
                      f"acc: {acc:.2f}% | " \
                      f"acc_adv_{self.config.attack_type}_{self.config.num_steps}: {acc_adv:.2f}%  " \
                      f"grad_norm: {grad_norm:.4f}"
                self.writer.add_scalar(
                    f'test/acc_adv_{self.config.attack_type}_{self.config.num_steps}',
                    acc_adv, epoch)
            logger.info(out)

            self.writer.add_scalar('test/loss', losses.avg, epoch)
            self.writer.add_scalar('test/acc', acc, epoch)

        return acc

    def save(self, state, epoch):
        """Write a checkpoint dict to ckpt_dir/ckpt_<epoch>.pt."""
        torch.save(state, os.path.join(self.ckpt_dir, f'ckpt_{epoch}.pt'))
        logger.info('***Saving model***')

    def load(self, path):
        """Load the newest ckpt_*.pt from `path`; returns True on success.

        Restores model/optimizer state and sets start_epoch to the epoch
        after the checkpointed one.
        """
        assert os.path.exists(path), f"resume {path} not exists!"
        ckpt_dict = {}
        # Map each checkpoint filename to its epoch number.
        for file in os.listdir(path):
            if file.startswith('ckpt_'):
                ckpt_dict[file] = int(re.findall('_([0-9]+).pt', file)[0])
        if len(ckpt_dict) == 0:
            logger.info(
                'Do not find any checkpoint file, will train from start!')
            return False
        else:
            # Pick the checkpoint with the highest epoch.
            resume_file = sorted(ckpt_dict.items(), key=lambda x: x[1])[-1][0]
            state = torch.load(
                os.path.join(path, resume_file),
                map_location='cuda' if self.config.use_gpu else 'cpu')
            self.model.load_state_dict(state['net'])
            self.optimizer.load_state_dict(state['optim'])
            self.start_epoch = state['epoch'] + 1
            logger.info('******************************************')
            logger.info(f'Successfully load ckpt from {resume_file}')
            return True

    def save_config(self, config):
        """Dump the config's attributes as JSON into ckpt_dir/config.json."""
        with open(os.path.join(self.ckpt_dir, 'config.json'), 'w+') as f:
            f.write(json.dumps(config.__dict__, indent=4))
        # NOTE(review): redundant — the `with` block already closed the file.
        f.close()
Ejemplo n.º 6
0
    size = test_data.shape[0]
    idx_array = np.arange(size)
    attacker = Attacker(run_tag=opt.run_tag, top_k=opt.topk, e=opt.e,
                        model_type=opt.model, cuda=opt.cuda, normalize=opt.normalize)
    # record of the running time
    start_time = time.time()
    # count the number of the successful instances, mse,iterations,queries
    success_cnt = 0
    right_cnt = 0
    total_mse = 0
    total_iterations = 0
    total_quries = 0
    for idx in idx_array:
        print('###Start %s : generating adversarial example of the %d sample ###' % (opt.run_tag, idx))
        attack_ts, info = attacker.attack(sample_idx=idx, target_class=opt.target_class,
                                          factor=opt.magnitude_factor, max_iteration=opt.maxitr,
                                          popsize=opt.popsize)

        # only save the successful adversarial example
        if info[-1] == 'Success':
            success_cnt = success_cnt + 1
            total_iterations += info[-2]
            total_mse += info[-3]
            total_quries += info[-4]

            file = open('result_' + str(opt.magnitude_factor) + '_' + str(opt.topk) + '_' + opt.model
                        + '/' + opt.run_tag + '/attack_time_series.txt', 'a+')
            file.write('%d %d ' % (idx, info[3]))
            for i in attack_ts:
                file.write('%.4f ' % i)
            file.write('\n')
Ejemplo n.º 7
0
Archivo: main.py Proyecto: unshorn/jAEk
    # session: reflects the session within a DB. It is deprecated. Just set it to ABC
    #user = User("WordpressX", 0, "http://localhost:8080/wp-login.php", login_data = {"log": "admin", "pwd": "admin"}, session="ABC")

    # Crawl without user session. Parameter desc: Name of DB - Privilege level - session
    user = User("Test", 0, session="ABC")

    url = "http://localhost/"
    # Creates the crawler config: URL: start url of the crawler(independent from login) - max_dept: how deep to crawl(link), max_click_depth: how deep to follow events - Crawlspeed: Fast is the best value here
    crawler_config = CrawlConfig("Some Name, doesn't matter",
                                 url,
                                 max_depth=1,
                                 max_click_depth=2,
                                 crawl_speed=CrawlSpeed.Fast)

    # From here on you have nothing to change. If you want no attacking, comment out the lines below.
    logging.info("Crawler started...")
    database_manager = DatabaseManager(user, dropping=True)
    crawler = Crawler(
        crawl_config=crawler_config,
        database_manager=database_manager)  #, proxy="localhost", port=8082)
    crawler.crawl(user)
    logging.info("Crawler finished")

    # If you want no attacking comment out the lines below.
    logging.info("Start attacking...")
    attack_config = AttackConfig(url)
    attacker = Attacker(
        attack_config,
        database_manager=database_manager)  #, proxy="localhost", port=8082)
    attacker.attack(user)
    logging.info("Finish attacking...")
Ejemplo n.º 8
0
def tencent_run_test(event, context):
    """Tencent serverless entry point: build attack args from `event` and run."""
    logging.info('Tencent Serverless.')
    logging.info(context)
    attack_args = process_args_dict(event)
    Attacker(*attack_args).attack()
Ejemplo n.º 9
0
def huawei_run_test(event, context):
    """Huawei serverless entry point: build attack args from `event` and run."""
    logging.info('Huawei Serverless.')
    logging.info(context)
    attack_args = process_args_dict(event)
    Attacker(*attack_args).attack()
Ejemplo n.º 10
0
def aliyun_run_test(event, context):
    """Aliyun serverless entry point; `event` arrives as a JSON string."""
    logging.info('Aliyun Serverless.')
    logging.info(context)
    attack_args = process_args_dict(json.loads(event))
    Attacker(*attack_args).attack()
Ejemplo n.º 11
0
def tencent_run_api(event, context):
    """Tencent API-gateway entry point: attack args come from the query string."""
    logging.info('Tencent Serverless.')
    logging.info(context)
    attack_args = process_args_dict(event['queryString'])
    Attacker(*attack_args).attack()
Ejemplo n.º 12
0
from args_parser import process_args_dict, console_get_args_dict
from attacker import Attacker, Mode

if __name__ == "__main__":
    # print(get_args())
    # Console entry point: collect CLI args, build the Attacker, run the attack.
    args = process_args_dict(console_get_args_dict())
    attacker = Attacker(*args)
    attacker.attack()
Ejemplo n.º 13
0
    # In the Userobject, the first string you set is the name of the crawl run and also the name of the created database.
    # So if you want to keep old runs then just give different names for each crawl


    # The first of the line below, starts a scan with a logged in user.
    # Parameter desc: Name of DB - Privilege level: deprecated(Just let it 0) - URL where the login form is stored - login data as dict. The key is the parameter name in the login form that has to be set -
    # session: reflects the session within a DB. It is deprecated. Just set it to ABC
    #user = User("WordpressX", 0, "http://localhost:8080/wp-login.php", login_data = {"log": "admin", "pwd": "admin"}, session="ABC")


    # Crawl without user session. Parameter desc: Name of DB - Privilege level - session
    user = User("Test", 0, session="ABC")

    url = "http://localhost/"
    # Creates the crawler config: URL: start url of the crawler(independent from login) - max_dept: how deep to crawl(link), max_click_depth: how deep to follow events - Crawlspeed: Fast is the best value here
    crawler_config = CrawlConfig("Some Name, doesn't matter", url, max_depth=1, max_click_depth=2, crawl_speed=CrawlSpeed.Fast)

    # From here on you have nothing to change. If you want no attacking, comment out the lines below.
    logging.info("Crawler started...")
    database_manager = DatabaseManager(user, dropping=True)
    crawler = Crawler(crawl_config=crawler_config, database_manager=database_manager)#, proxy="localhost", port=8082)
    crawler.crawl(user)
    logging.info("Crawler finished")

    # If you want no attacking comment out the lines below.
    logging.info("Start attacking...")
    attack_config = AttackConfig(url)
    attacker = Attacker(attack_config, database_manager=database_manager)#, proxy="localhost", port=8082)
    attacker.attack(user)
    logging.info("Finish attacking...")
Ejemplo n.º 14
0
    # set dataset
    dataset = ImageNet_A(args.input_dir)
    loader = torch.utils.data.DataLoader(dataset,
                                         batch_size=args.batch_size,
                                         shuffle=False)

    # set attacker
    attacker = Attacker(steps=args.steps,
                        max_norm=args.max_norm / 255.0,
                        div_prob=args.div_prob,
                        device=torch.device('cuda'))

    for ind, (img, label_true, label_target, filenames) in enumerate(loader):

        # run attack
        adv = attacker.attack(model, img.cuda(), label_true.cuda(),
                              label_target.cuda())

        # save results
        for bind, filename in enumerate(filenames):
            out_img = adv[bind].detach().cpu().numpy()
            delta_img = np.abs(out_img - img[bind].numpy()) * 255.0

            print('Attack on {}:'.format(os.path.split(filename)[-1]))
            print('Max: {0:.0f}, Mean: {1:.2f}'.format(np.max(delta_img),
                                                       np.mean(delta_img)))

            out_img = np.transpose(out_img, axes=[1, 2, 0]) * 255.0
            out_img = out_img[:, :, ::-1]

            out_filename = os.path.join(output_dir,
                                        os.path.split(filename)[-1])
Ejemplo n.º 15
0
                        help='How many folds to use for cross-validation')
    parser.add_argument('--nocv',
                        type=int,
                        default=False,
                        help='Disables cross-validation')

    args = parser.parse_args()
    if not args.wav:
        print("Can't work without wav file!")

    # FILE IMPORT

    data = Data()
    X, y = data.process(args.wav, attack=bool(args.attack))

    # attack mode
    if args.attack:
        print(args.attack)
        attacker = Attacker()
        attacker.attack(X, y, args.attack)

    # train mode
    else:
        # ML Model creating and training
        model_creator = Model()
        model = model_creator.fit_data(X, y)
        model_creator.dump(args.wav)
        print("Dumped")
        if not args.nocv:
            model_creator.predict_accuracy(X, y)