Example #1
    def get_batch_accuracy(self, net, inp, label):
        # Craft adversarial examples for this batch, then measure the
        # network's top-1 accuracy on them.
        adv_inp = self.attack(net, inp, label)

        pred = net(adv_inp)

        accuracy = torch_accuracy(pred, label, (1, ))[0].item()

        return accuracy
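All of the examples on this page call a torch_accuracy helper (sometimes as metrics.torch_accuracy) that is not shown. A minimal sketch of such a top-k accuracy function, written so that each returned element is a 1-element tensor (which is what the indexing and .item() calls above and below assume), might look like this; it is an illustration, not the project's actual implementation:

import torch

def torch_accuracy(output, target, topk=(1,)):
    # Top-k classification accuracy, returned as a list of 1-element
    # tensors holding percentages, one entry per requested k.
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res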
Example #2
def single_crop_eval(model_path, multi_gpu=True, gpu_id=0, info_path='./training_models/answer.txt'):
    ds_val = TorchDataset('validation')

    dl_val = DataLoader(ds_val, batch_size=minibatch_size, shuffle=True, num_workers=num_workers)

    net = SEResNeXt(101, num_class=80)
    if multi_gpu:
        init_trained_weights_parall(net, model_path)
    else:
        init_trained_weights(net, model_path)
    net.cuda(gpu_id)

    ud_loss_m = AvgMeter('ud_loss')
    accuracy_m = AvgMeter('top-1-accuracy')
    top3_accuracy_m = AvgMeter('top-3-accuracy')

    criterion = nn.CrossEntropyLoss().cuda(gpu_id)

    net.eval()
    print('Begin validation!')

    ud_loss_m.reset()
    accuracy_m.reset()
    top3_accuracy_m.reset()
    for i, mn_batch in enumerate(dl_val):
        data = mn_batch['data'].type(torch.FloatTensor)
        label = mn_batch['label'].type(torch.LongTensor).squeeze_()
        inp_var = data.cuda(gpu_id)
        label_var = label.cuda(gpu_id)

        # No gradients are needed during evaluation.
        with torch.no_grad():
            pred = net(inp_var)
            ud_loss = criterion(pred, label_var)

        acc, t3_acc = metrics.torch_accuracy(pred.data, label_var.data, (1, 3))

        ud_loss_m.update(ud_loss.item(), inp_var.size(0))
        accuracy_m.update(acc[0], inp_var.size(0))
        top3_accuracy_m.update(t3_acc[0], inp_var.size(0))

    print('Validation Done!')
    print('ud_loss: {}, accuracy: {}, top-3-accuracy {}'.format(ud_loss_m.mean, accuracy_m.mean, top3_accuracy_m.mean))

    append_answr(info_path, model_path, [ud_loss_m.mean, accuracy_m.mean, top3_accuracy_m.mean])
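Example #2 (and the training loops below) accumulate statistics through AvgMeter objects, whose implementation is also not shown. A minimal stand-in that matches the calls used here (reset(), update(value, n), and a .mean attribute) could be the following sketch; the real class may track additional fields:

class AvgMeter(object):
    # Running (sample-weighted) average of a scalar metric.
    def __init__(self, name='meter'):
        self.name = name
        self.reset()

    def reset(self):
        self.sum = 0.0
        self.count = 0
        self.mean = 0.0

    def update(self, value, n=1):
        # `value` is the per-batch statistic, `n` the number of samples it covers.
        self.sum += float(value) * n
        self.count += n
        self.mean = self.sum / self.count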
Example #3
def adversairal_train_one_epoch(net, optimizer, batch_generator, criterion, AttackMethod, clock, attack_freq=1, use_adv=True):
    """
    Run one epoch of adversarial training.

    :param net: the model being trained
    :param optimizer: optimizer updating the parameters of ``net``
    :param batch_generator: iterable yielding (data, label) minibatches
    :param criterion: loss function
    :param AttackMethod: attack object whose ``attack(net, data, label)`` method returns adversarial inputs
    :param clock: clock object (from my_snip.clock import TrainClock)
    :param attack_freq: one out of every ``attack_freq + 1`` minibatches is trained on adversarial examples
    :param use_adv: if False, every minibatch is trained on clean data
    :return: OrderedDict of the epoch's mean loss/accuracy statistics
    """

    training_losses = AvgMeter()
    training_accs = AvgMeter()

    clean_losses = AvgMeter()
    clean_accs = AvgMeter()

    defense_losses = AvgMeter()
    defense_accs = AvgMeter()
    names = ['loss', 'acc', 'clean_loss', 'clean_acc', 'adv_loss', 'adv_acc']

    clean_batch_times = AvgMeter()
    ad_batch_times = AvgMeter()
    net.train()
    clock.tock()

    pbar = tqdm(batch_generator)

    start_time = time.time()
    for (data, label) in pbar:
        clock.tick()

        data = data.cuda()
        label = label.cuda()


        data_time = time.time() - start_time

        if clock.minibatch % (attack_freq + 1) == 1 and use_adv:
            # Adversarial step: craft adversarial examples for this minibatch
            # and update the network on them.
            adv_inp = AttackMethod.attack(net, data, label)

            net.train()
            optimizer.zero_grad()

            pred = net(adv_inp)
            loss = criterion(pred, label)

            loss.backward()

            optimizer.step()

            defense_losses.update(loss.item())

            acc = torch_accuracy(pred, label, (1, ))

            defense_accs.update(acc[0].item())

            batch_time = time.time() - start_time
            ad_batch_times.update(batch_time)
        else:
            # Clean step: standard supervised update on the unperturbed minibatch.
            optimizer.zero_grad()
            pred = net(data)
            loss = criterion(pred, label)
            loss.backward()
            optimizer.step()
            acc = torch_accuracy(pred, label, (1, ))

            clean_losses.update(loss.item())
            clean_accs.update(acc[0].item())

            batch_time = time.time() - start_time
            clean_batch_times.update(batch_time)

        training_losses.update(loss.item())
        training_accs.update(acc[0].item())

        pbar.set_description("Training Epoch: {}".format(clock.epoch))

        values = [training_losses.mean, training_accs.mean, clean_losses.mean, clean_accs.mean, defense_losses.mean,
                  defense_accs.mean]
        pbar_dic = OrderedDict()
        for n, v in zip(names, values):
            pbar_dic[n] = v
        pbar_dic['clean_time'] = "{:.1f}".format(clean_batch_times.mean)
        pbar_dic['ad_time'] = "{:.1f}".format(ad_batch_times.mean)
        pbar.set_postfix(pbar_dic)

        start_time = time.time()

    values = [training_losses.mean, training_accs.mean, clean_losses.mean, clean_accs.mean, defense_losses.mean,
              defense_accs.mean]

    # Return the epoch statistics in the same order as `names`.
    dic = OrderedDict(zip(names, values))
    return dic
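A driver for the function above could look like the sketch below. The optimizer settings, loader names, epoch count, and the attacker object are assumptions made for illustration; any object exposing attack(net, data, label), such as the FGSM sketch after Example #4, would fit:

import torch
import torch.nn as nn
from my_snip.clock import TrainClock  # clock class referenced in the docstring

net = SEResNeXt(101, num_class=80).cuda()
optimizer = torch.optim.SGD(net.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
criterion = nn.CrossEntropyLoss().cuda()
clock = TrainClock()
attacker = FGSM(eps=8.0 / 255)  # hypothetical attack object, see the sketch after Example #4

for epoch in range(100):
    train_stats = adversairal_train_one_epoch(net, optimizer, train_loader, criterion,
                                              attacker, clock, attack_freq=1, use_adv=True)
    val_stats = adversarial_val(net, val_loader, criterion, attacker, clock, attack_freq=1)
    print(train_stats, val_stats)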
Example #4
def adversarial_val(net, batch_generator, criterion, AttackMethod, clock, attack_freq=1):
    """
    Validate on both clean data and adversarial examples.

    :param net: the model being evaluated
    :param batch_generator: iterable yielding (data, label) minibatches
    :param criterion: loss function
    :param AttackMethod: attack object whose ``attack(net, data, label)`` method returns adversarial inputs
    :param clock: clock object (from my_snip.clock import TrainClock)
    :param attack_freq: one out of every ``attack_freq + 1`` minibatches is evaluated on adversarial examples
    :return: OrderedDict of mean loss/accuracy statistics
    """

    training_losses = AvgMeter()
    training_accs = AvgMeter()

    clean_losses = AvgMeter()
    clean_accs = AvgMeter()

    defense_losses = AvgMeter()
    defense_accs = AvgMeter()
    names = ['loss', 'acc', 'clean_loss', 'clean_acc', 'adv_loss', 'adv_acc']

    clean_batch_times = AvgMeter()
    ad_batch_times = AvgMeter()
    net.eval()


    pbar = tqdm(batch_generator)

    start_time = time.time()
    i = 0
    for (data, label) in pbar:

        i += 1
        data = data.cuda()
        label = label.cuda()


        data_time = time.time() - start_time

        if i % (attack_freq + 1) == 1:
            # Adversarial minibatch: craft adversarial examples and evaluate
            # on them (no parameter updates during validation).
            adv_inp = AttackMethod.attack(net, data, label)

            net.eval()

            with torch.no_grad():
                pred = net(adv_inp)
                loss = criterion(pred, label)

                defense_losses.update(loss.item())

                acc = torch_accuracy(pred, label, (1,))

                defense_accs.update(acc[0].item())

                batch_time = time.time() - start_time
                ad_batch_times.update(batch_time)
        else:

            with torch.no_grad():
                pred = net(data)
                loss = criterion(pred, label)
                acc = torch_accuracy(pred, label, (1,))

                clean_losses.update(loss.item())
                clean_accs.update(acc[0].item())

                batch_time = time.time() - start_time
                clean_batch_times.update(batch_time)

        training_losses.update(loss.item())
        training_accs.update(acc[0].item())

        pbar.set_description("Validation Epoch: {}".format(clock.epoch))

        values = [training_losses.mean, training_accs.mean, clean_losses.mean, clean_accs.mean, defense_losses.mean,
                  defense_accs.mean]
        pbar_dic = OrderedDict()
        for n, v in zip(names, values):
            pbar_dic[n] = v
        pbar_dic['clean_time'] = "{:.2f}".format(clean_batch_times.mean)
        pbar_dic['ad_time'] = "{:.2f}".format(ad_batch_times.mean)
        pbar.set_postfix(pbar_dic)
        start_time = time.time()


    values = [training_losses.mean, training_accs.mean, clean_losses.mean, clean_accs.mean, defense_losses.mean,
              defense_accs.mean]

    # Return the validation statistics in the same order as `names`.
    dic = OrderedDict(zip(names, values))
    return dic
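Examples #3 and #4 only require that AttackMethod expose an attack(net, data, label) method returning perturbed inputs of the same shape. The project's own attack classes are not shown; as a purely illustrative stand-in, a single-step FGSM-style attack with that interface (the class name, epsilon, and the [0, 1] input range are assumptions) could be sketched as:

import torch
import torch.nn as nn

class FGSM(object):
    # Single-step L-infinity attack exposing the attack(net, data, label)
    # interface assumed by the training and validation loops above.
    def __init__(self, eps=8.0 / 255):
        self.eps = eps
        self.criterion = nn.CrossEntropyLoss()

    def attack(self, net, data, label):
        was_training = net.training
        net.eval()

        adv = data.clone().detach().requires_grad_(True)
        loss = self.criterion(net(adv), label)
        grad = torch.autograd.grad(loss, adv)[0]

        # Move each input by eps in the direction that increases the loss,
        # then clamp back to the assumed [0, 1] input range.
        adv = (adv + self.eps * grad.sign()).clamp(0.0, 1.0).detach()

        if was_training:
            net.train()
        return adv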
Example #5 (training-loop fragment)
        # Per-minibatch training step: forward pass, TensorBoard logging,
        # running-average metric updates, backward pass, and optimizer update.
        clock.tick()
        data = mn_batch['data'].type(torch.FloatTensor)
        label = mn_batch['label'].type(torch.LongTensor).squeeze_()

        inp_var = data.cuda(gpu_ids[0])
        label_var = label.cuda(gpu_ids[0])

        optimizer.zero_grad()

        pred = net(inp_var)

        ud_loss = criterion(pred, label_var)

        acc, t3_acc = metrics.torch_accuracy(pred.data, label_var.data, (1, 3))

        writer.add_scalar('Train/un_decay_loss', ud_loss.item(), clock.step)
        writer.add_scalar('Train/top_acc', acc[0], clock.step)
        writer.add_scalar('Train/top_3_acc', t3_acc[0], clock.step)

        ud_loss_m.update(ud_loss.item(), inp_var.size(0))
        accuracy_m.update(acc[0], inp_var.size(0))

        top3_accuracy_m.update(t3_acc[0], inp_var.size(0))

        ud_loss.backward()

        optimizer.step()

        batch_time_m.update(time.time() - start_time)
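The clock object used throughout (tock() once per epoch, tick() once per minibatch, plus the epoch, minibatch, and step counters) comes from my_snip.clock.TrainClock, which is not reproduced here. A minimal stand-in consistent with how it is used in these examples, and only a guess at the real class, would be:

class TrainClock(object):
    # Minimal stand-in: tock() advances the epoch and resets the minibatch
    # counter, tick() advances the minibatch counter and the global step.
    def __init__(self):
        self.epoch = 0
        self.minibatch = 0
        self.step = 0

    def tock(self):
        self.epoch += 1
        self.minibatch = 0

    def tick(self):
        self.minibatch += 1
        self.step += 1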