Example #1
def test(epoch):
    global best_acc
    net.eval()
    test_loss = 0
    correct = 0
    total = 0
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(testloader):
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = net(inputs)
            loss = criterion(outputs, targets)

            test_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()

            progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                % (test_loss/(batch_idx+1), 100.*correct/total, correct, total))

    # Save checkpoint.
    acc = 100.*correct/total
    if acc > best_acc:
        print('Saving..')
        state = {
            'net': net.state_dict(),
            'acc': acc,
            'epoch': epoch,
        }
        if not os.path.isdir('checkpoint'):
            os.mkdir('checkpoint')
        torch.save(state, './checkpoint/ckpt.pth')
        best_acc = acc
    return acc
def train(epoch, lr_change=False):
    print('\nEpoch: %d' % epoch)
    net.train()
    train_loss = 0
    correct = 0
    total = 0

    for batch_idx, (inputs, targets) in enumerate(trainloader):
        inputs, targets = inputs.to(device), targets.to(device)

        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()

        train_loss += loss.item()
        _, predicted = outputs.max(1)
        total += targets.size(0)
        correct += predicted.eq(targets).sum().item()

        progress_bar(
            batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' %
            (train_loss /
             (batch_idx + 1), 100. * correct / total, correct, total))
    def valid(self, epoch):
        self.net.eval()
        self.valid_loss = 0
        total = 0
        correct = 0
        with torch.no_grad():
            for batch_idx, (inputs, targets) in enumerate(self.validloader):
                inputs, targets = inputs.to(self.device), targets.to(self.device)
                outputs = self.net(inputs)
                loss = self.criterion1(outputs, targets)
                self.valid_loss += loss.item()
                _, predicted = outputs.max(1)
                total += targets.size(0)
                correct += predicted.eq(targets).sum().item()
                progress_bar(batch_idx, len(self.validloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' %
                    (self.valid_loss / (batch_idx + 1), 100. * correct / total, correct, total))
        acc = 100. * correct / total
        print('Validation Accuracy: ', acc)
        if self.valid_loss < self.best_loss:
            print('Saving..')
            if not os.path.isdir('checkpoint'):
                os.mkdir('checkpoint')
            self.best_net = copy.deepcopy(self.net)
            self.best_loss = self.valid_loss
            self.early = 0
        else:
            self.early += 1

    def train(self, epoch):
        print('\nEpoch: %d' % epoch)
        self.net.train()
        train_loss = 0
        correct = 0
        total = 0
        for batch_idx, (inputs, targets) in enumerate(self.trainloader):
            inputs, targets = inputs.to(self.device), targets.to(self.device)
            self.optimizer.zero_grad()
            outputs = self.net(inputs)
            try:
                # some custom criteria also take the inputs; fall back when the two-argument call fails
                loss = self.criterion(outputs, targets)
            except TypeError:
                loss = self.criterion(outputs, targets, inputs)
            loss.backward()
            self.optimizer.step()
            train_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            progress_bar(
                batch_idx, len(self.trainloader),
                'Loss: %.3f | Acc: %.3f%% (%d/%d)' %
                (train_loss / (batch_idx + 1), 100. * correct / total, correct, total))
        self.scheduler.step()
        print('Train Loss:', train_loss)
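Every snippet in this collection calls a console helper with the signature progress_bar(current, total, msg=None); a few also use its return value as an elapsed-time string. The helper itself lives in the examples' own utility modules and is not shown here, so the following is only a minimal sketch that is compatible with the calls above, not the original implementation:

import sys
import time

_t0 = [None]

def progress_bar(current, total, msg=None, width=40):
    # Minimal sketch: render a single-line text progress bar.
    # current is the zero-based index of the step that just finished.
    if current == 0:
        _t0[0] = time.time()
    done = int(width * (current + 1) / total)
    bar = '=' * done + '.' * (width - done)
    elapsed = time.time() - (_t0[0] or time.time())
    line = ' [%s] %d/%d | %.1fs' % (bar, current + 1, total, elapsed)
    if msg:
        line += ' | ' + str(msg)
    sys.stdout.write('\r' + line)
    if current + 1 >= total:
        sys.stdout.write('\n')
    sys.stdout.flush()
    return '%.1fs' % elapsed  # some snippets store this as tot_time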
Example #5
def main():
    try:
        FORMAT = '%(asctime)-15s %(clientip)s %(user)-8s %(message)s'

        # logging.basicConfig(format=FORMAT, level=logging.DEBUG)

        list_of_files = glob.glob(MATCHES_FOLDER + '/*.json')
        latest_file = max(list_of_files, key=os.path.getctime)

        logging.info('Found %d files', len(list_of_files))

        out_data = []
        total = len(list_of_files)

        for i, filename in enumerate(list_of_files):
            with open(filename, 'rt') as in_file:
                try:
                    data = json.loads(in_file.read())
                    if data:
                        res = reparse(data)
                        if res:
                            out_data.append(res)

                except json.decoder.JSONDecodeError:
                    pass
                except Exception as e:
                    logging.exception('Failed to read file %s', filename)
            progress_bar(i, total)

        with open(SAVE_FILE, 'wt') as out_file:
            json.dump(out_data, out_file)

    except Exception as e:
        logging.exception('Failed to enumerate files')
Example #6
def test(model, criterion, testloader, device):
    model.eval()
    print(
        '---------------------------------------------Test------------------------------------------'
    )
    test_loss = 0
    correct = 0
    for batch_idx, (data, target) in enumerate(testloader):
        data, target = data.to(device), target.to(device)
        output = model(data, device)
        test_loss += criterion(output, target).item()
        pred = output.data.max(1, keepdim=True)[1]
        # keepdim preserves the extra dimension; .max() (and .min()) return a (values, indices) tuple
        correct += pred.eq(target.data.view_as(pred)).sum()  # works without an explicit .cpu() before .sum()

        progress_bar(batch_idx, len(testloader), msg='test')

    test_loss /= len(testloader.dataset)
    # standard pattern; note that len(testloader) is the number of batches while len(testloader.dataset) is the number of samples

    print('\nTestset: Average loss: %.4f, Accuracy: %d/%d (%%%.2f)\n' %
          (test_loss, correct, len(testloader.dataset),
           100.0 * float(correct) / len(testloader.dataset)))
    return (test_loss, float(correct) / len(testloader.dataset))
Example #7
def test(epoch, test_loader, save=True):
    global best_acc
    net.eval()

    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    end = time.time()

    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(test_loader):
            if use_cuda:
                inputs, targets = inputs.cuda(), targets.cuda()
            outputs = net(inputs)
            loss = criterion(outputs, targets)

            # measure accuracy and record loss
            prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
            losses.update(loss.item(), inputs.size(0))
            top1.update(prec1.item(), inputs.size(0))
            top5.update(prec5.item(), inputs.size(0))
            # timing
            batch_time.update(time.time() - end)
            end = time.time()

            progress_bar(
                batch_idx, len(test_loader),
                'Loss: {:.3f} | Acc1: {:.3f}% | Acc5: {:.3f}%'.format(
                    losses.avg, top1.avg, top5.avg))

    if save:
        writer.add_scalar('loss/test', losses.avg, epoch)
        writer.add_scalar('acc/test_top1', top1.avg, epoch)
        writer.add_scalar('acc/test_top5', top5.avg, epoch)

        is_best = False
        if top1.avg > best_acc:
            best_acc = top1.avg
            is_best = True

        print('Current best acc: {}'.format(best_acc))
        save_checkpoint(
            {
                'epoch': epoch,
                'model': args.model,
                'dataset': args.dataset,
                'state_dict': net.module.state_dict()
                if isinstance(net, nn.DataParallel) else net.state_dict(),
                'acc': top1.avg,
                'optimizer': optimizer.state_dict(),
            },
            is_best,
            checkpoint_dir=log_dir)
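Several of the examples (e.g. #7, #16 and #25) also rely on AverageMeter and accuracy helpers that are not reproduced on this page. A minimal sketch of the usual implementations, assuming the conventional top-k accuracy definition:

import torch

class AverageMeter(object):
    # Tracks the most recent value and the running average of a scalar.
    def __init__(self):
        self.val = 0.0
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

def accuracy(output, target, topk=(1,)):
    # Top-k precision (in percent) for each requested k, computed over one batch.
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res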
Example #8
def val(net, criterion):
    dataset = VOCDetection(None, transform=SSDAugmentation(cfg['min_dim'], MEANS, 'val'), phase='val')
    data_loader = data.DataLoader(dataset, 1, num_workers=1, collate_fn=detection_collate, pin_memory=True)

    data_size = len(dataset)
    batch_iterator = iter(data_loader)
    loss_all = 0

    with torch.no_grad():
        for step in range(data_size):
            images, targets = next(batch_iterator)
            images = images.cuda()
            targets = [ann.cuda() for ann in targets]

            out = net(images)
            loss_l, loss_c = criterion(out, targets)
            loss = loss_l + loss_c
            loss_all = loss_all + loss.item()

            # step + 1 avoids a division by zero on the first batch
            progress_bar(step, data_size, ' avg loss %.4f' % (loss_all / (step + 1)))

    return loss_all / data_size
def eval(netC, identity_grid, noise_grid, test_dl, opt):
    print(" Eval:")
    acc_clean = 0.0
    acc_bd = 0.0
    total_sample = 0
    total_correct_clean = 0
    total_correct_bd = 0

    for batch_idx, (inputs, targets) in enumerate(test_dl):
        inputs, targets = inputs.to(opt.device), targets.to(opt.device)
        bs = inputs.shape[0]
        total_sample += bs

        # Evaluating clean
        preds_clean = netC(inputs)
        correct_clean = torch.sum(torch.argmax(preds_clean, 1) == targets)
        total_correct_clean += correct_clean
        acc_clean = total_correct_clean * 100.0 / total_sample

        # Evaluating backdoor
        grid_temps = (identity_grid + opt.s * noise_grid / opt.input_height) * opt.grid_rescale
        grid_temps = torch.clamp(grid_temps, -1, 1)
        inputs_bd = F.grid_sample(inputs, grid_temps.repeat(bs, 1, 1, 1), align_corners=True)
        if opt.attack_mode == "all2one":
            targets_bd = torch.ones_like(targets) * opt.target_label
        if opt.attack_mode == "all2all":
            targets_bd = torch.remainder(targets, opt.num_classes)
        preds_bd = netC(inputs_bd)
        correct_bd = torch.sum(torch.argmax(preds_bd, 1) == targets_bd)
        total_correct_bd += correct_bd
        acc_bd = total_correct_bd * 100.0 / total_sample

        progress_bar(batch_idx, len(test_dl), "Acc Clean: {:.3f} | Acc Bd: {:.3f}".format(acc_clean, acc_bd))
    return acc_clean, acc_bd
Example #10
def train(epoch):
    print('\nEpoch: %d' % epoch)
    net.train()
    train_loss = 0
    for batch_idx, (images, loc_targets,
                    conf_targets) in enumerate(trainloader):
        if use_cuda:
            images = images.cuda()
            loc_targets = loc_targets.cuda()
            conf_targets = conf_targets.cuda()

        images = Variable(images)
        loc_targets = Variable(loc_targets)
        conf_targets = Variable(conf_targets)

        optimizer.zero_grad()
        loc_preds, conf_preds = net(images)
        loss = criterion(loc_preds, loc_targets, conf_preds, conf_targets)
        loss.backward()
        optimizer.step()

        train_loss += loss.item()
        progress_bar(
            batch_idx, len(trainloader), "iter Loss: %.3f | Avg Loss: %.3f" %
            (loss.item(), train_loss / (batch_idx + 1)))
Example #11
def train(epoch, model, criterion, optimizer, trainloader, device):
    model.train()
    print(
        '---------------------------------------------Epoch: %d--------------------------------------------------'
        % epoch)
    total_loss_train = 0  # accumulated for tensorboard logging
    # (two standard styles of formatted output in Python are used below)
    for batch_idx, (data, target) in enumerate(trainloader):
        data, target = data.to(device), target.to(device)
        model.zero_grad()
        # data is passed to the model instance and forward() runs automatically,
        # so this line can be read as output = forward(data)
        output = model(data, device)
        # loss is already the overall loss for this batch, so there is no need for loss.item() * batch_size
        loss = criterion(output, target)
        total_loss_train += loss.item()
        # output is 2-D (batch x classes) and target is 1-D (class indices); that is exactly
        # what NLLLoss / CrossEntropyLoss expect, so no reshaping is needed here
        loss.backward()
        optimizer.step()
        # one forward and one backward pass per batch
        progress_bar(batch_idx, len(trainloader),
                     'loss: ' + str('%.4f' % loss.item()) + ' | train')
        # loss.item() is enough here because loss is a single-element tensor;
        # .detach().item() is the more general form when only the raw value is needed

    # return '%.4f' % (total_loss_train / len(trainloader.dataset))
    return total_loss_train / len(trainloader.dataset)
Example #12
def testEncoder(args, encoder, testloader, device):
    test_loss = 0
    test_correct = 0
    test_total = 0
    test_correct_m1 = 0
    test_correct_0 = 0
    test_correct_1 = 0
    criterion = nn.CrossEntropyLoss()
    encoder.cnn.eval()
    encoder.connect.eval()
    for batch_idx, (img1, img2, targets) in enumerate(testloader):
        targets = targets.to(device)
        with torch.no_grad():
            _,_,outputs,_ = encoder.predconnectfromimg(img1, img2)
        cnn_targets = targets.clone()
        cnn_targets[targets==-1] = 0
        loss = criterion(outputs, cnn_targets)

        test_loss += loss.item()
        _, predicted = outputs.max(1)
        test_total += targets.size(0)
        test_correct += predicted.eq(cnn_targets).sum().item()

        test_correct_m1 += predicted[targets == -1].eq(cnn_targets[targets == -1]).sum().item()
        test_correct_0 += predicted[targets == 0].eq(cnn_targets[targets == 0]).sum().item()
        test_correct_1 += predicted[targets == 1].eq(cnn_targets[targets == 1]).sum().item()

        progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                     % (test_loss/(batch_idx+1), 100.*test_correct/test_total, test_correct, test_total))
    print("same node:{}%; inconnect node:{}%; connect node:{}%".format(100.*test_correct_m1/test_total, 100.*test_correct_0/test_total, 100.*test_correct_1/test_total))
def save_augs(JPEG_dir, anno_dir, idx_i, aug_img, aug_bb, imgid):
    progress_bar(idx_i, len(train_dataset), " Augmenting.........")
    save_path_img = os.path.join(JPEG_dir, "{}.jpg".format(imgid))
    imageio.imwrite(save_path_img, aug_img)
    save_path_xml = os.path.join(anno_dir, "{}.xml".format(imgid))
    bbox = []
    for bb in aug_bb.bounding_boxes:
        bbox.append([bb.x1, bb.y1, bb.x2, bb.y2, 1])
    write_pascal_annotation_aug(save_path_img, bbox, save_path_xml)
Example #14
def train_(train_iter, net, opt, loss_function, loss_type, ind_ignore,
           n_classes):
    net.train()
    train_loss = 0
    total = 0
    # Create the confusion matrix
    cm = np.zeros((n_classes, n_classes))
    nTrain = train_iter.nbatches
    for batch_idx in range(nTrain):
        all_data = train_iter.next()
        data = all_data[0]
        target = all_data[1]

        data, target = data.transpose((0, 3, 1, 2)), target.transpose(
            (0, 3, 1, 2))
        data, target = torch.from_numpy(data), torch.from_numpy(target)
        data, target = data.cuda(), target.cuda()
        data, target = Variable(data), Variable(target)
        opt.zero_grad()

        output = net(data)
        target = target.type(torch.FloatTensor).cuda()

        _, target_indices = torch.max(target, 1)
        _, output_indices = torch.max(output, 1)
        flattened_output = output_indices.view(-1)
        flattened_target = target_indices.view(-1)

        if loss_type == 'cce_soft':
            loss = cce_soft(output, target, ignore_label=ind_ignore)
        else:
            loss = loss_function(output, target_indices)

        cm = confusion_matrix(cm,
                              flattened_output.data.cpu().numpy(),
                              flattened_target.data.cpu().numpy(), n_classes)
        loss.backward()
        nn.utils.clip_grad_norm_(net.parameters(), max_norm=4)
        opt.step()

        train_loss += loss.item()
        _, predicted = torch.max(output.data, 1)
        total += target.size(0)

        progress_bar(batch_idx, nTrain,
                     'Loss: %.3f' % (train_loss / (batch_idx + 1)))

        del (output)
        del (loss)
        del (flattened_output)
        del (output_indices)

    jaccard_per_class, jaccard, accuracy = compute_metrics(cm)
    metrics_string = print_metrics(train_loss, nTrain, n_classes,
                                   jaccard_per_class, jaccard, accuracy)
    print(metrics_string)
    return jaccard, jaccard_per_class, accuracy, train_loss / (nTrain)
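Example #14 accumulates a confusion matrix and derives Jaccard and accuracy metrics through helpers whose code is not shown. A minimal sketch under the assumption that rows index the predicted class and columns the ground-truth class:

import numpy as np

def confusion_matrix(cm, predictions, targets, n_classes):
    # Accumulate pairwise counts in place; predictions and targets are flat integer arrays.
    # n_classes is kept only for signature compatibility with the call in Example #14.
    np.add.at(cm, (predictions, targets), 1)
    return cm

def compute_metrics(cm):
    # Per-class IoU (Jaccard), mean IoU and overall accuracy from the confusion matrix.
    tp = np.diag(cm)
    fp = cm.sum(axis=1) - tp
    fn = cm.sum(axis=0) - tp
    jaccard_per_class = tp / np.maximum(tp + fp + fn, 1)
    accuracy = tp.sum() / np.maximum(cm.sum(), 1)
    return jaccard_per_class, jaccard_per_class.mean(), accuracy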
Example #15
def train(epoch):
    model.train()
    print('----------------------------------------Epoch: {}----------------------------------------'.format(epoch))
    for batch_idx, (data, target) in enumerate(trainloader):
        data, target = data.cuda(), target.cuda()
        model.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        progress_bar(batch_idx, len(trainloader), 'loss: ' + str('{:.4f}'.format(loss.data.item())) + ' | train')
Example #16
def train(epoch, train_loader):
    print('\nEpoch: %d' % epoch)
    net.train()

    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    end = time.time()
    for batch_idx, (inputs, targets) in enumerate(train_loader):
        optimizer.zero_grad()
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        if not args.mixup:
            outputs = net(inputs)
            loss = criterion(outputs, targets)
        else:
            inputs, targets_a, targets_b, lam = mixup_data(inputs,
                                                           targets,
                                                           use_cuda=use_cuda)
            outputs = net(inputs)
            loss = mixup_criterion(criterion, outputs, targets_a, targets_b,
                                   lam)

        loss.backward()
        optimizer.step()

        # measure accuracy and record loss
        prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
        losses.update(loss.item(), inputs.size(0))
        if not args.mixup:
            top1.update(prec1.item(), inputs.size(0))
        else:
            _, predicted = torch.max(outputs.data, 1)
            prec1 = (
                lam * predicted.eq(targets_a.data).cpu().sum().float() +
                (1 - lam) * predicted.eq(targets_b.data).cpu().sum().float())
            prec1 = prec1 * 100 / inputs.size(0)
            top1.update(prec1, inputs.size(0))

        top5.update(prec5.item(), inputs.size(0))
        # timing
        batch_time.update(time.time() - end)
        end = time.time()

        progress_bar(
            batch_idx, len(train_loader),
            'Loss: {:.3f} | Acc1: {:.3f}% | Acc5: {:.3f}%'.format(
                losses.avg, top1.avg, top5.avg))
        # disp_mask(net, args.prune_layer)
    writer.add_scalar('loss/train', losses.avg, epoch)
    writer.add_scalar('acc/train_top1', top1.avg, epoch)
    writer.add_scalar('acc/train_top5', top5.avg, epoch)
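Example #16 calls mixup_data and mixup_criterion, which are defined elsewhere in its repository. A sketch following the standard mixup formulation (the names, defaults and the Beta(alpha, alpha) sampling here are assumptions):

import numpy as np
import torch

def mixup_data(x, y, alpha=1.0, use_cuda=True):
    # Returns mixed inputs, the two sets of targets being mixed, and the mixing coefficient lam.
    lam = np.random.beta(alpha, alpha) if alpha > 0 else 1.0
    index = torch.randperm(x.size(0))
    if use_cuda:
        index = index.cuda()
    mixed_x = lam * x + (1 - lam) * x[index, :]
    return mixed_x, y, y[index], lam

def mixup_criterion(criterion, pred, y_a, y_b, lam):
    # The loss is the same convex combination of the losses against both target sets.
    return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)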
Example #17
    def _validate(self, epoch, net, criterion, valid_loader, best_acc):

        # Switch the model (net) to .eval() mode.
        net.eval()

        # Prepare an empty array to collect per-batch validation losses.
        h = np.array([])

        valid_loss = 0
        correct = 0
        total = 0

        # validation steps
        with torch.no_grad():
            for i, (inputs, labels) in enumerate(valid_loader):
                inputs, labels = inputs.to(self.device), labels.to(self.device)
                outputs = net(inputs)
                loss = criterion(outputs, labels)
                h = np.append(h, loss.item())

                valid_loss += loss.item()
                _, predicted = outputs.max(1)
                total += labels.size(0)
                correct += predicted.eq(labels).sum().item()

                utils.progress_bar(i, len(valid_loader), 'Loss: %.4f | Acc: %.4f%% (%d/%d)'
                                   % (valid_loss / (i + 1), 100. * correct / total, correct, total))

            valid_loss = np.mean(h)

        # Save checkpoint.
        model_checkpoints_folder = os.path.join('./weights', 'checkpoints')
        os.makedirs(model_checkpoints_folder, exist_ok=True)  # make sure the folder exists before saving

        # Compute the accuracy for this epoch.
        # The model from the first epoch is always saved; from then on, the model is saved
        # only when its accuracy beats the best accuracy seen in all previous epochs.
        acc = 100. * correct / total
        if acc > best_acc:
            print('Saving..')
            state = {
                'net': net.state_dict(),
                'acc': acc,
                'epoch': epoch,
            }
            torch.save(state, os.path.join(model_checkpoints_folder, 'model.pth'))
            best_acc = acc

        # Switch the model back to .train() mode for the next epoch.
        net.train()
        return valid_loss, best_acc
Example #18
    def train_epoch(self, data):
        tr_loss = AverageMeter()
        self.epoch_reset()
        update=0
        for step,  batch in enumerate(data):
            self.model.train()

            input_ids,src_len, input_mask, segment_ids, src_token,label_ids,tgt_len,tgt_token = batch
            input_ids = input_ids.to(self.device)
            input_mask = input_mask.to(self.device)
            segment_ids = segment_ids.to(self.device)
            label_ids = label_ids.to(self.device)

            logits = self.model(input_ids, input_mask, segment_ids)
            loss = self.criterion(output=logits, target=label_ids)
            if len(self.n_gpu) >= 2:
                loss = loss.mean()
            # scale the loss when accumulating gradients, then always backpropagate
            if self.gradient_accumulation_steps > 1:
                loss = loss / self.gradient_accumulation_steps
            loss.backward()
            clip_grad_norm_(self.model.parameters(), self.grad_clip)

            if (step + 1) % self.gradient_accumulation_steps == 0:
                self.optimizer.step()
                self.optimizer.zero_grad()
                self.global_step += 1
            tr_loss.update(loss.item(), n=1)

            # store the outputs and targets produced during training
            self.outputs.append(logits.cpu().detach())
            self.targets.append(label_ids.cpu().detach())
            update+=1
            progress_bar(update, self.config.train_nbatchs)

        print("\n------------- train result --------------")
        # epoch metric
        self.outputs = torch.cat(self.outputs, dim=0).cpu().detach()
        self.targets = torch.cat(self.targets, dim=0).cpu().detach()
        self.result['loss'] = tr_loss.avg
        if self.epoch_metrics:
            for metric in self.epoch_metrics:
                metric(logits=self.outputs, target=self.targets)
                value = metric.value()
                if value:
                    self.result[f'{metric.name()}'] = value
        if "cuda" in str(self.device):
            torch.cuda.empty_cache()
        return self.result
Example #19
    def train_epoch(self, epoch):

        print('\nEpoch: {}'.format(epoch))
        logging.info('Epoch: {}'.format(epoch))
        self.net.train()
        train_loss = 0
        correct = 0
        total = 0
        for batch_idx, (inputs, targets) in enumerate(self.trainloader):
            inputs, targets = inputs.to(self.device), targets.to(self.device)
            self.optimizer.zero_grad()
            outputs = self.net(inputs)
            loss = self.criterion(outputs, targets)
            loss.backward()
            self.optimizer.step()

            train_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()

            tot_time = progress_bar(
                batch_idx, len(self.trainloader),
                'Loss: %.3f | Acc: %.3f%% (%d/%d)' %
                (train_loss /
                 (batch_idx + 1), 100. * correct / total, correct, total))
        logging.info('Tot: {} | Loss: {} | Acc: {}% ({}/{})'.format(
            tot_time, train_loss / (batch_idx + 1), 100. * correct / total,
            correct, total))
Example #20
def test():
    model.eval()
    print('----------------------------------------Test---------------------------------------------')
    test_loss = 0
    correct = 0
    for batch_idx, (data, target) in enumerate(testloader):
        data, target = data.cuda(), target.cuda()
        output = model(data)
        test_loss += criterion(output, target).data.item()
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.data.view_as(pred)).cpu().sum()
        progress_bar(batch_idx, len(testloader), 'test')
    test_loss /= len(testloader.dataset)

    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
        test_loss * 8., correct, len(testloader.dataset),
        100.0 * float(correct) / len(testloader.dataset)))
Example #21
    def train_bertsgm_epoch(self,data,epoch):
        self.epoch_reset()
        self.model.train()
        update=0
        for step, batch in enumerate(data):
            src, src_len, src_mask, segment_ids, original_src,tgt, tgt_len, original_tgt = batch
            self.model.zero_grad()
            src = src.to(self.device)
            tgt = tgt.to(self.device)
            src_len = src_len.to(self.device)
            if src_mask is not None:
                src_mask=src_mask.to(self.device)
            segment_ids=segment_ids.to(self.device)

            # Sort the batch by source sequence length.
            lengths, indices = torch.sort(src_len, dim=0, descending=True)
            src = torch.index_select(src, dim=0, index=indices)
            tgt = torch.index_select(tgt, dim=0, index=indices)
            dec = tgt[:, :-1]
            targets = tgt[:, 1:]

            if self.config.sgm.schesamp:
                if epoch > 5:
                    e = epoch - 5
                    loss, outputs = self.model(src, src_mask, lengths, dec, targets, segment_ids,self.criterion,teacher_ratio=0.9 ** e)
                else:
                    loss, outputs = self.model(src, src_mask, lengths, dec, targets,segment_ids,self.criterion)
            else:

                loss, outputs = self.model(src, src_mask, lengths, dec, targets, segment_ids, self.criterion)

            targets = targets.t()
            # total number of target labels (excluding padding)
            num_total = targets.ne(dict_helper.PAD).sum().item()
            if self.config.sgm.max_split == 0:
                loss = torch.sum(loss) / num_total
                loss.backward()
            self.optimizer.step()

            # report progress
            update += 1
            progress_bar(update, self.config.train_nbatchs)

        # update the learning rate
        self.optimizer.updateLearningRate(score=0, epoch=epoch)
Example #22
def wider_ped_eval(input, gt, ignore_file):
    aap = []
    threads = []
    for ove in np.arange(0.5, 1.0, 0.05):
        # pedestrian_eval(aap, input, gt,ignore_file, ovthresh=ove)
        t = threading.Thread(target=pedestrian_eval,
                             args=(aap, input, gt, ignore_file),
                             kwargs={'ovthresh': ove})
        threads.append(t)
        t.start()
        time.sleep(5)

    print("Total threads:{}".format(len(threads)))
    for index, t in enumerate(threads):
        progress_bar(index, len(threads), " executing.")
        t.join()
    mAP = np.average(aap)
    return mAP
Example #23
def train(epoch):
    global best_acc
    print('\nEpoch: %d' % epoch)
    net.train()
    train_loss = 0
    correct = 0
    total = 0
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()

        train_loss += loss.item()
        _, predicted = outputs.max(1)
        total += targets.size(0)
        correct += predicted.eq(targets).sum().item()

        progress_bar(
            batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' %
            (train_loss /
             (batch_idx + 1), 100. * correct / total, correct, total))

    # Save checkpoint.
    acc = 100. * correct / total
    if acc > best_acc:
        print('Saving..')
        state = {
            'net': net.state_dict(),
            'acc': acc,
            'epoch': epoch,
        }
        if not os.path.isdir('../outputs/checkpoint/'):
            os.makedirs('../outputs/checkpoint/')
        torch.save(
            state, '../outputs/checkpoint/' + str(model_name) + '_' +
            str(args.client) + '.t7')
        best_acc = acc

    if best_acc > 99.5: exit()
Example #24
    def test_epoch(self, epoch, checkpoint='ckpt.t7'):

        self.net.eval()
        test_loss = 0
        correct = 0
        total = 0
        tot_time = ''
        with torch.no_grad():
            for batch_idx, (inputs, targets) in enumerate(self.validloader):
                inputs, targets = inputs.to(self.device), targets.to(
                    self.device)
                outputs = self.net(inputs)
                loss = self.criterion(outputs, targets)

                test_loss += loss.item()
                _, predicted = outputs.max(1)
                total += targets.size(0)
                correct += predicted.eq(targets).sum().item()

                tot_time = progress_bar(
                    batch_idx, len(self.validloader),
                    'Loss: %.3f | Acc: %.3f%% (%d/%d)' %
                    (test_loss /
                     (batch_idx + 1), 100. * correct / total, correct, total))
        logging.info('Tot: {} | Loss: {} | Acc: {}% ({}/{})'.format(
            tot_time, test_loss / (batch_idx + 1), 100. * correct / total,
            correct, total))

        # Save checkpoint.
        acc = 100. * correct / total
        if acc > self.best_acc:
            self.best_acc = acc
            self.best_epoch = epoch
            print('Saving..')
            logging.info('Saving..')
            state = {
                'net': self.net.state_dict(),
                'cri': self.criterion.state_dict(),
                'opt': self.optimizer.state_dict(),
                'sch': self.scheduler.state_dict(),
                'max_epoch': self.max_epoch,
                'acc': self.best_acc,
                'epoch': epoch,
                'best_epoch': self.best_epoch
            }
            if not os.path.isdir('checkpoint'):
                os.mkdir('checkpoint')
            torch.save(state, './checkpoint/' + checkpoint)

        # Early stop
        if epoch - self.best_epoch > self.early_stopping:
            print('Early stopping..')
            logging.info('Early stopping..')
            self.terminate = True
Example #25
def evaluate():
    # build dataset
    val_loader, n_class = get_dataset()
    # build model
    net = get_model(n_class)

    criterion = nn.CrossEntropyLoss()

    if use_cuda:
        net = net.cuda()
        net = torch.nn.DataParallel(net, list(range(args.n_gpu)))
        cudnn.benchmark = True

    # begin eval
    net.eval()

    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    end = time.time()

    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(val_loader):
            if use_cuda:
                inputs, targets = inputs.cuda(), targets.cuda()
            inputs, targets = Variable(inputs), Variable(targets)
            outputs = net(inputs)
            loss = criterion(outputs, targets)

            # measure accuracy and record loss
            prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
            losses.update(loss.item(), inputs.size(0))
            top1.update(prec1.item(), inputs.size(0))
            top5.update(prec5.item(), inputs.size(0))
            # timing
            batch_time.update(time.time() - end)
            end = time.time()

            progress_bar(batch_idx, len(val_loader), 'Loss: {:.3f} | Acc1: {:.3f}% | Acc5: {:.3f}%'
                         .format(losses.avg, top1.avg, top5.avg))
Example #26
    def train_seq2set_epoch(self, data, epoch):
        self.epoch_reset()
        self.model.train()
        update=0
        print('epoch==',epoch)
        for step, batch in enumerate(data):
            src, src_len, src_mask, segment_ids, original_src,tgt, tgt_len, original_tgt = batch
            self.model.zero_grad()
            src = src.to(self.device)
            tgt = tgt.to(self.device)
            src_len = src_len.to(self.device)
            if src_mask is not None:
                src_mask=src_mask.to(self.device)
            segment_ids=segment_ids.to(self.device)

            # Sort the batch by source sequence length.
            lengths, indices = torch.sort(src_len, dim=0, descending=True)
            src = torch.index_select(src, dim=0, index=indices)
            tgt = torch.index_select(tgt, dim=0, index=indices)
            src_len = torch.index_select(src_len, dim=0, index=indices)
            src_mask = torch.index_select(src_mask, dim=0, index=indices)
            segment_ids = torch.index_select(segment_ids, dim=0, index=indices)

            dec = tgt[:, :-1]
            targets = tgt[:, 1:]

            loss = self.model.compute_reward(src, src_mask, src_len, dec, targets, segment_ids)
            loss.backward()

            self.optimizer.step()

            # report progress
            update += 1
            progress_bar(update, self.config.train_nbatchs)

        # update the learning rate
        self.optimizer.updateLearningRate(score=0, epoch=epoch)
Example #27
def train(epoch):
    print('\nEpoch: %d' % epoch)
    net.train()
    train_loss = 0.0
    groups = 0

    for batch_idx, (inputs, targets) in enumerate(train_loader):
        if args.cuda:
            inputs = Variable(inputs.cuda())
            targets = Variable(targets.float().cuda())
        optimizer.zero_grad()
        outputs = net(inputs)

        loss = criterion(outputs, targets)

        loss.backward()
        optimizer.step()
        train_loss += loss.item()

        groups += 1
    vis.plot('train_loss', train_loss / groups)
    progress_bar(epoch, len(train_loader),
                 'Loss: %.3f' % (train_loss / groups))
Example #28
def test(epoch):
    print('\nTest')
    net.eval()
    test_loss = 0
    for batch_idx, (images, loc_targets,
                    conf_targets) in enumerate(testloader):
        if use_cuda:
            images = images.cuda()
            loc_targets = loc_targets.cuda()
            conf_targets = conf_targets.cuda()

        images = Variable(images, volatile=True)
        loc_targets = Variable(loc_targets)
        conf_targets = Variable(conf_targets)

        loc_preds, conf_preds = net(images)
        loss = criterion(loc_preds, loc_targets, conf_preds, conf_targets)
        test_loss += loss.item()
        print('%.3f %.3f' % (loss.item(), test_loss / (batch_idx + 1)))
        progress_bar(
            batch_idx, len(testloader), "iter Loss: %.3f | Avg Loss: %.3f" %
            (loss.item(), test_loss / (batch_idx + 1)))

    # Save checkpoint.
    global best_loss
    test_loss /= len(testloader)
    if test_loss < best_loss:
        print('Saving..')
        state = {
            'net': net.module.state_dict(),
            'loss': test_loss,
            'epoch': epoch,
        }
        if not os.path.isdir('checkpoint'):
            os.mkdir('checkpoint')
        torch.save(state, './checkpoint/ckpt.pth')
        best_loss = test_loss
Example #29
def main():
    # Prepare arguments
    opt = get_arguments().parse_args()

    if opt.dataset == "mnist":
        opt.input_height = 28
        opt.input_width = 28
        opt.input_channel = 1
        netC = NetC_MNIST().to(opt.device)
    else:
        raise Exception("Invalid Dataset")

    mode = opt.attack_mode
    opt.ckpt_folder = os.path.join(opt.checkpoints, opt.dataset)
    opt.ckpt_path = os.path.join(
        opt.ckpt_folder, "{}_{}_morph.pth.tar".format(opt.dataset, mode))
    opt.log_dir = os.path.join(opt.ckpt_folder, "log_dir")

    state_dict = torch.load(opt.ckpt_path)
    print("load C")
    netC.load_state_dict(state_dict["netC"])
    netC.to(opt.device)
    netC.eval()
    netC.requires_grad_(False)
    print("load grid")
    identity_grid = state_dict["identity_grid"].to(opt.device)
    noise_grid = state_dict["noise_grid"].to(opt.device)
    print(state_dict["best_clean_acc"], state_dict["best_bd_acc"])

    # Prepare dataloader
    test_dl = get_dataloader(opt, train=False)

    for name, module in netC._modules.items():
        print(name)

    # Forward hook for getting layer's output
    container = []

    def forward_hook(module, input, output):
        container.append(output)

    hook = netC.layer3.register_forward_hook(forward_hook)

    # Forwarding all the validation set
    print("Forwarding all the validation dataset:")
    for batch_idx, (inputs, _) in enumerate(test_dl):
        inputs = inputs.to(opt.device)
        netC(inputs)
        progress_bar(batch_idx, len(test_dl))

    # Processing to get the "more important mask"
    container = torch.cat(container, dim=0)
    activation = torch.mean(container, dim=[0, 2, 3])
    seq_sort = torch.argsort(activation)
    pruning_mask = torch.ones(seq_sort.shape[0], dtype=bool)
    hook.remove()

    # Pruning times - no-tuning after pruning a channel!!!
    acc_clean = []
    acc_bd = []
    with open("mnist_{}_results.txt".format(opt.attack_mode), "w") as outs:
        for index in range(pruning_mask.shape[0]):
            net_pruned = copy.deepcopy(netC)
            num_pruned = index
            if index:
                channel = seq_sort[index - 1]
                pruning_mask[channel] = False
            print("Pruned {} filters".format(num_pruned))

            net_pruned.layer3.conv1 = nn.Conv2d(pruning_mask.shape[0],
                                                pruning_mask.shape[0] -
                                                num_pruned, (3, 3),
                                                stride=2,
                                                padding=1,
                                                bias=False)
            net_pruned.linear6 = nn.Linear(
                (pruning_mask.shape[0] - num_pruned) * 16, 512)

            # Re-assigning weight to the pruned net
            for name, module in net_pruned._modules.items():
                if "layer3" in name:
                    module.conv1.weight.data = netC.layer3.conv1.weight.data[
                        pruning_mask]
                    module.ind = pruning_mask
                elif "linear6" == name:
                    module.weight.data = netC.linear6.weight.data.reshape(
                        -1, 64,
                        16)[:, pruning_mask].reshape(512,
                                                     -1)  # [:, pruning_mask]
                    module.bias.data = netC.linear6.bias.data
                else:
                    continue
            net_pruned.to(opt.device)
            clean, bd = eval(net_pruned, identity_grid, noise_grid, test_dl,
                             opt)
            outs.write("%d %0.4f %0.4f\n" % (index, clean, bd))
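Example #29 collects intermediate activations with a forward hook before ranking channels for pruning. The hook pattern in isolation, on a throwaway model (the model and the hooked layer here are purely illustrative; the example itself hooks NetC_MNIST.layer3):

import torch
import torch.nn as nn

# Illustrative model used only to demonstrate the hook mechanics.
model = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.ReLU(), nn.Conv2d(8, 16, 3, padding=1))

container = []

def forward_hook(module, inputs, output):
    # Runs after the module's forward(); stash the activation for later analysis.
    container.append(output.detach())

hook = model[2].register_forward_hook(forward_hook)

with torch.no_grad():
    for _ in range(4):  # stand-in for iterating over a dataloader
        model(torch.randn(2, 3, 32, 32))

hook.remove()

# Mean activation per output channel (over batch and spatial dims), sorted ascending,
# mirroring the ranking step Example #29 performs before pruning.
activation = torch.mean(torch.cat(container, dim=0), dim=[0, 2, 3])
print(torch.argsort(activation))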
Example #30
def test():
    with tf.Graph().as_default(), tf.device('/gpu:0'):  # Use GPU 0
        # Training parameters
        # Count the number of training & eval data
        num_data = utils.count_text_lines(args.test_filenames_file)
        print('===> Test: there are %d test files in total' % num_data)

        steps_per_epoch = np.ceil(num_data / args.batch_size).astype(np.int32)

        num_total_steps = args.max_epoches * steps_per_epoch
        # Load data
        data_loader = Dataloader(test_dataloader_params,
                                 shuffle=False)  # no shuffle
        # Debug test train_dataloader
        # test_synthetic_dataloader(data_loader, True)

        I1_batch = data_loader.I1_batch
        I2_batch = data_loader.I2_batch
        I1_aug_batch = data_loader.I1_aug_batch
        I2_aug_batch = data_loader.I2_aug_batch
        I_batch = data_loader.I_batch
        I_prime_batch = data_loader.I_prime_batch
        full_I_batch = data_loader.full_I_batch
        full_I_prime_batch = data_loader.full_I_prime_batch
        pts1_batch = data_loader.pts1_batch
        gt_batch = data_loader.gt_batch
        patch_indices_batch = data_loader.patch_indices_batch

        # Train on multiple GPU:
        h_losses = []
        # Create a session
        gpu_options = tf.GPUOptions(
            allow_growth=True
        )  # Does not pre-allocate large, increase if needed
        config = tf.ConfigProto(
            allow_soft_placement=True, gpu_options=gpu_options
        )  # soft_placement allows to work on CPUs if GPUs are not available

        sess = tf.Session(config=config)

        # Initialize
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        # Threads coordinator
        coordinator = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coordinator)

        num_samples = 0
        total_num_fail = 0
        h_losses = []
        total_time = 0

        # Start test
        for step in range(num_total_steps):
            full_I_value, full_I_prime_value, I2_value, I1_value, I1_aug_value, I2_aug_value, I_value, I_prime_value, pts_1_value, full_gt_corr_value = sess.run(
                [
                    full_I_batch, full_I_prime_batch, I2_batch, I1_batch,
                    I1_aug_batch, I2_aug_batch, I_batch, I_prime_batch,
                    pts1_batch, gt_batch
                ])
            for i in range(args.batch_size):
                num_samples += 1
                down_ratio = 2
                I_sample = utils.denorm_img(full_I_value[i]).astype(np.uint8)
                I_prime_sample = utils.denorm_img(
                    full_I_prime_value[i]).astype(np.uint8)
                corr1_sample = full_gt_corr_value[i, 0:8].reshape([
                    4, 2
                ]) / down_ratio  # (gt is for 480x640. Here, use 240x320)
                corr2_sample = full_gt_corr_value[i, 8:16].reshape(
                    [4, 2]) / down_ratio
                # Use RANSAC_homography/ Direct method to find the homography (delta 4 points)
                sample_start_time = timeit.default_timer()
                if args.method == 'direct':
                    pred_h4p, _, not_found = direct_h.find_homography(
                        I_sample,
                        I_prime_sample,
                        corr1_sample,
                        corr2_sample,
                        visual=args.visual,
                        method=args.method,
                        num_iterations=args.num_iterations,
                        return_h_inv=False)
                # RANSAC Methods
                else:
                    pred_h4p, _, not_found = ransac_h.find_homography(
                        I_sample,
                        I_prime_sample,
                        corr1_sample,
                        corr2_sample,
                        visual=args.visual,
                        method=args.method,
                        min_match_count=args.num_features,
                        return_h_inv=False)
                sample_run_time = timeit.default_timer() - sample_start_time
                total_time += sample_run_time
                # Set maximum value for every value of delta h4p
                pred_h4p[np.where(pred_h4p >= 80)] = 80
                pred_h4p[np.where(pred_h4p <= -80)] = -80

                pred_corr2_sample = pred_h4p[0] + corr1_sample
                h_loss_value = np.sqrt(
                    np.mean(np.square(pred_corr2_sample - corr2_sample)))
                # Evaluate the result
                # There are two cases of failure
                if not_found:  # Cannot find homography
                    total_num_fail += 1
                    print('===> Fail case 1: Not found homography')

                else:
                    # H_loss if homography is identity matrix
                    h_loss_identity = np.sqrt(
                        np.mean(np.square(corr1_sample - corr2_sample)))
                    if h_loss_identity < h_loss_value:
                        print('===> Fail case 2:  error > identity')
                        total_num_fail += 1
                        h_loss_value = h_loss_identity
                h_losses.append(h_loss_value)

                _ = utils.progress_bar(
                    step * args.batch_size + i,
                    num_total_steps * args.batch_size,
                    ' Test| image %d, h_loss %.3f, h_loss_average %.3f, fail %d/%d, time %.4f'
                    % (i, h_loss_value, np.mean(h_losses), total_num_fail,
                       num_samples, sample_run_time))

                # Save visualization
                if args.save_visual:
                    # Query full images
                    img1_with_4pts = I_sample.astype(np.uint8)
                    img2_with_4pts = I_prime_sample.astype(np.uint8)
                    # Draw prediction
                    cv2.polylines(img2_with_4pts,
                                  np.int32([pred_corr2_sample]), 1,
                                  (5, 225, 225), 3)

                    point_color = (0, 255, 255)
                    line_color_set = [(255, 102, 255), (51, 153, 255),
                                      (102, 255, 255), (255, 255, 0),
                                      (102, 102, 244), (150, 202, 178),
                                      (153, 240, 142), (102, 0, 51),
                                      (51, 51, 0)]
                    # Draw 4 points (ground truth)
                    full_stack_images = utils.draw_matches(
                        img1_with_4pts,
                        corr1_sample,
                        img2_with_4pts,
                        corr2_sample,
                        'tmp.jpg',
                        color_set=line_color_set,
                        show=False)

                    # Save image
                    visual_file_name = os.path.join(
                        args.results_dir,
                        str(step * args.batch_size + i) + '_loss_' +
                        str(h_loss_value) + '.jpg')
                    #cv2.putText(full_stack_images, 'RMSE %.2f'%h_loss,(800, 100), cv2.FONT_HERSHEY_SIMPLEX, 1,(0,0,255),2)
                    cv2.imwrite(visual_file_name, full_stack_images)
                    print('Wrote file %s' % visual_file_name)

        result_dict = {
            'method': args.method,
            'h_losses': h_losses,
            'h_loss_mu': np.mean(h_losses),
            'h_loss_std': np.std(h_losses)
        }
        import pickle  # cPickle was merged into pickle in Python 3
        result_file_dir = os.path.join(args.results_dir, 'h_losses.pkl')
        with open(result_file_dir, 'wb') as f:
            pickle.dump(result_dict, f)
            print('===> Successfully write results  to  %s' % result_file_dir)
        print('==========================================================')
        mean_h_loss, std_h_loss = np.mean(np.array(h_losses)), np.std(
            np.array(h_losses))
        print('===> H_loss:', mean_h_loss, '+/-', std_h_loss)
        print('Running time:', total_time / num_samples)
        fail_percent = total_num_fail * 1.0 / (num_samples)
        print('Failure %.3f' % (fail_percent))
        output_line = [
            num_samples, mean_h_loss, std_h_loss, fail_percent,
            total_time / num_samples
        ]
        print('output_line:', output_line)
        with open(os.path.join(args.log_dir, 'results.txt'), 'w') as f:
            np.savetxt(f, [output_line], delimiter=' ', fmt='%.5f')
            print('===> Wrote results to file %s' %
                  os.path.join(args.log_dir, 'results.txt'))
        tops_list = utils.find_percentile(h_losses)
        print('===> Percentile Values: (20, 50, 80, 100):')
        print(tops_list)
        print('======> End! ====================================')