Example #1
    def evaluate(self, model, print_frequency=10):
        '''
            Evaluate the accuracy of the model
            
            Input:
                `model`: model to be evaluated.
                `print_frequency`: how often to print evaluation info.
                
            Output:
                accuracy: (float) (0~100)
        '''

        model = model.cuda()
        model.eval()
        acc = .0
        num_samples = .0
        with torch.no_grad():
            for i, (input, target) in enumerate(self.val_loader):
                input, target = input.cuda(), target.cuda()
                pred = model(input)
                pred = pred.argmax(dim=1)
                batch_acc = torch.sum(target == pred)
                acc += batch_acc.item()
                num_samples += pred.shape[0]

                if i % print_frequency == 0:
                    fns.update_progress(i, len(self.val_loader))
                    print(' ')
        print(' ')
        print('Test accuracy: {:4.2f}% '.format(float(acc / num_samples *
                                                      100)))
        print(
            '==================================================================='
        )
        return acc / num_samples * 100
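Every example here reports progress through `fns.update_progress`, whose implementation is not included in these snippets. A minimal sketch of what such a helper might look like, assuming it overwrites one console line with a progress bar plus optional keyword statistics (the module name, signature, and output format are assumptions, not the actual `fns` code):

import sys

def update_progress(step, total, **stats):
    # Hypothetical progress printer: overwrite a single console line with a
    # bar and any extra key=value statistics (ESA, loss, acc, ...).
    frac = (step + 1) / max(total, 1)
    filled = int(30 * frac)
    bar = '#' * filled + '-' * (30 - filled)
    extra = ' '.join('{}: {}'.format(k, v) for k, v in stats.items())
    sys.stdout.write('\r[{}] {:5.1f}% {}'.format(bar, 100.0 * frac, extra))
    sys.stdout.flush()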
Example #2
def eval(test_loader, model, args):
    batch_time = AverageMeter()
    acc = AverageMeter()

    # switch to eval mode
    model.eval()

    end = time.time()
    for i, (images, target) in enumerate(test_loader):
        if not args.no_cuda:
            images = images.cuda()
            target = target.cuda()
        output = model(images)
        batch_acc = compute_accuracy(output, target)
        acc.update(batch_acc, images.size(0))
        batch_time.update(time.time() - end)
        end = time.time()

        # Update statistics
        estimated_time_remained = batch_time.get_avg()*(len(test_loader)-i-1)
        fns.update_progress(i, len(test_loader), 
            ESA='{:8.2f}'.format(estimated_time_remained)+'s',
            acc='{:4.2f}'.format(float(batch_acc))
            )
    print()
    print('Test accuracy: {:4.2f}% (time = {:8.2f}s)'.format(
            float(acc.get_avg()), batch_time.get_avg()*len(test_loader)))
    print('===================================================================')
    return float(acc.get_avg())
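The eval and train examples also rely on an AverageMeter and a compute_accuracy helper that are not defined in these snippets. A plausible minimal sketch of both, written only to match the update()/get_avg() calls above; the original implementations may differ:

class AverageMeter:
    # Hypothetical running-average tracker matching the update()/get_avg() calls above.
    def __init__(self):
        self.sum = 0.0
        self.count = 0

    def update(self, value, n=1):
        self.sum += float(value) * n
        self.count += n

    def get_avg(self):
        return self.sum / self.count if self.count > 0 else 0.0

def compute_accuracy(output, target):
    # Hypothetical top-1 accuracy (in percent) for a batch of logits.
    pred = output.argmax(dim=1)
    correct = (pred == target).sum().item()
    return correct / target.size(0) * 100.0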
Example #3
    def evaluate(self, model, val_loader, print_frequency=10):
        '''
            Evaluate the accuracy of the model
            
            Input:
                `model`: model to be evaluated.
                `val_loader`: validation data loader.
                `print_frequency`: how often to print evaluation info.
                
            Output:
                accuracy: (float) (0~100)
        '''
        evaluate_begin = datetime.datetime.now()
        model = model.cuda()
        model.eval()
        acc = .0
        num_samples = .0
        iterations = 0
        with torch.no_grad():
            for i, data in enumerate(val_loader):
                input = data[0]["data"].cuda(non_blocking=True)
                target = data[0]["label"].squeeze().long().cuda(
                    non_blocking=True)
                # input, target = input.cuda(), target.cuda()
                pred = model(input)
                pred = pred.argmax(dim=1)
                batch_acc = torch.sum(target == pred)
                acc += batch_acc.item()
                num_samples += pred.shape[0]

                if i % print_frequency == 0:
                    fns.update_progress(i, 10010)
                    print(' ')

                iterations += 1
                # TODO(zhaoyx): for large dataset
                if iterations > 400:
                    break
        print(' ')
        #TODO(zhaoyx): fix bug
        if num_samples < 1:
            print('no data, accuracy set to -1')
            return -1
        evaluate_end = datetime.datetime.now()
        print('Evaluate time: {} seconds, {} iterations'.format(
            (evaluate_end - evaluate_begin).seconds, iterations))
        print('Test accuracy: {:4.2f}% '.format(float(acc / num_samples *
                                                      100)))
        print(
            '==================================================================='
        )
        return acc / num_samples * 100
Example #4
def save_progress(block, addresses, new_addresses, progress_file, with_db):
    """
    Saves the work we've completed. If with_db is 1, we will write the 
    new_addresses to the database.
    """
    print("Saving progress... do not close/power off computer until process is"
          " complete.")
    update_progress(block, addresses, progress_file)
    if with_db == 1:
        print("Updating database...")
        update_db(new_addresses)
    else:
        print("Skipping database update.")
    print("Progress saved. Last block scanned was {}.".format(block))
Example #5
    def evaluate(self, model, print_frequency=10):
        '''
            Evaluate the accuracy of the model
            
            Input:
                `model`: model to be evaluated.
                `print_frequency`: how often to print evaluation info.
                
            Output:
                accuracy: (float) (0~100)
        '''
        
        model = model.cuda()
        model.eval()
        acc = .0
        num_samples = .0
        with torch.no_grad():
            for i, (input, target) in enumerate(self.val_loader):
                input, target = input.cuda(), target.cuda()
                pred = model(input)
                # pred = pred.argmax(dim=1)
                # batch_acc = torch.sum(target == pred)
                # acc += batch_acc.item()
                # num_samples += pred.shape[0]
                valid_mask = (target > 0) | (pred > 0)  # keep positions where either value is positive
                metric_output = 1e3 * pred[valid_mask]
                metric_target = 1e3 * target[valid_mask]
                maxRatio = torch.max(metric_output / metric_target, metric_target / metric_output)
                delta1 = float((maxRatio < 1.25).float().mean())

                # inv_output = 1.0 / pred
                # inv_target = 1.0 / target
                # abs_inv_diff = (inv_output - inv_target).abs()
                # irmse = math.sqrt((torch.pow(abs_inv_diff, 2)).mean())

                if i % print_frequency == 0:
                    fns.update_progress(i, len(self.val_loader))
                    print(' ')
        print(' ')
        print('Test delta1: {:4.2f}% '.format(float(delta1)))
        print('===================================================================')
        return delta1
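Example #5 evaluates a regression-style model with the delta1 metric used in depth estimation: the fraction of valid elements where max(pred/target, target/pred) < 1.25. Note that, as written, the printed and returned value comes from the last batch only. A tiny standalone check of the formula on dummy tensors:

import torch

pred = torch.tensor([1.0, 2.0, 4.0])
target = torch.tensor([1.1, 2.0, 2.0])
max_ratio = torch.max(pred / target, target / pred)
delta1 = (max_ratio < 1.25).float().mean()
print(delta1.item())  # 2 of the 3 ratios are below 1.25 -> ~0.667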
Example #6
def device_train(train_loader, model, args):
    batch_time = AverageMeter()
    losses = AverageMeter()
    acc = AverageMeter()

    # switch to train mode
    model = model.cuda()
    model.train()

    criterion = torch.nn.BCEWithLogitsLoss()
    if args.dataset == 'imagenet':
        criterion = torch.nn.CrossEntropyLoss()

    criterion.cuda()
    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    print('===================================================================')
    end = time.time()

    for k in range(args.local_epochs):
        for i, (images, target) in enumerate(train_loader):
            target.unsqueeze_(1)
            target_onehot = torch.FloatTensor(target.shape[0], get_cls_num(args.dataset))
            target_onehot.zero_()
            target_onehot.scatter_(1, target, 1)
            target.squeeze_(1)
            
            images = images.cuda()
            target_onehot = target_onehot.cuda()
            target = target.cuda()

            output = model(images)
            if args.dataset == 'imagenet':
                loss = criterion(output, target)
            else:
                loss = criterion(output, target_onehot)

            # measure accuracy and record loss
            batch_acc = compute_accuracy(output, target)
        
            losses.update(loss.item(), images.size(0))
            acc.update(batch_acc, images.size(0))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
        
            # Update statistics
            estimated_time_remained = batch_time.get_avg()*(len(train_loader)-i-1)
            fns.update_progress(i, len(train_loader), 
                ESA='{:8.2f}'.format(estimated_time_remained)+'s',
                loss='{:4.2f}'.format(loss.item()),
                acc='{:4.2f}%'.format(float(batch_acc))
                )


    return model
Example #7
def train(train_loader, model, criterion, optimizer, epoch, num_classes, args):
    batch_time = AverageMeter()
    losses = AverageMeter()
    acc = AverageMeter()

    # switch to train mode
    model = model.cuda()
    model.train()
    
    print('===================================================================')
    end = time.time()
    
    for i, (images, target) in enumerate(train_loader):
        target.unsqueeze_(1)
        target_onehot = torch.FloatTensor(target.shape[0], num_classes)
        target_onehot.zero_()
        target_onehot.scatter_(1, target, 1)
        target.squeeze_(1)
        
        if not args.no_cuda:
            images = images.cuda()
            target_onehot = target_onehot.cuda()
            target = target.cuda()

        # compute output and loss
        output = model(images)

        if args.dataset == 'imagenet':
            loss = criterion(output, target)
        else:
            loss = criterion(output, target_onehot)
        
        # measure accuracy and record loss
        batch_acc = compute_accuracy(output, target)
        
        losses.update(loss.item(), images.size(0))
        acc.update(batch_acc, images.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        
        # Update statistics
        estimated_time_remained = batch_time.get_avg()*(len(train_loader)-i-1)
        fns.update_progress(i, len(train_loader), 
            ESA='{:8.2f}'.format(estimated_time_remained)+'s',
            loss='{:4.2f}'.format(loss.item()),
            acc='{:4.2f}%'.format(float(batch_acc))
            )

        # if i > 500:
        #     break

    print()
    print('Finish epoch {}: time = {:8.2f}s, loss = {:4.2f}, acc = {:4.2f}%'.format(
            epoch+1, batch_time.get_avg()*len(train_loader), 
            float(losses.get_avg()), float(acc.get_avg())))
    print('===================================================================')
    return
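Both training examples turn integer labels into one-hot targets with in-place scatter_ so they can be fed to BCEWithLogitsLoss. A small standalone illustration of that pattern (the class count and labels below are arbitrary):

import torch

num_classes = 5
target = torch.tensor([0, 3, 1])            # integer class labels, shape (batch,)
index = target.unsqueeze(1)                 # shape (batch, 1), as scatter_ requires
target_onehot = torch.zeros(len(target), num_classes)
target_onehot.scatter_(1, index, 1)         # write a 1 in each sample's label column
print(target_onehot)
# tensor([[1., 0., 0., 0., 0.],
#         [0., 0., 0., 1., 0.],
#         [0., 1., 0., 0., 0.]])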