def main():
    """Restore the best ResNeXt-101 checkpoint, print its recorded score,
    and build the validation transform (centre crop + ImageNet stats)."""
    # Pin all work to GPU 1; must happen before the first CUDA call.
    os.environ["CUDA_VISIBLE_DEVICES"] = "1"
    args = parse_opts()
    net = resnext.resnet101(
        num_classes=args.n_finetune_classes,
        shortcut_type=args.resnet_shortcut,
        cardinality=args.resnext_cardinality,
        sample_size=args.sample_size,
        sample_duration=args.sample_duration)
    net.cuda()
    # Checkpoint was saved from a DataParallel model, so wrap before loading.
    net = nn.DataParallel(net, device_ids=None)
    ckpt = torch.load(
        'trained_models/best-4-1.pth.tar')
    net.load_state_dict(ckpt['state_dict'])
    print(ckpt['best_prec1'])
    net.eval()
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    args.scales = [1]
    transform_val = Compose([
        MultiScaleCornerCrop(args.scales, args.sample_size,
                             crop_positions=['c']),
        ToTensor(),
        Normalize(mean, std),
    ])
def test_on_testset():
    """Evaluate the checkpointed ResNeXt-101 on the test loader and dump
    per-video ground-truth and predicted label indexes to
    './result14-0.4-max.pkl'.

    For each video the first list stored is the ground-truth indexes; every
    subsequent list is a per-clip prediction.
    """
    os.environ["CUDA_VISIBLE_DEVICES"] = "1"
    args = parse_opts()
    data_loader = get_iterator(args, isTrain=False)
    acc_meter = tnt.meter.AverageValueMeter()
    model = resnext.resnet101(num_classes=args.n_finetune_classes,
                              shortcut_type=args.resnet_shortcut,
                              cardinality=args.resnext_cardinality,
                              sample_size=args.sample_size,
                              sample_duration=args.sample_duration)
    model.cuda()
    model = nn.DataParallel(model, device_ids=None)
    model.load_state_dict(
        torch.load('trained_models/checkpoint.pth.tar')['state_dict'])
    model.eval()
    total = 0
    result = {}
    with torch.no_grad():
        for data in data_loader:
            # Renamed from `input` to avoid shadowing the builtin.
            inputs = data[0].cuda()
            label = data[1].cuda()
            video_id = data[2]
            output = torch.sigmoid(model(inputs))
            label_indexes = (label == 1)
            acc, bt, _ = calculate_accuracy(output,
                                            label,
                                            video_id=video_id,
                                            thresh_hold=0.4)
            for i, vid in enumerate(video_id):
                if vid not in result:
                    result[vid] = []
                    # First entry per video: the ground-truth label indexes.
                    result[vid].append(
                        label_indexes[i].nonzero().squeeze(1).tolist())

                # Take every label scoring above 0.4; if none clears the
                # threshold, fall back to the single highest-scoring label.
                indexes = output[i] > 0.4
                if not indexes.any():
                    indexes = output[i] >= output[i].max(0)[0]
                # nonzero() was previously computed twice; do it once.
                nonzero_idx = indexes.nonzero()
                if len(nonzero_idx) <= 0:
                    indx = []
                else:
                    indx = nonzero_idx.squeeze(1).tolist()
                result[vid].append(indx)

            total += bt
            acc_meter.add(acc, bt)
            print(
                'Now tested %d samples,batch Average Acc is %.4f, Average Acc is %.4f'
                % (total, acc / bt, acc_meter.value()[0]))
    torch.save(result, './result14-0.4-max.pkl')
def main():
    """Restore the trained classifier and run it on one hard-coded 16-frame
    test clip, printing the predicted indexes."""
    opt = parse_opts()
    model = resnext.resnet101(num_classes=opt.n_classes,
                              shortcut_type=opt.resnet_shortcut,
                              cardinality=opt.resnext_cardinality,
                              sample_size=opt.sample_size,
                              sample_duration=opt.sample_duration)
    model = model.cuda()
    model = nn.DataParallel(model, device_ids=[0])
    # Checkpoints in this project are dicts saved from a DataParallel model
    # (see the 'state_dict' key used by the other loaders), so wrap first and
    # index into the checkpoint; the previous raw torch.load() into the bare
    # model could not match the saved keys.
    model.load_state_dict(
        torch.load('./trained_models/best.pth.tar')['state_dict'])
    # Inference mode, consistent with the other prediction paths.
    model.eval()
    clip = video_loader(
        root_path='/home/zengh/Dataset/AIChallenger/test/group0/1000007124',
        frame_indices=range(16))
    indexes = test(clip, model)
    print(indexes)
def initial_model():
    """Build the ResNeXt-101 classifier, restore its checkpoint and switch it
    to eval mode.

    Returns:
        The DataParallel-wrapped model, or None when the checkpoint file is
        missing.
    """
    opt = parse_opts()
    model = resnext.resnet101(num_classes=opt.n_finetune_classes,
                              shortcut_type=opt.resnet_shortcut,
                              cardinality=opt.resnext_cardinality,
                              sample_size=opt.sample_size,
                              sample_duration=opt.sample_duration)

    model = model.cuda()
    model = nn.DataParallel(model, device_ids=None)
    model_path = './trained_models/best.pth10.tar'
    if not os.path.exists(model_path):
        # Bail out early (returning None) instead of crashing in torch.load.
        print("model path does not exist: %s" % model_path)
        return

    # Reuse model_path instead of repeating the literal (they could drift).
    model.load_state_dict(
        torch.load(model_path)['state_dict'])
    model.eval()
    return model
def predict(model, sindex):
    """Restore the checkpointed classifier, preprocess one hard-coded
    16-frame clip, and run inference, printing the wall time of each stage.

    NOTE(review): the incoming ``model`` is immediately overwritten and
    ``sindex`` is never read; both parameters are kept only so existing
    callers keep working.

    Returns:
        The predicted label indexes from test() (previously discarded).
    """
    start_time = time.time()
    opt = parse_opts()
    model = resnext.resnet101(num_classes=opt.n_finetune_classes,
                              shortcut_type=opt.resnet_shortcut,
                              cardinality=opt.resnext_cardinality,
                              sample_size=opt.sample_size,
                              sample_duration=opt.sample_duration)

    model = model.cuda()
    model = nn.DataParallel(model, device_ids=None)
    model.load_state_dict(
        torch.load('./trained_models/best.pth.tar')['state_dict'])
    duration = (time.time() - start_time) * 1000
    print('restore time %.3f ms' % duration)

    model.eval()
    rgb_mean = [0.485, 0.456, 0.406]
    rgb_std = [0.229, 0.224, 0.225]
    opt.scales = [1]
    transform_val = Compose([
        MultiScaleCornerCrop(opt.scales, opt.sample_size,
                             crop_positions=['c']),
        ToTensor(),
        Normalize(rgb_mean, rgb_std),
    ])
    start_time = time.time()
    clip = video_loader(
        root_path='/home/zengh/Dataset/AIChallenger/train/group5/567700300',
        frame_indices=range(3, 19),
        transform=transform_val)
    # Add a leading batch dimension for the model.
    clip = clip.unsqueeze(0)
    duration = (time.time() - start_time) * 1000
    print('pic time %.3f ms' % duration)
    start_time = time.time()
    indexes = test(clip, model)
    duration = (time.time() - start_time) * 1000
    print('pre time %.3f ms' % duration)
    # Bug fix: the prediction was computed but never returned.
    return indexes
        self.save_root = args.log_dir

    def save(self, epoch, metrics_info):
        """Persist the student network and log a summary whenever the latest
        validation AP beats the best seen so far."""
        current_ap = metrics_info['logger']['attr']['ap'][-1]
        if current_ap <= self.max_ap:
            return
        self.max_ap = current_ap
        target = os.path.join(self.save_root, 'ap{}'.format(current_ap))
        torch.save(s_net.module.state_dict(), target)

        logger_file("val: Validation Results - Epoch: {} - LR: {}".format(epoch, optimizer.optimizer.param_groups[0]['lr']))
        print_summar_table(logger_file, attr_name, metrics_info['logger'])
        logger_file('AP:%0.3f' % current_ap)


# --- module-level training setup (runs at import time) ---
# NOTE(review): this parser is created but never used here; parse_opts()
# below supplies the actual options.
parser = argparse.ArgumentParser(description='PyTorch my data Training')
args = parse_opts()
# Distillation starts at distill_epoch, so only the remaining epochs train.
max_epoch = args.max_epoch - args.distill_epoch
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# One logger echoing to console + file, and a second file-only logger.
log = Logger('both', filename=os.path.join(args.log_dir, args.log_file + '_all'), level='debug', mode='both')
logger = log.logger.info
log_config(args, logger)
log_file = Logger('file', filename=os.path.join(args.log_dir, args.log_file), level='debug', mode='file')
logger_file = log_file.logger.info
attr, attr_name = get_tasks(args)
criterion_CE, metrics = get_losses_metrics(attr, args.categorical_loss)

# Load dataset, net, evaluator, Saver
# ImageNet mean/std normalisation for the data pipeline.
trainloader, testloader = get_data(args, attr, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
# Teacher/student model pair (plus channel/layer metadata) for distillation.
t_net, s_net, channel_t, channel_s, layer_t, layer_s, index, filter_list = \
    get_pair_model(args.size, frm='my', name_t=args.name_t, name_s=args.name_s,
                   load_BN=args.load_BN, logger=logger, bucket=args.bucket, classifier=args.classifier)
# --- 예제 #7 ("Example #7" — scraped-sample separator, vote count 0) ---
import torch
from utils.get_tasks import get_tasks
from models.generate_model import get_model
from utils.opts import parse_opts


def jit_trace(args, model_path):
    """Load a checkpoint into the model, trace it with TorchScript on a
    dummy 1x3x224x224 input, and save the traced module as 'ap22.pt'."""
    attr, attr_name = get_tasks(args)
    model, _, _ = get_model(args.conv,
                            classifier=args.classifier,
                            dropout=args.dropout,
                            attr=attr)
    model.load_state_dict(torch.load(model_path))
    # Trace on CPU so the saved module is device-independent.
    model.cpu()
    dummy_input = torch.rand(1, 3, 224, 224)
    traced = torch.jit.trace(model.eval(), dummy_input)
    traced.save('{}.pt'.format('ap22'))
    print('transform succeed')


if __name__ == '__main__':

    arg = parse_opts()
    # model_path = '/root/.torch/models/modelofnewdata'
    # Hard-coded teacher checkpoint (AP 0.2291) to export via TorchScript.
    model_path = '/root/models/distillation/log/teacher/ap0.2291'
    jit_trace(arg, model_path)
def main():
    """Timing walkthrough: open a video with OpenCV, restore the ResNeXt-101
    checkpoint, preprocess one 16-frame clip and time a single forward pass.

    Each stage prints its elapsed wall time in milliseconds.
    """
    #994513477.mp4 995153247.mp4 996259932.mp4
    start_time = time.time()
    video_path = '/home/zengh/Dataset/AIChallenger/group5/995153247.mp4'
    if os.path.exists(video_path):
        print("exists!")

    cap = cv2.VideoCapture(video_path)  #15ms
    duration = (time.time() - start_time) * 1000
    print('1 time %.3f ms' % duration)

    start_time = time.time()
    print(id(cv2.CAP_PROP_POS_FRAMES))
    #cap.set(cv2.CAP_PROP_POS_FRAMES,50) #40ms
    #print("id",id(cv2.CAP_PROP_POS_FRAMES))
    duration = (time.time() - start_time) * 1000
    print('2 time %.3f ms' % duration)

    start_time = time.time()
    ret, frame = cap.read()  #1ms
    duration = (time.time() - start_time) * 1000
    #print("ret",ret)
    print('3 time %.3f ms' % duration)
    # Bug fix: the capture handle was never released (resource leak); it is
    # not used past this point.
    cap.release()

    opt = parse_opts()
    start_time = time.time()
    model = resnext.resnet101(num_classes=opt.n_finetune_classes,
                              shortcut_type=opt.resnet_shortcut,
                              cardinality=opt.resnext_cardinality,
                              sample_size=opt.sample_size,
                              sample_duration=opt.sample_duration)
    model = model.cuda()
    # Checkpoint was saved from a DataParallel model: wrap before loading.
    model = nn.DataParallel(model, device_ids=None)
    model.load_state_dict(
        torch.load('./trained_models/best.pth10.tar')['state_dict'])
    duration = (time.time() - start_time) * 1000
    print('restore time %.3f ms' % duration)

    model.eval()
    rgb_mean = [0.485, 0.456, 0.406]
    rgb_std = [0.229, 0.224, 0.225]
    opt.scales = [1]
    transform_val = Compose([
        MultiScaleCornerCrop(opt.scales, opt.sample_size,
                             crop_positions=['c']),
        ToTensor(),
        Normalize(rgb_mean, rgb_std),
    ])
    start_time = time.time()
    clip = video_loader(
        root_path='/home/zengh/Dataset/AIChallenger/train/group5/567700300',
        frame_indices=range(3, 19),
        transform=transform_val)
    # Add a leading batch dimension for the model.
    clip = clip.unsqueeze(0)
    print("clip", clip)
    duration = (time.time() - start_time) * 1000
    print('pic time %.3f ms' % duration)
    start_time = time.time()
    indexes = test(clip, model)
    duration = (time.time() - start_time) * 1000
    print('pre time %.3f ms' % duration)
    # Bug fix: the prediction was computed but discarded; return it.
    return indexes
# --- 예제 #9 ("Example #9" — scraped-sample separator, vote count 0) ---
import os
import numpy as np
import torch
from torchvision.transforms import Compose, Resize, ToTensor, \
    RandomHorizontalFlip, Normalize, RandomRotation, ColorJitter

from data.transforms import square_no_elastic, get_inference_transform_person_lr
from utils.opts import parse_opts
from data.image_loader import opencv_loader, cv_to_pil_image
import cv2
from model.cubenet import CubeNet
from utils.get_tasks import get_tasks
import matplotlib.pyplot as plt

# Module-level options shared by the helpers below; pretraining is disabled
# because weights are expected to come from a checkpoint instead.
opt = parse_opts()
opt.pretrain = False


def get_input(cuda=True, transform=None, box=None, path=None):
    # NOTE(review): this definition is truncated in the scraped source — the
    # `if transform:` at the bottom has no suite, so the block as captured is
    # not syntactically complete. Do not assume behavior beyond this point.
    # Fall back to the module-level opt.img_path when no explicit path given.
    pic_path = opt.img_path if not path else path
    # ImageNet normalisation statistics.
    mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
    val_img_transform = Compose([
        square_no_elastic,
        Resize((opt.person_size, opt.person_size)),
        ToTensor(),
        Normalize(mean, std)
    ])
    # Raw BGR image read directly; `img` below goes through the PIL loader.
    img_ori = cv2.imread(pic_path)

    img = opencv_loader(pic_path)
    if transform:
def main():
    """Train the multi-label video classifier with torchnet's Engine.

    Sets up visdom meters/loggers, the model, criterion and SGD optimizer,
    optionally resumes from a checkpoint, registers engine hooks that log
    metrics, run validation after every epoch and checkpoint the best model,
    then starts training.
    """
    args = parse_opts()
    engine = Engine()
    # define meters and loggers
    loss_meter = tnt.meter.AverageValueMeter()
    map_meter = tnt.meter.mAPMeter()
    acc_meter = tnt.meter.AverageValueMeter()
    batch_meter = tnt.meter.AverageValueMeter()
    # batchtime_meter = tnt.meter.AverageValueMeter()
    env_name = 'AIChallenger-exp2'
    # Visdom server port (visdom's default).
    port = 8097
    train_loss_logger = VisdomPlotLogger('line',
                                         port=port,
                                         env=env_name,
                                         opts={'title': 'Train Loss'})
    train_map_logger = VisdomPlotLogger('line',
                                        port=port,
                                        env=env_name,
                                        opts={'title': 'Train mAP'})
    test_loss_logger = VisdomPlotLogger('line',
                                        port=port,
                                        env=env_name,
                                        opts={'title': 'Test Loss'})
    test_map_logger = VisdomPlotLogger('line',
                                       port=port,
                                       env=env_name,
                                       opts={'title': 'Test mAP'})
    train_acc_logger = VisdomPlotLogger('line',
                                        port=port,
                                        env=env_name,
                                        opts={'title': 'Train acc'})
    test_acc_logger = VisdomPlotLogger('line',
                                       port=port,
                                       env=env_name,
                                       opts={'title': 'Test acc'})

    # generate model
    model, params = generate_model(args)
    # print(model)
    best_prec1 = 0
    # ==============define loss function (criterion),optimezer,scheduler
    criterion = nn.MultiLabelSoftMarginLoss()
    optimizer = torch.optim.SGD(params,
                                lr=args.learning_rate,
                                momentum=args.momentum,
                                dampening=args.dampening,
                                weight_decay=args.weight_decay,
                                nesterov=args.nesterov)

    # for param_group in optimizer.state_dict()['param_groups']:
    #             print(param_group)
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.begin_epoch = checkpoint['epoch']
            # best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
            # for param_group in optimizer.state_dict()['param_groups']:
            #     print(param_group)
            # NOTE(review): weight decay is cut 100x on resume; the intent is
            # not documented here — presumably to relax regularisation when
            # fine-tuning. Confirm before changing.
            for param_group in optimizer.param_groups:
                # print(param_group['lr'])
                # if param_group['lr'] < 0.01:
                param_group['weight_decay'] /= 100

            del checkpoint
            # optimizer.state_dict()['param_groups'] = tmp_param_groups

        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    for param_group in optimizer.state_dict()['param_groups']:
        print(param_group)

    # Reduce LR when the validation loss plateaus.
    scheduler = lr_scheduler.ReduceLROnPlateau(optimizer,
                                               'min',
                                               patience=args.lr_patience)

    def h(sample, isTrain=True):
        # Engine forward closure: returns (loss, output) for one batch.
        if isTrain:
            model.train()
        else:
            model.eval()
        output = model(sample[0])
        target = sample[1]
        # print(target.size(),output.size())
        return criterion(output, target), output

    def reset_meters():
        # Clear all running meters (called at epoch boundaries).
        map_meter.reset()
        loss_meter.reset()
        acc_meter.reset()
        batch_meter.reset()

    def on_sample(state):
        # Move input and target (first two sample fields) onto the GPU.
        if not args.no_cuda:
            state['sample'] = [s.cuda() for s in state['sample'][0:2]]

    def on_forward(state):
        #using sigmoid to fit MultiLabelSoftMarginLoss
        _output = torch.sigmoid(state['output'].data)
        _target = state['sample'][1]
        acc, n_batch, _ = calculate_accuracy(_output, _target)
        map_meter.add(_output, _target)
        loss_meter.add(state['loss'].item())
        acc_meter.add(acc, n_batch)
        # Progress print every 100 batches.
        if (state['t'] % 100 == 0):
            print('Batch-%d loss: %.4f, accuracy: %.4f' %
                  (state['t'], state['loss'].item(), acc))

    def on_start(state):
        state['best_score'] = best_prec1  # to save the best score

    def on_start_epoch(state):
        reset_meters()
        # Wrap the iterator in tqdm for a progress bar.
        state['iterator'] = tqdm(state['iterator'])

    def on_end_epoch(state):
        # Log training metrics accumulated over the epoch.
        print('Training loss: %.4f, accuracy: %.4f,map: %.4f%%' %
              (loss_meter.value()[0], acc_meter.value()[0],
               map_meter.value()[0]))
        train_loss_logger.log(state['epoch'], loss_meter.value()[0])
        train_map_logger.log(state['epoch'], map_meter.value()[0])
        train_acc_logger.log(state['epoch'], acc_meter.value()[0])

        # do validation at the end of each epoch
        reset_meters()
        engine.test(h, get_iterator(args, False))
        test_loss_logger.log(state['epoch'], loss_meter.value()[0])
        test_map_logger.log(state['epoch'], map_meter.value()[0])
        test_acc_logger.log(state['epoch'], acc_meter.value()[0])
        # remember best map and save checkpoint
        now_acc = acc_meter.value()[0]
        # Scheduler is stepped on validation loss (meters now hold val stats).
        scheduler.step(loss_meter.value()[0])
        # print(now_acc.type(),state['best_score'].type())
        is_best = now_acc > state['best_score']
        state['best_score'] = max(now_acc, state['best_score'])
        save_checkpoint(
            {
                'epoch': state['epoch'] + 1,
                'arch': args.model,
                'state_dict': model.state_dict(),
                'best_prec1': state['best_score'],
                'optimizer': optimizer.state_dict(),
            }, args, is_best, state['epoch'])

        print('Testing loss: %.4f, accuracy: %.4f,map: %.4f%%' %
              (loss_meter.value()[0], acc_meter.value()[0],
               map_meter.value()[0]))

    engine.hooks['on_sample'] = on_sample
    engine.hooks['on_forward'] = on_forward
    engine.hooks['on_start_epoch'] = on_start_epoch
    engine.hooks['on_end_epoch'] = on_end_epoch
    engine.hooks['on_start'] = on_start

    engine.train(h,
                 get_iterator(args, True),
                 maxepoch=args.n_epochs,
                 optimizer=optimizer)