def model_init_par():
    # model
    backbone = resnet50()
    classifier = BaseClassifier(nattr=6)
    model = FeatClassifier(backbone, classifier)

    # load
    checkpoint = torch.load(
        '/home/sohaibrabbani/PycharmProjects/Strong_Baseline_of_Pedestrian_Attribute_Recognition/exp_result/custom/custom/img_model/ckpt_max.pth'
    )

    # unfolded load
    # state_dict = checkpoint['state_dicts']
    # new_state_dict = OrderedDict()
    # for k, v in state_dict.items():
    #     name = k[7:]
    #     new_state_dict[name] = v
    # model.load_state_dict(new_state_dict)
    # one-liner load
    # if torch.cuda.is_available():
    #     model = torch.nn.DataParallel(model).cuda()
    #     model.load_state_dict(checkpoint['state_dicts'])
    # else:
    model.load_state_dict({
        k.replace('module.', ''): v
        for k, v in checkpoint['state_dicts'].items()
    })
    # cuda eval
    model.cuda()
    model.eval()

    # valid_transform
    height, width = 256, 192
    normalize = T.Normalize(mean=[0.485, 0.456, 0.406],
                            std=[0.229, 0.224, 0.225])
    valid_transform = T.Compose(
        [T.Resize((height, width)),
         T.ToTensor(), normalize])
    return model, valid_transform
def model_init_par():
    # model
    backbone = resnet50()
    classifier = BaseClassifier(nattr=6)
    model = FeatClassifier(backbone, classifier)

    # load
    checkpoint = torch.load(
       '/home/deep/PycharmProjects/pedestrian-attribute-recognition/exp_result/custom/custom/img_model/ckpt_max.pth')
    model.load_state_dict({k.replace('module.', ''): v for k, v in checkpoint['state_dicts'].items()})
    # cuda eval
    model.cuda()
    model.eval()

    # valid_transform
    height, width = 256, 192
    normalize = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    valid_transform = T.Compose([
        T.Resize((height, width)),
        T.ToTensor(),
        normalize
    ])
    return model, valid_transform
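
For reference, a minimal single-image inference sketch built on what model_init_par returns; the image path is a placeholder, and the sigmoid/0.5-threshold convention is borrowed from the evaluation loops further down, so treat it as an assumption rather than part of the original example.

from PIL import Image
import torch

model, valid_transform = model_init_par()
img = Image.open('person.jpg').convert('RGB')        # placeholder path
batch = valid_transform(img).unsqueeze(0).cuda()     # (1, 3, 256, 192)
with torch.no_grad():
    probs = torch.sigmoid(model(batch)).squeeze(0)   # per-attribute probabilities
preds = (probs > 0.5).cpu().numpy()                  # assumed 0.5 cut-off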
Example #3
def main(args):
    visenv_name = args.dataset
    exp_dir = os.path.join('exp_result', args.dataset)
    model_dir, log_dir = get_model_log_path(exp_dir, visenv_name)
    stdout_file = os.path.join(log_dir, f'stdout_{time_str()}.txt')

    if args.redirector:
        print('redirector stdout')
        ReDirectSTD(stdout_file, 'stdout', False)

    pprint.pprint(OrderedDict(args.__dict__))

    print('-' * 60)
    print(f'use GPU{args.device} for training')
    print(
        f'train set: {args.dataset} {args.train_split}, test set: {args.valid_split}'
    )

    train_tsfm, valid_tsfm = get_transform(args)
    print(train_tsfm)

    train_set = AttrDataset(args=args,
                            split=args.train_split,
                            transform=train_tsfm)

    train_loader = DataLoader(
        dataset=train_set,
        batch_size=args.batchsize,
        shuffle=True,
        num_workers=4,
        pin_memory=True,
    )

    valid_set = AttrDataset(args=args,
                            split=args.valid_split,
                            transform=valid_tsfm)

    valid_loader = DataLoader(
        dataset=valid_set,
        batch_size=args.batchsize,
        shuffle=False,
        num_workers=4,
        pin_memory=True,
    )

    print(f'{args.train_split} set: {len(train_loader.dataset)}, '
          f'{args.valid_split} set: {len(valid_loader.dataset)}, '
          f'attr_num : {train_set.attr_num}')

    backbone = resnet50()
    classifier = BaseClassifier(nattr=35)
    model = FeatClassifier(backbone, classifier)

    if torch.cuda.is_available():
        model = torch.nn.DataParallel(model).cuda()

    print("reloading pretrained models")

    exp_dir = os.path.join('exp_result', args.dataset)
    model_path = os.path.join(exp_dir, args.dataset, 'img_model')
    model.load_state_dict(
        torch.load(
            '/home/sohaibrabbani/PycharmProjects/Strong_Baseline_of_Pedestrian_Attribute_Recognition/pedestrian_model/rap2_ckpt_max.pth'
        )['state_dicts'])
    # model = get_reload_weight(model_path, model)

    model.eval()
    preds_probs = []
    gt_list = []
    with torch.no_grad():
        for step, (imgs, gt_label, imgname) in enumerate(tqdm(valid_loader)):
            imgs = imgs.cuda()
            gt_label = gt_label.cuda()
            gt_list.append(gt_label.cpu().numpy())
            gt_label[gt_label == -1] = 0
            valid_logits = model(imgs)
            valid_probs = torch.sigmoid(valid_logits)
            preds_probs.append(valid_probs.cpu().numpy())

    gt_label = np.concatenate(gt_list, axis=0)
    preds_probs = np.concatenate(preds_probs, axis=0)

    valid_result = get_pedestrian_metrics(gt_label, preds_probs)

    print(
        f'Evaluation on test set, \n',
        'ma: {:.4f},  pos_recall: {:.4f} , neg_recall: {:.4f} \n'.format(
            valid_result.ma, np.mean(valid_result.label_pos_recall),
            np.mean(valid_result.label_neg_recall)),
        'Acc: {:.4f}, Prec: {:.4f}, Rec: {:.4f}, F1: {:.4f}'.format(
            valid_result.instance_acc, valid_result.instance_prec,
            valid_result.instance_recall, valid_result.instance_f1))
Example #4
    def __init__(self):
        device = torch.device('cpu')
        FORCE_TO_CPU = True
        parser = argument_parser()
        args = parser.parse_args(['PETA', '--model=dpn107'])

        visenv_name = 'PETA'
        exp_dir = os.path.join('exp_result', visenv_name)
        model_dir, log_dir = get_model_log_path(exp_dir, visenv_name)
        stdout_file = os.path.join(log_dir, f'stdout_{time_str()}.txt')
        save_model_path = os.path.join(
            model_dir, 'ckpt_max_e0384293_2020-09-17_18-35-21.pth')

        if args.redirector:
            print('redirector stdout')
            ReDirectSTD(stdout_file, 'stdout', False)

        pprint.pprint(OrderedDict(args.__dict__))

        print('-' * 60)
        print(f'use GPU{args.device} for training')

        _, predict_tsfm = get_transform(args)

        valid_set = AttrDataset(args=args,
                                split=args.valid_split,
                                transform=predict_tsfm)

        args.att_list = valid_set.attr_id

        backbone = getattr(sys.modules[__name__], args.model)()

        if "dpn68" in args.model:
            net_parameter = 832
        elif "dpn" in args.model:
            net_parameter = 2688
        elif "densenet" in args.model:
            net_parameter = 1024
        else:
            net_parameter = 2048

        classifier = BaseClassifier(netpara=net_parameter,
                                    nattr=valid_set.attr_num)
        model = FeatClassifier(backbone, classifier)

        if torch.cuda.is_available() and not FORCE_TO_CPU:
            model = torch.nn.DataParallel(model).cuda()
            ckpt = torch.load(save_model_path)
            print(f'Model is served with GPU ')
        else:
            model = torch.nn.DataParallel(model)
            ckpt = torch.load(save_model_path,
                              map_location=torch.device('cpu'))
            print(f'Model is served with CPU ')

        model.load_state_dict(ckpt['state_dicts'])
        model.eval()

        # from torchsummary import summary
        # summary(model, input_size=(3, 256, 192))

        print('Total number of parameters: ',
              sum(p.numel() for p in model.parameters() if p.requires_grad))

        self.args = args
        self.predict_tsfm = predict_tsfm
        self.model = model
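
A companion predict method could sit next to this constructor; the following is only a sketch, assuming inputs arrive as PIL images and that the sigmoid/0.5-threshold convention from the other examples applies (the model stays on CPU here because of FORCE_TO_CPU).

    def predict(self, pil_img, threshold=0.5):
        # hypothetical helper, not part of the original class
        batch = self.predict_tsfm(pil_img).unsqueeze(0)
        with torch.no_grad():
            probs = torch.sigmoid(self.model(batch)).squeeze(0)
        return [(name, float(p))
                for name, p in zip(self.args.att_list, probs)
                if p > threshold]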
Example #5
    if "dpn68" in args.model:
        net_parameter = 832
    elif "dpn" in args.model:
        net_parameter = 2688
    elif "densenet" in args.model:
        net_parameter = 1024
    else:
        net_parameter = 2048
        
    classifier = BaseClassifier(netpara=net_parameter, nattr=valid_set.attr_num)
    model = FeatClassifier(backbone, classifier)
    
    if torch.cuda.is_available() and not FORCE_TO_CPU:
        model = torch.nn.DataParallel(model).cuda()
        ckpt = torch.load(save_model_path)
        print(f'Model is served with GPU ')
    else:
        model = torch.nn.DataParallel(model)
        ckpt = torch.load(save_model_path, map_location=torch.device('cpu'))
        print(f'Model is served with CPU ')
    
    model.load_state_dict(ckpt['state_dicts'])
    model.eval()
    
    macs, params = get_model_complexity_info(model, (3, 256, 192),
                                             as_strings=True,
                                             print_per_layer_stat=False,
                                             verbose=False)
    data.append([trained_model, macs, params])

df = pd.DataFrame(data, columns=['model','macs','params'])
df.to_csv('flops.csv')
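
The get_model_complexity_info call above matches the signature of the ptflops package; if that is indeed the helper being used (an assumption, since this fragment's imports are not shown), the snippet needs roughly these imports:

from ptflops import get_model_complexity_info  # assumed source of the helper
import pandas as pd                             # for the DataFrame/CSV export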
Example #6
def main(args):
    visenv_name = args.dataset
    exp_dir = os.path.join('exp_result', args.dataset)
    model_dir, log_dir = get_model_log_path(exp_dir, visenv_name)
    log_name = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
    stdout_file = os.path.join(log_dir, f'stdout_{log_name}.txt')
    save_model_path = os.path.join(model_dir, 'ckpt_max.pth')

    if args.redirector:
        print('redirector stdout')
        ReDirectSTD(stdout_file, 'stdout', False)

    pprint.pprint(OrderedDict(args.__dict__))

    print('-' * 60)
    print(f'use GPU{args.device} for training')
    print(
        f'train set: {args.dataset} {args.train_split}, test set: {args.valid_split}'
    )

    train_tsfm, valid_tsfm = get_transform(args)
    print(train_tsfm)

    train_set = AttrDataset(args=args,
                            split=args.train_split,
                            transform=train_tsfm)

    train_loader = DataLoader(
        dataset=train_set,
        batch_size=args.batchsize,
        shuffle=True,
        num_workers=4,
        pin_memory=True,
    )
    valid_set = AttrDataset(args=args,
                            split=args.valid_split,
                            transform=valid_tsfm)

    valid_loader = DataLoader(
        dataset=valid_set,
        batch_size=16,
        shuffle=False,
        num_workers=4,
        pin_memory=True,
    )

    print(f'{args.train_split} set: {len(train_loader.dataset)}, '
          f'{args.valid_split} set: {len(valid_loader.dataset)}, '
          f'attr_num : {train_set.attr_num}')

    labels = train_set.label
    sample_weight = labels.mean(0)

    # backbone = resnet50()
    # classifier = BaseClassifier(nattr=train_set.attr_num)
    backbone = osnet_ain_x1_0(num_classes=56, pretrained=True, loss='softmax')
    classifier = BaseClassifier_osnet(nattr=train_set.attr_num)
    model = FeatClassifier(backbone, classifier)

    if torch.cuda.is_available():
        model = torch.nn.DataParallel(model).cuda()
    # modified
    criterion = CEL_Sigmoid(sample_weight)
    # criterion = multilabel_categorical_crossentropy

    param_groups = [{
        'params': model.module.finetune_params(),
        'lr': args.lr_ft
    }, {
        'params': model.module.fresh_params(),
        'lr': args.lr_new
    }]
    optimizer = torch.optim.SGD(param_groups,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=False)
    lr_scheduler = ReduceLROnPlateau(optimizer, factor=0.1, patience=4)
    if args.training:  # training: evaluate on the validation set every epoch
        best_metric, epoch = trainer(epoch=args.train_epoch,
                                     model=model,
                                     train_loader=train_loader,
                                     valid_loader=valid_loader,
                                     criterion=criterion,
                                     optimizer=optimizer,
                                     lr_scheduler=lr_scheduler,
                                     path=save_model_path,
                                     dataset=train_set)

        print(f'{visenv_name}, best_metric: {best_metric} in epoch {epoch}')
    else:  # evaluation only, no training
        model_path = args.best_model_path
        saved_state_dict = torch.load(model_path)['state_dicts']
        saved_state_dict = {
            k.replace('module.', ''): v
            for k, v in saved_state_dict.items()
        }
        backbone = osnet_ain_x1_0(num_classes=valid_set.attr_num,
                                  pretrained=True,
                                  loss='softmax')
        print('make model for only test')
        classifier = BaseClassifier_osnet(nattr=valid_set.attr_num)
        test_model = FeatClassifier(backbone, classifier)
        print("loading model")
        test_model.load_state_dict(saved_state_dict)
        test_model.cuda()
        test_alm(valid_loader,
                 test_model,
                 attr_num=valid_set.attr_num,
                 description=valid_set.attr_id,
                 set='test',
                 threshold=0.5)
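
For context on sample_weight above: CEL_Sigmoid is a per-attribute weighted sigmoid cross-entropy, and labels.mean(0) supplies the positive ratio of each attribute. The repo's exact weighting is not shown here, so the following is only a rough DeepMAR-style sketch, not the actual implementation.

import torch
import torch.nn.functional as F

def weighted_sigmoid_ce(logits, targets, pos_ratio):
    # pos_ratio: per-attribute positive frequency, e.g. labels.mean(0)
    pos_ratio = torch.as_tensor(pos_ratio, dtype=logits.dtype, device=logits.device)
    weight = torch.where(targets == 1,
                         torch.exp(1.0 - pos_ratio),  # up-weight rare positive labels
                         torch.exp(pos_ratio))        # lighter weight where negatives are abundant
    return F.binary_cross_entropy_with_logits(
        logits, targets.float(), weight=weight, reduction='mean')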
Example #7
if __name__ == '__main__':
    trans_flag = False
    if trans_flag:
        model_path = '../exp_result/VSP/VSP/img_model/ckpt_max.pth'
        saved_state_dict = torch.load(model_path)['state_dicts']
        saved_state_dict = {
            k.replace('module.', ''): v
            for k, v in saved_state_dict.items()
        }
        # saved_state_dict = {k.replace('backbone.conv2.0.conv1.', 'module.backbone.conv2.0.conv1.'): v for k, v in saved_state_dict.items()}
        backbone = osnet_ain_x1_0(num_classes=56,
                                  pretrained=True,
                                  loss='softmax')
        classifier = BaseClassifier_osnet(nattr=56)
        torch_model = FeatClassifier(backbone, classifier)
        torch_model.load_state_dict(saved_state_dict)
        input = torch.ones(1, 3, 256, 128)
        # torch2onnx
        # torch_out = torch.onnx.export(torch_model, input, "person_Atrr_osnet_tmp.onnx", export_params=True, verbose=True)
    else:  # inf
        # inference eg
        # att_list
        dataset_info = pickle.load(
            open('../dataset/preprocess/data/VSP/dataset.pkl', 'rb+'))
        att_list = dataset_info.attr_name
        # print(att_list)

        #
        import onnxruntime
        session = onnxruntime.InferenceSession("person_Atrr_osnet.onnx")
        print("The model expects input shape: ", session.get_inputs()[0].shape)
Example #8
def main(args):
    os.environ['CUDA_VISIBLE_DEVICES'] = args.device
    print('load the model from:   ' + args.save_path)
    exp_dir = os.path.join(args.save_path, args.dataset, args.dataset,
                           'img_model/ckpt_max.pth')
    train_tsfm, valid_tsfm = get_transform(args)

    #valid_set = AttrDataset(args=args, split=args.valid_split, transform=valid_tsfm)
    valid_set = AttrDataset(args=args,
                            split=args.valid_split,
                            transform=valid_tsfm,
                            target_transform=None,
                            Type='val')

    valid_loader = DataLoader(
        dataset=valid_set,
        batch_size=args.batchsize,
        shuffle=False,
        num_workers=0,
        pin_memory=True,
    )
    print('have generated dataset')

    if args.model_name == 'resnet50':
        backbone = resnet50()
    if args.model_name == 'resnet18':
        backbone = resnet18()
    if args.model_name == 'resnet18_consistent':
        backbone = resnet18_consistent()
    if args.model_name == 'resnet18_stn':
        backbone = resnet18_stn()
    if args.model_name == 'resnet18_autoencoder':
        backbone = resnet18_autoencoder()
    if args.model_name == 'resnet50_dynamic_se':
        backbone = resnet50_dynamic_se()
    if args.model_name == 'resnet18_dynamic_se':
        backbone = resnet18_dynamic_se()
    if args.model_name == 'resnet18_group_se':
        backbone = resnet18_group_se()
    if args.model_name == 'resnet18_vit':
        backbone = resnet18_vit()
    if args.model_name == 'resnet18_vit_v2':
        backbone = resnet18_vit_v2()
    if args.model_name == 'resnet18_vit_v3':
        backbone = resnet18_vit_v3()
    if args.model_name == 'resnet18_vit_v4':
        backbone = resnet18_vit_v4()
    if args.model_name == 'resnet34':
        backbone = resnet34()
    if args.model_name == 'resnet18_vit_split':
        backbone = resnet18_vit_split(num_classes=valid_set.attr_num)
    if args.model_name == 'resnet18_energy_vit':
        backbone = resnet18_energy_vit(num_classes=valid_set.attr_num)
    if args.model_name == 'resnet_depth':
        backbone = resnet_depth(num_classes=valid_set.attr_num)
    if args.model_name == 'spatial_modulator':
        backbone = spatial_modulator()
    if args.model_name == 'fusion_concat':
        backbone = fusion_concat()
    classifier = BaseClassifier(nattr=valid_set.attr_num)
    model = FeatClassifier(backbone, classifier)

    if torch.cuda.is_available():
        model = torch.nn.DataParallel(model).cuda()
        #model = model.cuda()

    #loading state_dict from the model
    model.load_state_dict(torch.load(exp_dir)['state_dicts'])

    #load_ckpt(model, exp_dir)
    print('have load from the pretrained model')

    #start eval
    labels = valid_set.label
    sample_weight = labels.mean(0)
    criterion = CEL_Sigmoid(sample_weight)
    valid_loss, valid_gt, valid_probs = valid_trainer(
        model=model,
        valid_loader=valid_loader,
        criterion=criterion,
    )
    valid_result = get_pedestrian_metrics(valid_gt, valid_probs)

    #print result
    print(
        f'Evaluation on test set, \n',
        'ma: {:.4f},  pos_recall: {:.4f} , neg_recall: {:.4f} \n'.format(
            valid_result.ma, np.mean(valid_result.label_pos_recall),
            np.mean(valid_result.label_neg_recall)),
        'Acc: {:.4f}, Prec: {:.4f}, Rec: {:.4f}, F1: {:.4f}'.format(
            valid_result.instance_acc, valid_result.instance_prec,
            valid_result.instance_recall, valid_result.instance_f1))

    #for index in range(5, 35):
    for index in range(len(valid_set.attr_name)):
        print(f'{valid_set.attr_name[index]}')
        print(
            f'pos recall: {valid_result.label_pos_recall[index]}  neg_recall: {valid_result.label_neg_recall[index]}  ma: {valid_result.label_ma[index]}'
        )
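
The backbone selection in this example is a long run of independent if statements; the same mapping could be kept in a dict. The sketch below assumes the same constructors are importable and is only a style suggestion, not part of the original.

BACKBONES = {
    'resnet50': resnet50,
    'resnet18': resnet18,
    'resnet34': resnet34,
    'resnet18_stn': resnet18_stn,
    'resnet18_vit': resnet18_vit,
    # ... remaining entries follow the same pattern
}

def build_backbone(name, attr_num):
    ctor = BACKBONES[name]
    # a few constructors take num_classes, mirroring the branches above
    if name in ('resnet18_vit_split', 'resnet18_energy_vit', 'resnet_depth'):
        return ctor(num_classes=attr_num)
    return ctor()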
Example #9
def main(args):
    visenv_name = args.dataset
    exp_dir = os.path.join('exp_result', args.dataset)
    model_dir, log_dir = get_model_log_path(exp_dir, visenv_name)
    stdout_file = os.path.join(log_dir, f'stdout_{time_str()}.txt')
    save_model_path = os.path.join(model_dir, 'ckpt_max.pth')

    if args.redirector:
        print('redirector stdout')
        ReDirectSTD(stdout_file, 'stdout', False)

    pprint.pprint(OrderedDict(args.__dict__))

    print('-' * 60)
    print(f'use GPU{args.device} for training')
    print(
        f'train set: {args.dataset} {args.train_split}, test set: {args.valid_split}'
    )

    train_tsfm, valid_tsfm = get_transform(args)
    print(train_tsfm)

    train_set = AttrDataset(args=args,
                            split=args.train_split,
                            transform=train_tsfm)

    train_loader = DataLoader(
        dataset=train_set,
        batch_size=args.batchsize,
        shuffle=True,
        num_workers=4,
        pin_memory=True,
    )
    valid_set = AttrDataset(args=args,
                            split=args.valid_split,
                            transform=valid_tsfm)

    valid_loader = DataLoader(
        dataset=valid_set,
        batch_size=args.batchsize,
        shuffle=False,
        num_workers=4,
        pin_memory=True,
    )

    print(f'{args.train_split} set: {len(train_loader.dataset)}, '
          f'{args.valid_split} set: {len(valid_loader.dataset)}, '
          f'attr_num : {train_set.attr_num}')

    labels = train_set.label
    sample_weight = labels.mean(0)

    backbone = resnet50()
    classifier = BaseClassifier(nattr=train_set.attr_num)
    model = FeatClassifier(backbone, classifier)

    checkpoint = torch.load(
        '/home/sohaibrabbani/PycharmProjects/Strong_Baseline_of_Pedestrian_Attribute_Recognition/exp_result/custom/custom/img_model/ckpt_max.pth'
    )
    model.load_state_dict({
        k.replace('module.', ''): v
        for k, v in checkpoint['state_dicts'].items()
    })

    for param in model.backbone.parameters():
        param.requires_grad = False

    if torch.cuda.is_available():
        model = torch.nn.DataParallel(model).cuda()

    criterion = CEL_Sigmoid(sample_weight)

    param_groups = [
        {'params': filter(lambda p: p.requires_grad, model.module.finetune_params()),
         'lr': args.lr_ft},
        {'params': filter(lambda p: p.requires_grad, model.module.fresh_params()),
         'lr': args.lr_new},
    ]

    optimizer = torch.optim.SGD(param_groups,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=False)
    lr_scheduler = ReduceLROnPlateau(optimizer, factor=0.1, patience=4)

    best_metric, epoch = trainer(epoch=args.train_epoch,
                                 model=model,
                                 train_loader=train_loader,
                                 valid_loader=valid_loader,
                                 criterion=criterion,
                                 optimizer=optimizer,
                                 lr_scheduler=lr_scheduler,
                                 path=save_model_path)

    print(f'{visenv_name}, best_metric: {best_metric} in epoch {epoch}')
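
A small sanity check for the frozen-backbone setup above, which in a real run would sit right after the requires_grad loop; this is an added sketch, not part of the original example.

trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
frozen = sum(p.numel() for p in model.parameters() if not p.requires_grad)
print(f'trainable params: {trainable}, frozen (backbone) params: {frozen}')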