import argparse

import torch

# model definition from the pedestrian-attribute-recognition-pytorch repo
# (import path assumed to match the repo layout)
from baseline.model.DeepMAR import DeepMAR_ResNet50

parser = argparse.ArgumentParser(description="attribute detection")
parser.add_argument("--trained_model", type=str,
                    default='/home/thulab/Desktop/pedestrian-attribute-recognition-pytorch/exp/deepmar_resnet50/pa100k/partition0/run1/model/ckpt_epoch150.pth',
                    help="path to the trained checkpoint to export")
# parser.add_argument("--label_file", type=str, help="The label file path.")
# parser.add_argument("--nms_method", type=str, default="hard")
args = parser.parse_args()

model_kwargs = {'drop_pool5': True,
                 'drop_pool5_rate': 0.5,
                 'last_conv_stride': 2,
                 'num_att': 35}
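# judging by the names above: drop_pool5/drop_pool5_rate presumably enable
# dropout after the global pooling layer, last_conv_stride sets the stride of
# the last ResNet stage, and num_att is the number of attribute scores the
# classifier head outputs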

DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if __name__ == '__main__':
    model = DeepMAR_ResNet50(**model_kwargs)
    state = torch.load(args.trained_model, map_location=DEVICE)
    # print(type(state['state_dicts']))
    # print(len(state['state_dicts']))
    # print(state['state_dicts'][0])
    model.load_state_dict(state['state_dicts'][0], strict=True)

    example = torch.rand(1, 3, 224, 224)

    model.eval()  # export in inference mode so the pool5 dropout is disabled
    torch.onnx.export(model,
                      example,
                      "test.onnx",
                      verbose=True)

    print('onnx done!')
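
    # A minimal sketch (not part of the original script) for sanity-checking the
    # exported graph, assuming onnxruntime is installed: run the same dummy input
    # through both models and compare the outputs.
    import numpy as np
    import onnxruntime as ort

    sess = ort.InferenceSession("test.onnx")
    onnx_out = sess.run(None, {sess.get_inputs()[0].name: example.numpy()})[0]
    with torch.no_grad():
        ref_out = model(example).numpy()
    print('max abs diff between PyTorch and ONNX outputs:', np.abs(onnx_out - ref_out).max())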
# set the random seed
if cfg.set_seed:
    set_seed(cfg.rand_seed)
# init the gpu ids
set_devices(cfg.sys_device_ids)

# dataset
normalize = transforms.Normalize(mean=cfg.mean, std=cfg.std)
test_transform = transforms.Compose([
    transforms.Resize(cfg.resize),
    transforms.ToTensor(),
    normalize,
])

### Att model ###
model = DeepMAR_ResNet50(**cfg.model_kwargs)

# load model weight if necessary
if cfg.load_model_weight:
    map_location = (lambda storage, loc: storage)  # keep the checkpoint on CPU when loading
    ckpt = torch.load(cfg.model_weight_file, map_location=map_location)
    model.load_state_dict(ckpt['state_dicts'][0])

model.cuda()
model.eval()

# load one image
img = Image.open(cfg.demo_image).convert('RGB')  # force 3 channels to match the normalize transform
img_trans = test_transform(img)
img_trans = torch.unsqueeze(img_trans, dim=0)
img_var = img_trans.cuda()  # torch.autograd.Variable is deprecated; a plain tensor works
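
# A minimal sketch (not part of the original snippet) of the forward pass:
# DeepMAR_ResNet50 outputs one raw score per attribute, so a value above 0 is
# commonly treated as a positive prediction (sigmoid > 0.5).
with torch.no_grad():
    score = model(img_var).cpu().numpy()[0]
positive_idx = [i for i, s in enumerate(score) if s > 0]
print('predicted positive attribute indices:', positive_idx)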
Example #3
# assumed reconstruction of the truncated call head: build the training DataLoader
train_loader = torch.utils.data.DataLoader(
    dataset=train_set, batch_size=cfg.batch_size, shuffle=True,
    num_workers=cfg.workers, pin_memory=True, drop_last=False)

test_transform = transforms.Compose([
    transforms.Resize(cfg.resize),
    transforms.ToTensor(),
    normalize,
])
test_set = AttDataset(dataset=cfg.dataset,
                      partition=cfg.partition,
                      split=cfg.test_split,
                      partition_idx=cfg.partition_idx,
                      transform=test_transform)
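
# A minimal sketch (assumed, mirroring the training loader above; cfg.batch_size
# is a guess at the config field name): the test split gets its own DataLoader
# for evaluation, without shuffling.
test_loader = torch.utils.data.DataLoader(
    dataset=test_set, batch_size=cfg.batch_size, shuffle=False,
    num_workers=cfg.workers, pin_memory=True, drop_last=False)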
### Att model ###
model = DeepMAR_ResNet50(**cfg.model_kwargs)  # multi-label attribute classification with a ResNet-50 backbone

# Wrap the model after set_devices, data parallel
model_w = torch.nn.DataParallel(model)  # single-machine multi-GPU training with DataParallel

# optionally use a weighted cross-entropy loss based on per-attribute label frequencies
if cfg.weighted_entropy:
    rate = np.array(train_set.partition['weight_' +
                                        cfg.split][cfg.partition_idx])
    rate = rate[train_set.dataset['selected_attribute']].tolist()
else:
    rate = None
# compute per-attribute weights for positive and negative samples
if rate is None:
    weight_pos = [1 for i in range(num_att)]
    weight_neg = [1 for i in range(num_att)]