Example #1
def main():
    args = parse_args()
    cfg = Config.fromfile(args.config)

    img_tensor = get_img_tensor(args.input, args.use_cuda)
    # global attribute predictor will not use landmarks
    # just set a default value
    landmark_tensor = torch.zeros(8)
    print(cfg.model)
    model = build_predictor(cfg.model)
    load_checkpoint(model, args.checkpoint, map_location='cpu')
    print('model loaded from {}'.format(args.checkpoint))
    if args.use_cuda:
        model.cuda()
        landmark_tensor = landmark_tensor.cuda()

    model.eval()

    # predict probabilities for each attribute and category
    attr_prob, cate_prob = model(
        img_tensor, attr=None, landmark=landmark_tensor, return_loss=False)
    attr_predictor = AttrPredictor(cfg.data.test)
    cate_predictor = CatePredictor(cfg.data.test)

    attr_predictor.show_prediction(attr_prob)
    cate_predictor.show_prediction(cate_prob)
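The snippet above (like Examples #2 and #3 below) omits its imports and the parse_args helper it calls. The following is a minimal sketch of those missing pieces, assuming an mmcv-style config and checkpoint loader; the flag names are inferred from the attribute accesses in the example (input, config, checkpoint, use_cuda) and may differ in the real script.

# Imports the snippet above relies on; Config and load_checkpoint come from
# mmcv, while build_predictor, AttrPredictor, CatePredictor and
# get_img_tensor come from the surrounding project (e.g. mmfashion).
import argparse

import torch
from mmcv import Config
from mmcv.runner import load_checkpoint


def parse_args():
    # Flag names are an assumption inferred from the attribute accesses in
    # the example; adjust them to the real script.
    parser = argparse.ArgumentParser(
        description='Predict clothing attributes and categories for one image')
    parser.add_argument('--input', help='path to the input image')
    parser.add_argument('--config', help='path to the model config file')
    parser.add_argument('--checkpoint', help='path to the trained checkpoint')
    parser.add_argument('--use_cuda', action='store_true', help='run on GPU')
    return parser.parse_args()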
Example #2
def main():
    args = parse_args()
    cfg = Config.fromfile(args.config)

    img_tensor = get_img_tensor(args.input, args.use_cuda)

    cfg.model.pretrained = None
    model = build_predictor(cfg.model)
    load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.use_cuda:
        model.cuda()

    model.eval()

    # predict probabilities for each attribute
    attr_prob = model(img_tensor, attr=None, landmark=None, return_loss=False)
    attr_predictor = AttrPredictor(cfg.data.test)

    attr_predictor.show_prediction(attr_prob)
Example #3
def main():
    args = parse_args()
    cfg = Config.fromfile(args.config)

    img = cv2.imread(args.input, -1)
    img_tensor = img_to_tensor(img, squeeze=True, cuda=args.use_cuda)

    model = build_predictor(cfg.model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.use_cuda:
        model.cuda()

    model.eval()

    # predict probabilities for each attribute
    attr_prob = model(img_tensor, attr=None, landmark=None, return_loss=False)
    attr_predictor = AttrPredictor(cfg.data.test)

    attr_predictor.show_prediction(attr_prob)
Example #4
def main():
    args = parse_args()
    cfg = Config.fromfile(args.config)
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from

    # init distributed env first
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    # build model
    model = build_predictor(cfg.model)
    print('model built')

    if cfg.init_weights_from:
        model = init_weights_from(cfg.init_weights_from, model)

    # data loader
    dataset = get_dataset(cfg.data.train)
    print('dataset loaded')

    # train
    train_predictor(
        model,
        dataset,
        cfg,
        distributed=distributed,
        validate=args.validate,
        logger=logger)
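This training entry point (and the test script in Example #7 below) also relies on a parse_args helper that is not shown. Here is a minimal sketch under the assumption that the flag names match the attribute accesses above; the launcher choices follow the usual mmcv convention, and the real scripts may declare the arguments differently.

import argparse


def parse_args():
    # Flag names are an assumption inferred from the attribute accesses in
    # Examples #4 and #7 (work_dir, resume_from, checkpoint, launcher, seed,
    # validate).
    parser = argparse.ArgumentParser(description='Train or test a predictor')
    parser.add_argument('config', help='path to the config file')
    parser.add_argument('--work_dir', help='directory for logs and checkpoints')
    parser.add_argument('--resume_from', help='checkpoint to resume training from')
    parser.add_argument('--checkpoint', help='checkpoint to load for testing')
    parser.add_argument('--validate', action='store_true',
                        help='run validation while training/testing')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument('--launcher', default='none',
                        choices=['none', 'pytorch', 'slurm', 'mpi'],
                        help='distributed job launcher')
    return parser.parse_args()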
Example #5
def main():
    args = parse_args()
    cfg = Config.fromfile(args.config)

    cfg.model.pretrained = None
    model = build_predictor(cfg.model)
    load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.use_cuda:
        model.cuda()
    model.eval()
    attr_predictor = AttrPredictor(cfg.data.test)
    files = os.listdir(args.input)
    for filename in tqdm(files):
        filepath = os.path.join(args.input, filename)
        img_tensor = get_img_tensor(filepath, args.use_cuda)
        attr_prob = model(img_tensor, attr=None, landmark=None, return_loss=False)
        attr_predictor.show_prediction(attr_prob, filepath)
Example #6
def process_recordings(q, webapi):
    # Initialize clothing category classifier
    cfg = Config.fromfile(CONFIG_FILE)

    landmark_tensor = torch.zeros(8)

    model = build_predictor(cfg.model)
    load_checkpoint(model, CHECKPOINT_FILE, map_location='cpu')
    print('model loaded from {}'.format(CHECKPOINT_FILE))
    if USE_CUDA:
        model.cuda()
        landmark_tensor = landmark_tensor.cuda()

    model.eval()
    cate_predictor = CatePredictor(cfg.data.test, tops_type=[1])

    # Initialize tracker model
    yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE)
    load_yolo_weights(yolo, Darknet_weights)  # use Darknet weights

    while True:
        if q.empty():
            time.sleep(1)
            continue

        recording_id, recording_filename = q.get()

        if recording_id == -1:
            break

        recording_filepath = os.path.join(RECORDINGS_DIRECTORY,
                                          recording_filename)
        Object_tracking(yolo,
                        webapi,
                        recording_id,
                        recording_filepath,
                        model,
                        cate_predictor,
                        landmark_tensor,
                        iou_threshold=0.1,
                        rectangle_colors=(255, 0, 0),
                        Track_only=["person"])
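process_recordings consumes (recording_id, recording_filename) pairs from the queue and stops when it sees an id of -1. Below is a minimal sketch of a producer driving it, assuming process_recordings and its model files are importable; the recording names and the webapi object are placeholders.

from multiprocessing import Process, Queue

if __name__ == '__main__':
    q = Queue()
    webapi = None  # placeholder for the real web-API client passed to Object_tracking

    worker = Process(target=process_recordings, args=(q, webapi))
    worker.start()

    # Feed (recording_id, recording_filename) pairs; filenames are resolved
    # against RECORDINGS_DIRECTORY inside the worker.
    q.put((1, 'recording_0001.mp4'))
    q.put((2, 'recording_0002.mp4'))

    # A recording_id of -1 tells the worker loop to exit.
    q.put((-1, ''))
    worker.join()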
Example #7
def main():
    args = parse_args()
    cfg = Config.fromfile(args.config)
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.checkpoint is not None:
        cfg.load_from = args.checkpoint
    # init distributed env first
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed test: {}'.format(distributed))

    # data loader
    dataset = get_dataset(cfg.data.test)
    print('dataset loaded')

    # build model and load checkpoint
    model = build_predictor(cfg.model)
    print('model built')

    load_checkpoint(model, cfg.load_from, map_location='cpu')
    print('load checkpoint from: {}'.format(cfg.load_from))

    # test
    test_predictor(
        model,
        dataset,
        cfg,
        distributed=distributed,
        validate=args.validate,
        logger=logger)
Example #8
def basis_model(config, checkpoint):
    cfg = Config.fromfile(config)

    model = build_predictor(cfg.model)
    load_checkpoint(model, checkpoint, map_location='cpu')
    return model
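A short usage example of this helper; the config and checkpoint paths below are simply the ones that appear elsewhere on this page and stand in for whatever model you actually want to load.

model = basis_model('./configs/category_attribute_predict/global_predictor_vgg.py',
                    './checkpoint/vgg16_fine_global.pth')
model.eval()  # switch to inference mode before running predictions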
Example #9
    return jsonify(resultDict)


if __name__ == '__main__':
    cfg_fine = Config.fromfile(
        './configs/category_attribute_predict/global_predictor_vgg.py')
    cfg_coarse = Config.fromfile(
        './configs/attribute_predict_coarse/global_predictor_resnet_attr.py')
    cfg_ret = Config.fromfile('configs/retriever_in_shop/global_retriever_vgg_loss_id.py')
    
    # global attribute predictor will not use landmarks
    # just set a default value
    landmark_tensor = torch.zeros(8)

    model_fine = build_predictor(cfg_fine.model)
    load_checkpoint(model_fine, './checkpoint/vgg16_fine_global.pth',
                    map_location='cpu')

    model_coarse = build_predictor(cfg_coarse.model)
    load_checkpoint(model_coarse, './checkpoint/resnet_coarse_global.pth',
                    map_location='cpu')


    model_ret = build_retriever(cfg_ret.model).cuda()
    load_checkpoint(model_ret, 'checkpoint/Retrieve/vgg/global/epoch_100.pth', map_location=torch.device('cuda:0'))

    print('Models loaded.')

    model_fine.eval()
    model_coarse.eval()