Example #1
  def test_repeated_base_names(self):
    """Ensures duplicate base names reject the entire config."""
    template_config = config_pb2.InstanceTemplateConfig(
        templates=[
            config_pb2.InstanceTemplateConfig.InstanceTemplate(
                base_name='base-name-1',
            ),
            config_pb2.InstanceTemplateConfig.InstanceTemplate(
                base_name='base-name-2',
            ),
            config_pb2.InstanceTemplateConfig.InstanceTemplate(
                base_name='base-name-3',
            ),
            config_pb2.InstanceTemplateConfig.InstanceTemplate(
                base_name='base-name-4',
            ),
            config_pb2.InstanceTemplateConfig.InstanceTemplate(
                base_name='base-name-2',
            ),
            config_pb2.InstanceTemplateConfig.InstanceTemplate(
                base_name='base-name-3',
            ),
        ],
    )
    self.install_mock(template_config=template_config)

    config.update_config()
    self.assertFalse(config.Configuration.cached().template_config)
    self.assertFalse(config.Configuration.cached().manager_config)
    self.assertFalse(config.Configuration.cached().revision)
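The rejection behaviour exercised above presumably comes from a uniqueness check over template base names inside config.update_config. A minimal sketch of such a validator, assuming only that each template exposes a base_name attribute (the real proto-based validation is not shown here):

def base_names_are_unique(templates):
    """Return True if no two templates share a base_name."""
    seen = set()
    for template in templates:
        if template.base_name in seen:
            return False
        seen.add(template.base_name)
    return True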
Example #2
  def test_empty_configs(self):
    """Ensures empty configs are successfully stored."""
    self.install_mock()

    config.update_config()
    self.assertFalse(config.Configuration.cached().template_config)
    self.assertFalse(config.Configuration.cached().manager_config)
    self.assertEqual(config.Configuration.cached().revision, 'mock-revision')
Example #3
    def username_changed(self, username):
        logging.info("Changing username from %s to %s", self.account.username, username)

        update_config(username=username)  # Update config.

        if self.account.password is not None:
            del self.account.password  # Delete old password, if exists.

        self.account.username = username
        self.account.balance = self.account.byte = None

        self.account.update_all()
Example #4
def authenticate_user(user_name):
    user_items = None
    if os.path.isfile(GLOBAL_CONFIG_FILEPATH):
        global_config_object = config.load_config(GLOBAL_CONFIG_FILEPATH)
        if user_name in global_config_object.sections():
            user_items = config.load_user(global_config_object, user_name)
            return user_items
    print("enter user credentials for user " + user_name)
    user_items = prompt_user_for_user_items(user_name)
    config.create_config(GLOBAL_CONFIG_FILEPATH)
    config.update_config(
        GLOBAL_CONFIG_FILEPATH, user_name, user_items)
    return user_items
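config.create_config and config.update_config are not shown; they presumably behave like thin wrappers over the standard configparser module, with user_items as an iterable of key/value pairs. A hedged sketch under those assumptions:

import configparser
import os

def create_config(filepath):
    """Create an empty INI file if it does not already exist (hypothetical)."""
    if not os.path.isfile(filepath):
        open(filepath, 'w').close()

def update_config(filepath, section, items):
    """Write key/value pairs under `section`, creating the section if needed."""
    parser = configparser.ConfigParser()
    parser.read(filepath)
    if not parser.has_section(section):
        parser.add_section(section)
    for key, value in items:
        parser.set(section, key, str(value))
    with open(filepath, 'w') as f:
        parser.write(f)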
Example #5
def perform_installation(*args, **kwds):
    # Set up Role-based Access Control
    install_rbac()

    q = UserModel.query(UserModel.username == "admin").get()
    if not q:
        model = UserModel(
            username="******", display_name=_("Admin"), password="******", email="*****@*****.**", verified=True
        )
        model.put(force_validation=False)
        rbac.add_role(model.key, rbac.default_role("super_admin"))

    # Configurations
    for item in config_setup.default_configs():
        config.update_config(item.name, item.value, item.visible)
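config_setup.default_configs() is assumed to yield items with name, value, and visible fields, mirroring the inline (name, value, visible) tuples used by the older variant of this installer in Example #8. A plausible sketch:

from collections import namedtuple

ConfigItem = namedtuple('ConfigItem', ['name', 'value', 'visible'])

def default_configs():
    """Hypothetical defaults; the real list lives in config_setup."""
    return [
        ConfigItem('site_name', 'Name', True),
        ConfigItem('user_email_confirm', 'no', True),
    ]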
Example #6
def do_login():
  from config import config, update_config
  request_token_url = 'http://twitter.com/oauth/request_token'
  access_token_url = 'http://twitter.com/oauth/access_token'
  authorize_url = 'http://twitter.com/oauth/authorize'
  success_html = "<html><head><title>Thanks!</title></head><body><h3>Thanks, you're logged in!</h3><p>Feel free to close this window and return to the app.</p></body></html>"
  consumer = oauth2.Consumer(config['consumer']['key'], config['consumer']['secret'])
  client = oauth2.Client(consumer)

  resp, content = client.request(request_token_url, "GET")
  if resp['status'] != '200':
      raise Exception("Invalid response %s." % resp['status'])

  request_token = dict(urlparse.parse_qsl(content))

  print "Please visit the following link in your browser:"
  print "%s?oauth_token=%s" % (authorize_url, request_token['oauth_token'])

  oauth_creds = {}  # filled in by the one-shot callback handler below
  class MyHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    def log_request(self, code='-', size='-'):
      pass # Skip logging output
    def do_GET(self, *args):
      # parse_qs returns a list per key; keep the first value of each
      creds = urlparse.parse_qs(urlparse.urlparse(self.path)[4])
      oauth_creds['oauth_token'] = creds['oauth_token'][0]
      oauth_creds['oauth_verifier'] = creds['oauth_verifier'][0]
      self.send_response(200, "OK")
      self.end_headers()
      self.wfile.write(success_html)

  httpd = SocketServer.TCPServer(("", 8000), MyHandler)
  httpd.handle_request()

  token = oauth2.Token(request_token['oauth_token'],
      request_token['oauth_token_secret'])
  token.set_verifier(oauth_creds['oauth_verifier'])
  client = oauth2.Client(consumer, token)

  resp, content = client.request(access_token_url, "POST")
  auth = dict(urlparse.parse_qsl(content))
  print "Authorized!  Your pine siskin config file has been updated."

  update_config('auth', auth)
Example #7
def authenticate_user(user_name):
    user_items = None
    if os.path.isfile(GLOBAL_CONFIG_FILEPATH):
        global_config_object = config.load_config(GLOBAL_CONFIG_FILEPATH)
        if user_name in global_config_object.sections():
            user_items = config.load_user(global_config_object, user_name)
            return user_items
    user_items = prompt_user_for_user_items(user_name)
    if not user_items:
        return None
    try:
        config.create_config(GLOBAL_CONFIG_FILEPATH)
    except Exception as e:
        print("Failed to create authentication config file due to {}".format(e))
        sys.exit(1)
    config.update_config(
        GLOBAL_CONFIG_FILEPATH, user_name, user_items)
    return user_items
Example #8
def perform_installation(*args, **kwds):
    # Set up Role-based Access Control
    install_rbac()

    q = UserModel.query(UserModel.username == "admin").get()
    if not q:
        model = UserModel(username="******", password="******", email="*****@*****.**", verified=True)
        model.put(force_validation=False)
        rbac.add_role(model.key, rbac.default_role("super_admin"))

    # Configurations
    conf = [
            ("site_name", "Name", True),
            ("session_secret_key", utils.generate_random_string(30), False),
            ("admin_email", "*****@*****.**", True),
            ("user_email_confirm", "no", True),
        ]
    for item in conf:
        config.update_config(item[0], item[1], item[2])
Example #9
  def test_update_configs(self):
    """Ensures config is updated when revision changes."""
    manager_config = config_pb2.InstanceGroupManagerConfig(
        managers=[config_pb2.InstanceGroupManagerConfig.InstanceGroupManager()],
    )
    self.install_mock(revision='revision-1', manager_config=manager_config)

    config.update_config()
    self.assertFalse(config.Configuration.cached().template_config)
    self.assertTrue(config.Configuration.cached().manager_config)
    self.assertEqual(config.Configuration.cached().revision, 'revision-1')

    template_config = config_pb2.InstanceTemplateConfig(
        templates=[config_pb2.InstanceTemplateConfig.InstanceTemplate()],
    )
    self.install_mock(revision='revision-2', template_config=template_config)

    config.update_config()
    self.assertTrue(config.Configuration.cached().template_config)
    self.assertFalse(config.Configuration.cached().manager_config)
    self.assertEqual(config.Configuration.cached().revision, 'revision-2')
Example #10
  def test_update_configs_same_revision(self):
    """Ensures config is not updated when revision doesn't change."""
    manager_config = config_pb2.InstanceGroupManagerConfig(
        managers=[config_pb2.InstanceGroupManagerConfig.InstanceGroupManager()],
    )
    self.install_mock(manager_config=manager_config)

    config.update_config()
    self.assertFalse(config.Configuration.cached().template_config)
    self.assertTrue(config.Configuration.cached().manager_config)
    self.assertEqual(config.Configuration.cached().revision, 'mock-revision')

    template_config = config_pb2.InstanceTemplateConfig(
        templates=[config_pb2.InstanceTemplateConfig.InstanceTemplate()],
    )
    self.install_mock(template_config=template_config)

    config.update_config()
    self.assertFalse(config.Configuration.cached().template_config)
    self.assertTrue(config.Configuration.cached().manager_config)
    self.assertEqual(config.Configuration.cached().revision, 'mock-revision')
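Together, the two tests above pin down the revision gate in update_config: a fetched config is stored only when its revision differs from the cached one, and an invalid config is rejected wholesale. A self-contained sketch of that control flow (the in-memory cache and the is_valid callback are hypothetical stand-ins for the real datastore entity and proto validation):

_cached = {'revision': None, 'template_config': None, 'manager_config': None}

def update_config(revision, template_config, manager_config, is_valid):
    """Store new configs only when the revision changes and they validate."""
    if _cached['revision'] == revision:
        return  # same revision: keep the stored configs untouched
    if not is_valid(template_config, manager_config):
        return  # invalid configs are rejected wholesale
    _cached.update(revision=revision,
                   template_config=template_config,
                   manager_config=manager_config)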
Example #11
  def test_repeated_zone_different_base_name(self):
    """Ensures repeated zones in different base names are valid."""
    manager_config = config_pb2.InstanceGroupManagerConfig(
        managers=[
            config_pb2.InstanceGroupManagerConfig.InstanceGroupManager(
                template_base_name='base-name-1',
                zone='us-central1-a',
            ),
            config_pb2.InstanceGroupManagerConfig.InstanceGroupManager(
                template_base_name='base-name-2',
                zone='us-central1-a',
            ),
            config_pb2.InstanceGroupManagerConfig.InstanceGroupManager(
                template_base_name='base-name-3',
                zone='us-central1-a',
            ),
        ],
    )
    self.install_mock(manager_config=manager_config)

    config.update_config()
    self.assertFalse(config.Configuration.cached().template_config)
    self.assertTrue(config.Configuration.cached().manager_config)
    self.assertEqual(config.Configuration.cached().revision, 'mock-revision')
Example #12
File: lite.py Project: pobv/lite
def application(environ, start_response):
    "the apache mod_wsgi entry point, central dispatcher"
    if CHECK_CONFIG:
        # Reading the configpath globally (at import time) is a bad idea:
        # new processes are added when there is database contention,
        # and exactly then reading the configpath from the database fails,
        # so use a default if one is available.
        configpath = CONFIGPATH if CONFIGPATH else config.get_configpath()
        if configpath:
            config.update_config(configpath)
    reqpath = _request_path(environ)
    content = []
    if reqpath.startswith(LITE):
        reqpath = reqpath[len(LITE):]
    if reqpath == "/" or reqpath.startswith("/index"):
        index(environ, content)
    elif reqpath.startswith("/show"):
        show(environ, content)
    elif reqpath.startswith("/inc"):
        inc(environ, content)
    elif reqpath.startswith("/init"):
        init(environ, content)
    elif reqpath.startswith("/env"):
        env(environ, content)
    else:
        content.append(u"%s WHAT?" % reqpath)
    content = PAGE % {'title': u"python and sqlite3 patterns and tests",
                      'content': u"\n".join(content),
                      'home': wsgiref.util.application_uri(environ)
                      }
    content = content.encode('utf-8')
    start_response('200 OK', [
        ('Content-Type', "text/html; charset=utf-8"),
        ('Content-Length', str(len(content)))
        ])
    return [content]
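_request_path is not shown; under WSGI it is presumably little more than a PATH_INFO lookup, e.g. this hypothetical one-liner:

def _request_path(environ):
    """Hypothetical helper: the path portion of the request."""
    return environ.get('PATH_INFO', '/')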
Example #13
  def test_repeated_zone_same_base_name(self):
    """Ensures repeated zones in a base name reject the entire config."""
    manager_config = config_pb2.InstanceGroupManagerConfig(
        managers=[
            config_pb2.InstanceGroupManagerConfig.InstanceGroupManager(
                template_base_name='base-name-1',
                zone='us-central1-a',
            ),
            config_pb2.InstanceGroupManagerConfig.InstanceGroupManager(
                template_base_name='base-name-2',
                zone='us-central1-b',
            ),
            config_pb2.InstanceGroupManagerConfig.InstanceGroupManager(
                template_base_name='base-name-1',
                zone='us-central1-a',
            ),
        ],
    )
    self.install_mock(manager_config=manager_config)

    config.update_config()
    self.assertFalse(config.Configuration.cached().template_config)
    self.assertFalse(config.Configuration.cached().manager_config)
    self.assertFalse(config.Configuration.cached().revision)
Example #14
def main():
    # transformation
    pose_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    args = parse_args()
    update_config(cfg, args)
    pose_dir = prepare_output_dirs(args.outputDir)
    csv_output_rows = []

    box_model = torchvision.models.detection.fasterrcnn_resnet50_fpn(
        pretrained=True)
    box_model.to(CTX)
    box_model.eval()
    pose_model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(
        cfg, is_train=False)

    if cfg.TEST.MODEL_FILE:
        print('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
        pose_model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE),
                                   strict=False)
    else:
        print('expected a model file defined in config at TEST.MODEL_FILE')

    pose_model.to(CTX)
    pose_model.eval()

    # Load the video
    vidcap = cv2.VideoCapture(args.videoFile)
    fps = vidcap.get(cv2.CAP_PROP_FPS)
    if fps < args.inferenceFps:
        print('desired inference fps is ' + str(args.inferenceFps) +
              ' but video fps is ' + str(fps))
        exit()
    skip_frame_cnt = round(fps / args.inferenceFps)
    frame_width = int(vidcap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    outcap = cv2.VideoWriter(
        '{}/{}_pose.avi'.format(
            args.outputDir,
            os.path.splitext(os.path.basename(args.videoFile))[0]),
        cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), int(skip_frame_cnt),
        (frame_width, frame_height))

    count = 0
    while vidcap.isOpened():
        total_now = time.time()
        ret, image_bgr = vidcap.read()
        count += 1

        if not ret:
            break  # end of stream; `continue` here would spin forever

        if count % skip_frame_cnt != 0:
            continue

        image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)

        # Clone 2 image for person detection and pose estimation
        if cfg.DATASET.COLOR_RGB:
            image_per = image_rgb.copy()
            image_pose = image_rgb.copy()
        else:
            image_per = image_bgr.copy()
            image_pose = image_bgr.copy()

        # Clone 1 image for debugging purpose
        image_debug = image_bgr.copy()

        # object detection box
        now = time.time()
        pred_boxes = get_person_detection_boxes(box_model,
                                                image_per,
                                                threshold=0.9)
        then = time.time()
        print("Find person bbox in: {} sec".format(then - now))

        # No people found; move on to the next frame
        if not pred_boxes:
            count += 1
            continue

        if args.writeBoxFrames:
            for box in pred_boxes:
                cv2.rectangle(
                    image_debug,
                    box[0],
                    box[1],
                    color=(0, 255, 0),
                    thickness=3)  # Draw Rectangle with the coordinates

        # pose estimation : for multiple people
        centers = []
        scales = []
        for box in pred_boxes:
            center, scale = box_to_center_scale(box, cfg.MODEL.IMAGE_SIZE[0],
                                                cfg.MODEL.IMAGE_SIZE[1])
            centers.append(center)
            scales.append(scale)

        now = time.time()
        pose_preds = get_pose_estimation_prediction(pose_model,
                                                    image_pose,
                                                    centers,
                                                    scales,
                                                    transform=pose_transform)
        then = time.time()
        print("Find person pose in: {} sec".format(then - now))

        new_csv_row = []
        for coords in pose_preds:
            # Draw each point on image
            for coord in coords:
                x_coord, y_coord = int(coord[0]), int(coord[1])
                cv2.circle(image_debug, (x_coord, y_coord), 4, (255, 0, 0), 2)
                new_csv_row.extend([x_coord, y_coord])

        total_then = time.time()

        text = "{:03.2f} sec".format(total_then - total_now)
        cv2.putText(image_debug, text, (100, 50), cv2.FONT_HERSHEY_SIMPLEX, 1,
                    (0, 0, 255), 2, cv2.LINE_AA)

        #cv2.imshow("pos", image_debug)
        #if cv2.waitKey(1) & 0xFF == ord('q'):
        #    break

        csv_output_rows.append(new_csv_row)
        img_file = os.path.join(pose_dir, 'pose_{:08d}.jpg'.format(count))
        cv2.imwrite(img_file, image_debug)
        outcap.write(image_debug)

    # write csv
    csv_headers = ['frame']
    for keypoint in COCO_KEYPOINT_INDEXES.values():
        csv_headers.extend([keypoint + '_x', keypoint + '_y'])

    csv_output_filename = os.path.join(args.outputDir, 'pose-data.csv')
    with open(csv_output_filename, 'w', newline='') as csvfile:
        csvwriter = csv.writer(csvfile)
        csvwriter.writerow(csv_headers)
        csvwriter.writerows(csv_output_rows)

    vidcap.release()
    outcap.release()
Example #15
def main():
    args = parse_args()
    update_config(cfg, args)

    # Add KFold training support
    isKFold = True
    cross_train_set = get_img_Ids(
        'data/tiger/annotations/person_keypoints_train.json')
    # bas_train, base_test = kfold_split_generate(cross_train_set)
    from sklearn.model_selection import KFold
    kFoldNum = 1
    kf = KFold(n_splits=5)  # Create 5 splits for the Tiger dataset
    for Ftrain, Ftest in kf.split(cross_train_set):
        train_data = [cross_train_set[i] for i in Ftrain]

        val_data = [cross_train_set[i] for i in Ftest]

        logger, final_output_dir, tb_log_dir = create_logger(
            cfg, args.cfg, 'train', str(kFoldNum))

        kFoldNum = kFoldNum + 1

        logger.info(pprint.pformat(args))
        logger.info(cfg)

        # cudnn related setting
        cudnn.benchmark = cfg.CUDNN.BENCHMARK
        torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
        torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

        model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(
            cfg, is_train=True)

        # copy model file
        this_dir = os.path.dirname(__file__)
        shutil.copy2(
            os.path.join(this_dir, '../lib/models', cfg.MODEL.NAME + '.py'),
            final_output_dir)
        # logger.info(pprint.pformat(model))

        writer_dict = {
            'writer': SummaryWriter(log_dir=tb_log_dir),
            'train_global_steps': 0,
            'valid_global_steps': 0,
        }

        dump_input = torch.rand(
            (1, 3, cfg.MODEL.IMAGE_SIZE[1], cfg.MODEL.IMAGE_SIZE[0]))
        writer_dict['writer'].add_graph(model, (dump_input, ))

        logger.info(get_model_summary(model, dump_input))

        model = torch.nn.DataParallel(model, device_ids=cfg.GPUS).cuda()

        # define loss function (criterion) and optimizer
        criterion = JointsMSELoss(
            use_target_weight=cfg.LOSS.USE_TARGET_WEIGHT).cuda()

        # Data loading code
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])

        print("TRAIN DATA COUNT : " + str(len(train_data)))
        print("VAL DATA COUNT : " + str(len(val_data)))

        train_dataset = eval('dataset.' + cfg.DATASET.DATASET)(
            cfg, cfg.DATASET.ROOT, cfg.DATASET.TRAIN_SET, True,
            transforms.Compose([
                transforms.ToTensor(),
                normalize,
            ]), isKFold, train_data)
        valid_dataset = eval('dataset.' + cfg.DATASET.DATASET)(
            cfg, cfg.DATASET.ROOT, cfg.DATASET.TEST_SET, False,
            transforms.Compose([
                transforms.ToTensor(),
                normalize,
            ]), False, val_data)

        train_loader = torch.utils.data.DataLoader(
            train_dataset,
            batch_size=cfg.TRAIN.BATCH_SIZE_PER_GPU * len(cfg.GPUS),
            shuffle=cfg.TRAIN.SHUFFLE,
            num_workers=cfg.WORKERS,
            pin_memory=cfg.PIN_MEMORY)
        valid_loader = torch.utils.data.DataLoader(
            valid_dataset,
            batch_size=cfg.TEST.BATCH_SIZE_PER_GPU * len(cfg.GPUS),
            shuffle=False,
            num_workers=cfg.WORKERS,
            pin_memory=cfg.PIN_MEMORY)

        best_perf = 0.0
        best_model = False
        last_epoch = -1
        optimizer = get_optimizer(cfg, model)
        begin_epoch = cfg.TRAIN.BEGIN_EPOCH
        checkpoint_file = os.path.join(final_output_dir, 'checkpoint.pth')

        if cfg.AUTO_RESUME and os.path.exists(checkpoint_file):
            logger.info("=> loading checkpoint '{}'".format(checkpoint_file))
            checkpoint = torch.load(checkpoint_file)
            begin_epoch = checkpoint['epoch']
            best_perf = checkpoint['perf']
            last_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])

            optimizer.load_state_dict(checkpoint['optimizer'])
            logger.info("=> loaded checkpoint '{}' (epoch {})".format(
                checkpoint_file, checkpoint['epoch']))

        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer,
            cfg.TRAIN.LR_STEP,
            cfg.TRAIN.LR_FACTOR,
            last_epoch=last_epoch)

        for epoch in range(begin_epoch, cfg.TRAIN.END_EPOCH):
            # train for one epoch
            train(cfg, train_loader, model, criterion, optimizer, epoch,
                  final_output_dir, tb_log_dir, writer_dict)
            # In PyTorch 1.1.0 and later, `lr_scheduler.step()` should be
            # called after `optimizer.step()`, i.e. after the training pass.
            lr_scheduler.step()

            # evaluate on validation set
            perf_indicator = validate(cfg, valid_loader, valid_dataset, model,
                                      criterion, final_output_dir, tb_log_dir,
                                      writer_dict)

            if perf_indicator >= best_perf:
                best_perf = perf_indicator
                best_model = True
            else:
                best_model = False

            logger.info('=> saving checkpoint to {}'.format(final_output_dir))
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'model': cfg.MODEL.NAME,
                    'state_dict': model.state_dict(),
                    'best_state_dict': model.module.state_dict(),
                    'perf': perf_indicator,
                    'optimizer': optimizer.state_dict(),
                }, best_model, final_output_dir)

        final_model_state_file = os.path.join(final_output_dir,
                                              'final_state.pth')
        logger.info(
            '=> saving final model state to {}'.format(final_model_state_file))
        torch.save(model.module.state_dict(), final_model_state_file)
        writer_dict['writer'].close()
Example #16
def reg2loc(cfg, pred_regs):
    # pred_regs [batch, 254, 2]
    num_batch = pred_regs.size()[0]
    location_point = location_point_back(cfg, to_tensor=True)  # [1, 254]
    location_point = location_point.repeat(num_batch, 1)
    pred_locs = torch.zeros(pred_regs.size()).type_as(pred_regs)  # match dtype/device of the predictions

    # filter out
    num_pred = pred_regs.size(1)
    location_point = location_point[:, :num_pred].contiguous()
    # left boundary
    pred_locs[:, :, 0] = location_point - pred_regs[:, :, 0]
    # right boundary
    pred_locs[:, :, 1] = location_point + pred_regs[:, :, 1]

    return pred_locs


if __name__ == '__main__':
    from config import cfg, update_config
    cfg_file = '/data/2/v-yale/ActionLocalization/experiments/anet/ssad_bk.yaml'
    update_config(cfg_file)

    location, duration = location_point_back(cfg,
                                             to_tensor=False,
                                             to_duration=True)
    for i, (loc, dur) in enumerate(zip(location, duration)):
        print(i, loc, dur)
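The boundary arithmetic in reg2loc is easiest to see on a toy tensor: each anchor point minus its left regression gives the segment start, plus its right regression gives the end. A self-contained illustration of just that step (cfg and location_point_back, which normally supply the anchors, are replaced by literals here):

import torch

# One batch, three anchor points along the temporal axis.
location_point = torch.tensor([[10.0, 20.0, 30.0]])
# Per-point (left, right) offsets predicted by the network.
pred_regs = torch.tensor([[[2.0, 3.0], [5.0, 5.0], [1.0, 4.0]]])

pred_locs = torch.zeros_like(pred_regs)
pred_locs[:, :, 0] = location_point - pred_regs[:, :, 0]  # left boundaries
pred_locs[:, :, 1] = location_point + pred_regs[:, :, 1]  # right boundaries
print(pred_locs)  # tensor([[[ 8., 13.], [15., 25.], [29., 34.]]])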
Example #17
def main_worker(gpu, ngpus_per_node, args, final_output_dir, tb_log_dir):

    args.gpu = gpu
    args.rank = args.rank * ngpus_per_node + gpu
    print('Init process group: dist_url: {}, world_size: {}, rank: {}'.format(cfg.DIST_URL, args.world_size, args.rank))
    dist.init_process_group(backend=cfg.DIST_BACKEND, init_method=cfg.DIST_URL, world_size=args.world_size, rank=args.rank)

    update_config(cfg, args)

    # setup logger
    logger, _ = setup_logger(final_output_dir, args.rank, 'train')

    model = eval('models.'+cfg.MODEL.NAME+'.get_pose_net')(cfg, is_train=True)
    logger.info(get_model_summary(model, torch.zeros(1, 3, *cfg.MODEL.IMAGE_SIZE)))

    # copy model file
    if not cfg.MULTIPROCESSING_DISTRIBUTED or (cfg.MULTIPROCESSING_DISTRIBUTED and args.rank % ngpus_per_node == 0):
        this_dir = os.path.dirname(__file__)
        shutil.copy2(os.path.join(this_dir, '../lib/models', cfg.MODEL.NAME + '.py'), final_output_dir)

    writer_dict = {
        'writer': SummaryWriter(log_dir=tb_log_dir),
        'train_global_steps': 0,
        'valid_global_steps': 0,
    }

    if not cfg.MULTIPROCESSING_DISTRIBUTED or (cfg.MULTIPROCESSING_DISTRIBUTED and args.rank % ngpus_per_node == 0):
        dump_input = torch.rand((1, 3, cfg.MODEL.IMAGE_SIZE[1], cfg.MODEL.IMAGE_SIZE[0]))
        writer_dict['writer'].add_graph(model, (dump_input, ))
        # logger.info(get_model_summary(model, dump_input, verbose=cfg.VERBOSE))

    if cfg.MODEL.SYNC_BN:
        model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
    
    torch.cuda.set_device(args.gpu)
    model.cuda(args.gpu)
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])

    # define loss function (criterion) and optimizer
    criterion = JointsMSELoss(use_target_weight=cfg.LOSS.USE_TARGET_WEIGHT).cuda(args.gpu)

    # Data loading code
    train_dataset = eval('dataset.'+cfg.DATASET.DATASET)(
        cfg, cfg.DATASET.ROOT, cfg.DATASET.TRAIN_SET, True,
        transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])
    )
    valid_dataset = eval('dataset.'+cfg.DATASET.DATASET)(
        cfg, cfg.DATASET.ROOT, cfg.DATASET.TEST_SET, False,
        transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])
    )
    
    train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=cfg.TRAIN.BATCH_SIZE_PER_GPU*len(cfg.GPUS),
        shuffle=(train_sampler is None),
        num_workers=cfg.WORKERS,
        pin_memory=cfg.PIN_MEMORY,
        sampler=train_sampler
    )

    valid_loader = torch.utils.data.DataLoader(
        valid_dataset,
        batch_size=cfg.TEST.BATCH_SIZE_PER_GPU*len(cfg.GPUS),
        shuffle=False,
        num_workers=cfg.WORKERS,
        pin_memory=cfg.PIN_MEMORY
    )
    logger.info(train_loader.dataset)

    best_perf = -1
    best_model = False
    last_epoch = -1
    optimizer = get_optimizer(cfg, model)
    begin_epoch = cfg.TRAIN.BEGIN_EPOCH
    checkpoint_file = os.path.join(final_output_dir, 'checkpoint.pth.tar')
    if cfg.AUTO_RESUME and os.path.exists(checkpoint_file):
        logger.info("=> loading checkpoint '{}'".format(checkpoint_file))
        checkpoint = torch.load(checkpoint_file)
        begin_epoch = checkpoint['epoch']
        best_perf = checkpoint['perf']
        last_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])

        optimizer.load_state_dict(checkpoint['optimizer'])
        logger.info("=> loaded checkpoint '{}' (epoch {})".format(checkpoint_file, checkpoint['epoch']))

    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, cfg.TRAIN.LR_STEP, cfg.TRAIN.LR_FACTOR,
        last_epoch=last_epoch)

    for epoch in range(begin_epoch, cfg.TRAIN.END_EPOCH):
        
        # train for one epoch
        train(cfg, train_loader, model, criterion, optimizer, epoch,
              final_output_dir, tb_log_dir, writer_dict)
        # In PyTorch 1.1.0 and later, you should call `lr_scheduler.step()` after `optimizer.step()`.
        lr_scheduler.step()

        # evaluate on validation set
        perf_indicator = validate(
            args, cfg, valid_loader, valid_dataset, model, criterion,
            final_output_dir, tb_log_dir, writer_dict
        )

        if perf_indicator >= best_perf:
            best_perf = perf_indicator
            best_model = True
        else:
            best_model = False

        if not cfg.MULTIPROCESSING_DISTRIBUTED or (
                cfg.MULTIPROCESSING_DISTRIBUTED
                and args.rank == 0
        ):
            logger.info('=> saving checkpoint to {}'.format(final_output_dir))
            save_checkpoint({
                'epoch': epoch + 1,
                'model': cfg.MODEL.NAME,
                'state_dict': model.state_dict(),
                'best_state_dict': model.module.state_dict(),
                'perf': perf_indicator,
                'optimizer': optimizer.state_dict(),
            }, best_model, final_output_dir)

    final_model_state_file = os.path.join(
        final_output_dir, 'final_state{}.pth.tar'.format(gpu)
    )

    logger.info('saving final model state to {}'.format(
        final_model_state_file))
    torch.save(model.module.state_dict(), final_model_state_file)
    writer_dict['writer'].close()
Example #18
def main():
    args = parse_args()
    update_config(cfg, args)

    if not args.camera:
        # handle video
        cam = cv2.VideoCapture(args.video_input)
        video_length = int(cam.get(cv2.CAP_PROP_FRAME_COUNT))
    else:
        cam = cv2.VideoCapture(0)
        video_length = 30000

    ret_val, input_image = cam.read()
    # Video writer
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    input_fps = cam.get(cv2.CAP_PROP_FPS)
    out = cv2.VideoWriter(args.video_output, fourcc, input_fps,
                          (input_image.shape[1], input_image.shape[0]))

    #### load pose-hrnet MODEL
    pose_model = model_load(cfg)
    #  pose_model = torch.nn.DataParallel(pose_model, device_ids=[0,1]).cuda()
    pose_model.cuda()

    item = 0
    for i in tqdm(range(video_length - 1)):

        x0 = ckpt_time()
        ret_val, input_image = cam.read()

        #  if args.camera:
        #  #  For real-time speed, predict on one frame out of every two
        #  if item == 0:
        #  item = 1
        #  continue

        item = 0
        try:
            bboxs, scores = mm_det(human_model, input_image)
            # bbox is coordinate location
            inputs, origin_img, center, scale = PreProcess(
                input_image, bboxs, scores, cfg)
        except Exception:
            out.write(input_image)
            cv2.namedWindow("enhanced", 0)
            cv2.resizeWindow("enhanced", 960, 480)
            cv2.imshow('enhanced', input_image)
            cv2.waitKey(2)
            continue

        with torch.no_grad():
            # compute output heatmap
            inputs = inputs[:, [2, 1, 0]]
            output = pose_model(inputs.cuda())
            # compute coordinate
            preds, maxvals = get_final_preds(cfg,
                                             output.clone().cpu().numpy(),
                                             np.asarray(center),
                                             np.asarray(scale))

        image = plot_keypoint(origin_img, preds, maxvals, 0.1)
        out.write(image)
        if args.display:
            ######### Full screen
            #  out_win = "output_style_full_screen"
            #  cv2.namedWindow(out_win, cv2.WINDOW_NORMAL)
            #  cv2.setWindowProperty(out_win, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
            #  cv2.imshow(out_win, image)

            ########### Fixed window size
            cv2.namedWindow("enhanced", cv2.WINDOW_GUI_NORMAL)
            cv2.resizeWindow("enhanced", 960, 480)
            cv2.imshow('enhanced', image)
            cv2.waitKey(1)
Example #19
def main():
    """ Main function of parserDELWAQ """

# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# ~~~~ Reads config file ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    print('\n\nLoading Options and Configurations\n'+'~'*72+'\n')
    parser = ArgumentParser(
        formatter_class=RawDescriptionHelpFormatter,
        description=('''\n\
Tools for handling DELWAQ files when created by TELEMAC
        '''),
        usage=' (--help for help)\n---------\n       =>  '
              '%(prog)s [option] delwaq.cas \n---------')
    parser = add_config_argument(parser)
    parser.add_argument(
        "--reset", action="store_true",
        dest="areset", default=False,
        help="reset the start time to zero")
    parser.add_argument(
        "--minvol",
        dest="minvol", default='0.001',
        help="make sure there is a minimum volume")
    parser.add_argument(
        "--from",
        dest="tfrom", default="1",
        help="specify the first frame included")
    parser.add_argument(
        "--stop",
        dest="tstop", default="-1",
        help="specify the last frame included (negative from the end)")
    parser.add_argument("args", nargs="+")
    options = parser.parse_args()

# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# ~~~~ Works for only one configuration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    update_config(options)
    CFGS.compute_compilation_info()

# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# ~~~~ Reads command line arguments ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    if len(options.args) < 1:
        print('\nAt least one DELWAQ steering file name is required\n')
        parser.print_help()
        raise Exception
    file_names = options.args

# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# ~~~~ Loop over the DELWAQ files ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    for fle in file_names:

        # ~~> Parse DELWAQ steering file
        print('      ~> scanning your DELWAQ file: '+path.basename(fle))
        dwq = DELWAQ(fle)

        # ~~> Possible options so far
        if options.areset:
            dwq.reset_dwq()
        dwq.minvol_dwq(options.minvol)
        dwq.sample_dwq(options.tfrom, options.tstop)

        # ~~> Convert to Little Endian
        dwq.big2little()


# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# ~~~~ Jenkins' success message ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    print('\n\nMy work is done\n\n')

    sys.exit(0)
Example #20
def main():
    args = parse_args()
    update_config(cfg, args)

    os.environ["CUDA_VISIBLE_DEVICES"] = ','.join([str(x) for x in cfg.GPUS])

    logger, final_output_dir, tb_log_dir = create_logger(cfg,
                                                         args.cfg,
                                                         phase='valid')

    logger.info(pprint.pformat(args))
    logger.info(cfg)

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(cfg,
                                                               is_train=False)

    if cfg.TEST.MODEL_FILE:
        logger.info('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
        model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=False)
    else:
        model_state_file = os.path.join(final_output_dir,
                                        modelDict[args.modelType])
        logger.info('=> loading model from {}'.format(model_state_file))
        if 'current' == args.modelType:
            model.load_state_dict(
                torch.load(model_state_file)['best_state_dict'], strict=True)
        else:
            # model.load_state_dict({k.replace('gen_attention','nl_attention'):v for k,v in torch.load(model_state_file)['best_state_dict'].items()}, strict=True)
            model.load_state_dict(
                torch.load(model_state_file)['best_state_dict'], strict=False)

    model = torch.nn.DataParallel(model, device_ids=list(range(len(
        cfg.GPUS)))).cuda()

    # define loss function (criterion) and optimizer

    if cfg.LOSS.NAME == 'ModMSE_KL_CC_NSS_Loss':
        criterion = ModMSE_KL_CC_Loss(cfg).cuda()
    else:
        criterion = eval(cfg.LOSS.NAME)(cfg).cuda()

    # Data loading code
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    if args.mode == 'val':
        valid_dataset = eval('dataset.' + cfg.DATASET.DATASET)(
            cfg, cfg.DATASET.ROOT, 'val', False,
            transforms.Compose([
                transforms.ToTensor(),
                normalize,
            ]))
        if cfg.DATASET.SAMPLER == "":
            valid_loader = torch.utils.data.DataLoader(
                valid_dataset,
                batch_size=cfg.TEST.BATCH_SIZE_PER_GPU * len(cfg.GPUS),
                shuffle=False,
                num_workers=cfg.WORKERS,
                pin_memory=True)
        elif cfg.DATASET.SAMPLER == "RandomIdentitySampler":
            valid_loader = torch.utils.data.DataLoader(
                valid_dataset,
                sampler=dataset.RandomIdentitySampler(
                    valid_dataset.images,
                    cfg.TEST.BATCH_SIZE_PER_GPU * len(cfg.GPUS),
                    cfg.DATASET.NUM_INSTANCES),
                batch_size=cfg.TEST.BATCH_SIZE_PER_GPU * len(cfg.GPUS),
                shuffle=False,
                num_workers=cfg.WORKERS,
                pin_memory=True)
        else:
            assert False
        # evaluate on validation set
        perf_indicator, res = validate(cfg,
                                       valid_loader,
                                       valid_dataset,
                                       model,
                                       criterion,
                                       final_output_dir,
                                       tb_log_dir,
                                       returnRes=True)

        valid_dataset.evaluate(final_output_dir, res,
                               modelDict[args.modelType].split('.')[0])
    else:
        test_dataset = eval('dataset.' + cfg.DATASET.DATASET)(
            cfg, cfg.DATASET.ROOT, 'test', False,
            transforms.Compose([
                transforms.ToTensor(),
                normalize,
            ]))
        if cfg.DATASET.SAMPLER == "":
            test_loader = torch.utils.data.DataLoader(
                test_dataset,
                batch_size=cfg.TEST.BATCH_SIZE_PER_GPU * len(cfg.GPUS),
                shuffle=False,
                num_workers=cfg.WORKERS,
                pin_memory=True)
        elif cfg.DATASET.SAMPLER == "RandomIdentitySampler":
            test_loader = torch.utils.data.DataLoader(
                test_dataset,
                sampler=dataset.RandomIdentitySampler(
                    test_dataset.images,
                    cfg.TEST.BATCH_SIZE_PER_GPU * len(cfg.GPUS),
                    cfg.DATASET.NUM_INSTANCES),
                batch_size=cfg.TEST.BATCH_SIZE_PER_GPU * len(cfg.GPUS) //
                cfg.DATASET.NUM_INSTANCES,
                shuffle=False,
                num_workers=cfg.WORKERS,
                pin_memory=True)
        else:
            assert False
        output_dir = os.path.join(final_output_dir, cfg.TEST.OUT_DIR)
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)
        test(cfg, test_loader, model, output_dir)
Example #21
def main():
    args = parse_args()
    update_config(cfg, args)

    logger, final_output_dir, tb_log_dir = create_logger(
        cfg, args.cfg, 'valid')

    model = eval('models.'+cfg.MODEL.NAME+'.get_pose_net')(
        cfg, is_train=False
    )

    logger.info('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
    model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE, map_location=torch.device('cpu')), strict=False)

    # Data loading code
    normalize = transforms.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
    )
    valid_dataset = eval('dataset.'+cfg.DATASET.DATASET)(
        cfg, cfg.DATASET.ROOT, cfg.DATASET.TEST_SET, False,
        transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ])
    )
    valid_loader = torch.utils.data.DataLoader(
        valid_dataset,
        batch_size=1,
        shuffle=False,
        num_workers=cfg.WORKERS,
        pin_memory=True
    )

    process_time = AverageMeter()

    # switch to evaluate mode
    model.eval()

    if args.convert_onnx:
        x_tensor = torch.rand(1, 3, 256, 192)
        torch.onnx.export(model.cpu(), x_tensor.cpu(), 'model.onnx', export_params=True,
                          operator_export_type=torch.onnx.OperatorExportTypes.ONNX,
                          opset_version=9,
                          verbose=False)
        logger.info('Model is converted to ONNX')

    with torch.no_grad():
        for i, (input, target, target_weight, meta) in enumerate(valid_loader):
            start_time = time.time()
            # compute output
            output = model(input)

            batch_heatmaps = output.clone().cpu().numpy()
            coords, maxvals = get_max_preds(batch_heatmaps)

            # measure elapsed time
            process_time.update(time.time() - start_time)

            prefix = '{}_{}'.format(
                os.path.join(final_output_dir, 'val'), i
            )
            save_debug_images(cfg, input, meta, target, coords * 4, output,
                              prefix)

            if i == 100:
                break

        logger.info(f'PyTorch inference engine: average processing time of model: {process_time.avg}')
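Once model.onnx has been exported as above, a quick sanity check can be run with onnxruntime (assuming it is installed; the input shape matches the x_tensor used for export):

import numpy as np
import onnxruntime as ort

session = ort.InferenceSession('model.onnx', providers=['CPUExecutionProvider'])
input_name = session.get_inputs()[0].name
x = np.random.rand(1, 3, 256, 192).astype(np.float32)
outputs = session.run(None, {input_name: x})
print(outputs[0].shape)  # heatmap batch, e.g. (1, num_joints, 64, 48)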
Example #22
def main():
    args = parse_args()
    update_config(cfg, args)

    logger, final_output_dir, tb_log_dir = create_logger(
        cfg, args.cfg, 'valid')

    logger.info(pprint.pformat(args))
    logger.info(cfg)

    if torch.cuda.is_available():
        device = torch.device("cuda:0")
        batch_size = cfg.TEST.BATCH_SIZE_PER_GPU * torch.cuda.device_count()
        logger.info("Let's use %d GPUs!" % torch.cuda.device_count())

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(cfg,
                                                               is_train=False)

    if cfg.TEST.MODEL_FILE:
        logger.info('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
        model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE),
                              strict=True)  # False
    else:
        model_state_file = os.path.join(final_output_dir, 'final_state.pth')
        logger.info('=> loading model from {}'.format(model_state_file))
        model.load_state_dict(torch.load(model_state_file))

    # model = torch.nn.DataParallel(model, device_ids=cfg.GPUS).cuda()
    model = torch.nn.DataParallel(model).cuda()

    # define loss function (criterion) and optimizer
    criterion = JointsMSELoss(
        cfg=cfg,
        target_type=cfg.MODEL.TARGET_TYPE,
        use_target_weight=cfg.LOSS.USE_TARGET_WEIGHT).cuda()

    # Data loading code
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    valid_dataset = eval('dataset.' + cfg.DATASET.DATASET)(
        cfg, cfg.DATASET.ROOT, cfg.DATASET.TEST_SET, False,
        transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ]))
    valid_loader = torch.utils.data.DataLoader(
        valid_dataset,
        # batch_size=cfg.TEST.BATCH_SIZE_PER_GPU*len(cfg.GPUS),
        batch_size=batch_size,
        shuffle=False,
        num_workers=cfg.WORKERS,
        pin_memory=True)

    logger.info('=> Start testing...')

    # evaluate on validation set
    validate(cfg, valid_loader, valid_dataset, model, criterion,
             final_output_dir, tb_log_dir)
Example #23
def main():

    # Define the main paths
    data_path = './data'
    imgs = os.listdir(f'{data_path}/images/test_imgs/')

    # Load the config file.
    args = parse_args()
    update_config(cfg, args)

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    batch_size = cfg.TEST.BATCH_SIZE_PER_GPU
    output_path = cfg.output_path

    # Define the model
    model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(cfg,
                                                               is_train=False)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = model.to(device)

    # Load the model weights.
    model.load_state_dict(torch.load(output_path + '/model_best.pth'))

    # Data transforms
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    # Build the test dataset.
    test_dataset = TestDataset(cfg=cfg,
                               root=data_path,
                               image_set=imgs,
                               is_train=False,
                               phase='test',
                               transform=transforms.Compose(
                                   [transforms.ToTensor(), normalize]))

    test_loader = data_utils.DataLoader(test_dataset,
                                        batch_size=batch_size,
                                        shuffle=False)

    # evaluate on validation set
    y_pred = np.array([])

    filenames = []
    with torch.no_grad():

        for i, (input, meta) in enumerate(test_loader):

            input = input.to(device)
            input = input.float()

            outputs = model(input)
            if isinstance(outputs, list):
                output = outputs[-1]
            else:
                output = outputs

            # Read the image center and scale values from the meta data.
            c = meta['center'].numpy()
            s = meta['scale'].numpy()

            # Transform the heatmaps back into keypoint values
            preds, maxvals = get_final_preds(cfg,
                                             output.clone().cpu().numpy(), c,
                                             s)

            # Collect and reshape the predictions
            preds = preds.reshape(preds.shape[0], -1)

            if len(y_pred) == 0:
                y_pred = preds

            else:
                y_pred = np.r_[y_pred, preds]

            filenames += meta['filename']

    # Save the final keypoint results for the test data.
    df_sub = pd.read_csv(f'{data_path}/sample_submission.csv')
    df = pd.DataFrame(columns=df_sub.columns)
    df['image'] = filenames
    df.iloc[:, 1:] = y_pred
    df.head()

    df.to_csv(f'{output_path}/result.csv', index=False)
    torch.cuda.empty_cache()
Example #24
def main():
    args = parse_args()
    update_config(cfg, args)
    img_root_dir = '/input0/MPII/images'

    logger, final_output_dir, tb_log_dir = create_logger(
        cfg, args.cfg, 'valid')

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(cfg,
                                                               is_train=False)

    if cfg.TEST.MODEL_FILE:
        logger.info('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
        model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=False)
    else:
        model_state_file = os.path.join(final_output_dir, 'final_state.pth')
        logger.info('=> loading model from {}'.format(model_state_file))
        model.load_state_dict(torch.load(model_state_file))

    model = torch.nn.DataParallel(model, device_ids=(0, )).cuda()

    img = torch.randn(1, 3, 256, 256)
    output = model(img)
    print(output.size())

    # Load the data
    with open('/input0/MPII/annot/valid.json') as fr:
        valid = json.load(fr)
    print('There are %d images in total' % len(valid))
    for i in range(10):
        a = valid[i]
        img_name = a['image']
        img_path = '%s/%s' % ('/input0/MPII/images', img_name)
        c = np.array(a['center'], dtype=float)  # np.float is gone in modern NumPy
        s = np.array([a['scale'], a['scale']], dtype=float)
        if c[0] != -1:
            c[1] = c[1] + 15 * s[1]
            s = s * 1.25
        c = c - 1
        r = 0
        # Affine transform
        trans = get_affine_transform(c, s, r, output_size=[256, 256])
        print(trans.shape)

        data_numpy = io.imread('%s/%s' % (img_root_dir, img_name))
        input = cv2.warpAffine(data_numpy,
                               trans, (int(256), int(256)),
                               flags=cv2.INTER_LINEAR)

        input_2 = cv2.warpAffine(data_numpy,
                                 trans, (int(256), int(256)),
                                 flags=cv2.INTER_LINEAR)

        print(input.shape)

        # After the affine transform, the joint coordinates must be transformed accordingly
        joints = np.array(a['joints'])
        joints_vis = np.array(a['joints_vis'])
        num_joints = 16
        joints_gt = np.zeros((num_joints, 3), dtype=float)
        joints_vis_gt = np.zeros((num_joints, 3), dtype=float)

        joints_gt[:, 0:2] = joints[:, 0:2] - 1
        joints_vis_gt[:, 0] = joints_vis[:]
        joints_vis_gt[:, 1] = joints_vis[:]

        # use a separate loop variable so the outer image index `i` survives
        for j in range(16):
            if joints_vis_gt[j, 0] > 0.0:
                joints[j, 0:2] = affine_transform(joints[j, 0:2], trans)
        # First draw the ground-truth label image
        for each_point in joints:

            rr, cc = draw.circle(each_point[1], each_point[0], 2)

            draw.set_color(input, [rr, cc], [0, 255, 0])
        io.imsave('%s/%s_%d.jpg' % ('save_img', img_name.split('.')[0], i),
                  input)

        # Predict; the numpy image must first be converted to a tensor
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
        input_t = transforms.Compose([transforms.ToTensor(), normalize])(input)
        input_t = torch.unsqueeze(input_t, 0)
        print('input_t.size():', input_t.size())
        predict_img = model(input_t)
        #         print(predict_img.size())

        # Convert predict_img (1,16,64,64) into (16,2) coordinates to show the prediction
        # (the get_max_preds function in lib/core/inference.py)

        batch_heatmaps = predict_img.detach().cpu().numpy(
        )  # batch_heatmaps must be a numpy.ndarray
        batch_size = batch_heatmaps.shape[0]
        num_joints = batch_heatmaps.shape[1]
        width = batch_heatmaps.shape[3]
        heatmaps_reshaped = batch_heatmaps.reshape(
            (batch_size, num_joints, -1))
        idx = np.argmax(heatmaps_reshaped, 2)
        maxvals = np.amax(heatmaps_reshaped, 2)

        maxvals = maxvals.reshape((batch_size, num_joints, 1))
        idx = idx.reshape((batch_size, num_joints, 1))

        preds = np.tile(idx, (1, 1, 2)).astype(np.float32)

        preds[:, :, 0] = (preds[:, :, 0]) % width
        preds[:, :, 1] = np.floor((preds[:, :, 1]) / width)

        pred_mask = np.tile(np.greater(maxvals, 0.0), (1, 1, 2))
        pred_mask = pred_mask.astype(np.float32)

        preds *= pred_mask
        preds *= 4
        print(preds.shape)
        # Draw the prediction image
        for each_point in preds[0, :, :]:

            rr, cc = draw.circle(each_point[1], each_point[0], 2)

            draw.set_color(input_2, [rr, cc], [0, 255, 0])
        io.imsave('%s/%s_%d_pre.jpg' % ('save_img', img_name.split('.')[0], i),
                  input_2)
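The manual decode inlined above mirrors the get_max_preds function referenced from lib/core/inference.py. Compressed into one reusable helper it looks roughly like this (a sketch distilled from the inlined code, not the library file itself; the stride of 4 matches the `preds *= 4` step above):

import numpy as np

def heatmaps_to_coords(batch_heatmaps, stride=4):
    """Argmax-decode (N, J, H, W) heatmaps into (N, J, 2) image coordinates."""
    n, j, _, w = batch_heatmaps.shape
    flat = batch_heatmaps.reshape(n, j, -1)
    idx = np.argmax(flat, axis=2)
    maxvals = np.amax(flat, axis=2).reshape(n, j, 1)
    preds = np.tile(idx.reshape(n, j, 1), (1, 1, 2)).astype(np.float32)
    preds[:, :, 0] = preds[:, :, 0] % w            # x: column in the heatmap
    preds[:, :, 1] = np.floor(preds[:, :, 1] / w)  # y: row in the heatmap
    preds *= np.tile(np.greater(maxvals, 0.0), (1, 1, 2)).astype(np.float32)
    return preds * stride, maxvals                 # scale back to input size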
Example #25
def main():
    args = parse_args()
    update_config(cfg, args)
    check_config(cfg)

    logger, final_output_dir, tb_log_dir = create_logger(
        cfg, args.cfg, 'valid')

    logger.info(pprint.pformat(args))
    logger.info(cfg)

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(cfg,
                                                               is_train=False)

    dump_input = torch.rand(
        (1, 3, cfg.DATASET.INPUT_SIZE, cfg.DATASET.INPUT_SIZE))
    logger.info(get_model_summary(model, dump_input, verbose=cfg.VERBOSE))

    if cfg.FP16.ENABLED:
        model = network_to_half(model)

    if cfg.TEST.MODEL_FILE:
        logger.info('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
        model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=True)
    else:
        model_state_file = os.path.join(final_output_dir, 'model_best.pth.tar')
        logger.info('=> loading model from {}'.format(model_state_file))
        model.load_state_dict(torch.load(model_state_file))

    model = torch.nn.DataParallel(model, device_ids=cfg.GPUS).cuda()
    model.eval()

    data_loader, test_dataset = make_test_dataloader(cfg)

    if cfg.MODEL.NAME == 'pose_hourglass':
        transforms = torchvision.transforms.Compose([
            torchvision.transforms.ToTensor(),
        ])
    else:
        transforms = torchvision.transforms.Compose([
            torchvision.transforms.ToTensor(),
            torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                             std=[0.229, 0.224, 0.225])
        ])

    parser = HeatmapParser(cfg)
    all_preds = []
    all_scores = []

    pbar = tqdm(total=len(test_dataset)) if cfg.TEST.LOG_PROGRESS else None
    for i, (images, annos) in enumerate(data_loader):
        print(">>>", i)
        assert 1 == images.size(0), 'Test batch size should be 1'

        image = images[0].cpu().numpy()
        # size at scale 1.0
        base_size, center, scale = get_multi_scale_size(
            image, cfg.DATASET.INPUT_SIZE, 1.0, min(cfg.TEST.SCALE_FACTOR))

        with torch.no_grad():
            final_heatmaps = None
            tags_list = []
            for idx, s in enumerate(sorted(cfg.TEST.SCALE_FACTOR,
                                           reverse=True)):
                input_size = cfg.DATASET.INPUT_SIZE
                image_resized, center, scale = resize_align_multi_scale(
                    image, input_size, s, min(cfg.TEST.SCALE_FACTOR))
                image_resized = transforms(image_resized)
                image_resized = image_resized.unsqueeze(0).cuda()

                outputs, heatmaps, tags = get_multi_stage_outputs(
                    cfg, model, image_resized, cfg.TEST.FLIP_TEST,
                    cfg.TEST.PROJECT2IMAGE, base_size)

                ###
                ags = [t[:, 0].unsqueeze(-1).unsqueeze(0) for t in tags][0]

                # This is basically a list comprehension for the tags,
                # rescaling them and making 2D maps out of the flip.
                final_heatmaps, tags_list = aggregate_results(
                    cfg, s, final_heatmaps, tags_list, heatmaps, tags)
            final_heatmaps = final_heatmaps / float(len(
                cfg.TEST.SCALE_FACTOR))  # float (1, 17, 640, 960)

            tags = torch.cat(tags_list, dim=4)  # float (1, 17, 640, 960, 2)

            AGS = True  # ags is [1, 1, 640, 960, 1]
            if AGS:
                parser.tag_per_joint = False
                grouped, scores = parser.parse(final_heatmaps, ags, True, True)
            else:
                grouped, scores = parser.parse(  # True,   True
                    final_heatmaps, tags, cfg.TEST.ADJUST, cfg.TEST.REFINE)

            final_results = get_final_preds(
                grouped, center, scale,
                [final_heatmaps.size(3),
                 final_heatmaps.size(2)])

        if cfg.TEST.LOG_PROGRESS:
            pbar.update()

        if i % cfg.PRINT_FREQ == 0:
            prefix = '{}_{}'.format(
                os.path.join(final_output_dir, 'result_valid'), i)
            # logger.info('=> write {}'.format(prefix))

            save_valid_image(image,
                             final_results,
                             '{}.jpg'.format(prefix),
                             dataset=test_dataset.name)
            # save_debug_images(cfg, image_resized, None, None, outputs, prefix)

        all_preds.append(final_results)
        all_scores.append(scores)

    if cfg.TEST.LOG_PROGRESS:
        pbar.close()

    name_values, _ = test_dataset.evaluate(cfg, all_preds, all_scores,
                                           final_output_dir)

    if isinstance(name_values, list):
        for name_value in name_values:
            _print_name_value(logger, name_value, cfg.MODEL.NAME)
    else:
        _print_name_value(logger, name_values, cfg.MODEL.NAME)
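
A minimal sketch of the multi-scale averaging idea used above (assumed
shapes; not the project's aggregate_results implementation):

import torch
import torch.nn.functional as F

def average_multi_scale(heatmaps_per_scale, out_size):
    # Resize each per-scale heatmap batch to a common size, then average.
    resized = [
        F.interpolate(h, size=out_size, mode='bilinear', align_corners=False)
        for h in heatmaps_per_scale
    ]
    return torch.stack(resized).mean(dim=0)

# e.g. two scales of (1, 17, H, W) maps averaged at the base size (640, 960)
maps = [torch.rand(1, 17, 320, 480), torch.rand(1, 17, 640, 960)]
avg = average_multi_scale(maps, (640, 960))  # (1, 17, 640, 960)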
Exemplo n.º 26
0
import torch
from config import cfg
from config import update_config
from TorchSUL import Model as M
import models.hrnet_dekr
import loss
import hrnet

update_config(cfg)
model_dnet = models.hrnet_dekr.get_pose_net(cfg, is_train=False)
# model = loss.ModelWithLoss(model_dnet)
# M.Saver(model).restore('./model/', strict=False)
# # print(model.model)
# model = model.model
model_dnet.load_state_dict(torch.load('pose_dekr_hrnetw32_coco.pth'))
model = model_dnet
model.eval()

net_dekr = hrnet.DEKR(17)

x = torch.ones(1, 3, 512, 512)
net_dekr(x)
net_dekr.eval()
net_dekr.bn_eps(1e-5)
net = net_dekr.backbone

source_params = {'other': [], 'fuse': []}
target_params = {'other': [], 'fuse': []}
source_buffs = {'other': [], 'fuse': []}
target_buffs = {'other': [], 'fuse': []}
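
These buckets are presumably filled in matching order and then copied
across; a hedged sketch of such order-matched weight transfer (illustrative
two-layer modules, not the HRNet/DEKR networks above):

import torch
import torch.nn as nn

src = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
dst = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))

with torch.no_grad():
    # Parameters (conv/BN weights and biases) transfer by position.
    for p_src, p_dst in zip(src.parameters(), dst.parameters()):
        p_dst.copy_(p_src)
    # Buffers (BN running stats) must be copied separately.
    for b_src, b_dst in zip(src.buffers(), dst.buffers()):
        b_dst.copy_(b_src)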
Exemplo n.º 27
0
def main():
    args = parse_args()
    update_config(cfg, args)

    if args.prevModelDir and args.modelDir:
        # copy pre models for philly
        copy_prev_models(args.prevModelDir, args.modelDir)

    logger, final_output_dir, tb_log_dir = create_logger(
        cfg, args.cfg, 'valid')

    logger.info('########################################')

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(cfg,
                                                               is_train=False)

    if cfg.TEST.MODEL_FILE:
        #logger.info('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
        model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=False)
    else:
        model_state_file = os.path.join(final_output_dir, 'final_state.pth')
        #logger.info('=> loading model from {}'.format(model_state_file))
        model.load_state_dict(torch.load(model_state_file))

    model = torch.nn.DataParallel(model, device_ids=cfg.GPUS).cuda()

    # define loss function (criterion) and optimizer
    criterion = JointsMSELoss(
        use_target_weight=cfg.LOSS.USE_TARGET_WEIGHT).cuda()

    # Data loading code
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    valid_dataset = eval('dataset.' + cfg.DATASET.DATASET)(
        cfg, cfg.DATASET.ROOT, cfg.DATASET.TEST_SET, False,
        transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ]))
    valid_loader = torch.utils.data.DataLoader(
        valid_dataset,
        batch_size=cfg.TEST.BATCH_SIZE_PER_GPU * len(cfg.GPUS),
        shuffle=False,
        num_workers=cfg.WORKERS,
        pin_memory=True)

    ### importing train/validate functions
    if not cfg.MODEL.SPATIOTEMPORAL_POSE_AGGREGATION:
        from core.function import validate
    else:
        from core.function_PoseAgg import validate
    ####

    # evaluate on validation set

    #logger.info('###  Method: {} ###'.format(cfg.EXPERIMENT_NAME))
    validate(cfg, valid_loader, valid_dataset, model, criterion,
             final_output_dir, tb_log_dir)
Exemplo n.º 28
0
def main():
    args = parse_args()
    update_config(cfg, args)

    setup_seed(cfg.SEED)

    os.environ["CUDA_VISIBLE_DEVICES"] = ','.join([str(x) for x in cfg.GPUS])

    logger, final_output_dir, tb_log_dir = create_logger(
        cfg, args.cfg, args.mention, 'train')

    logger.info(pprint.pformat(args))
    logger.info(cfg)

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(cfg,
                                                               is_train=True)
    # print(model)

    # copy model file
    this_dir = os.path.dirname(__file__)
    shutil.copy2(
        os.path.join(this_dir, '../lib/models', cfg.MODEL.NAME + '.py'),
        final_output_dir)
    # logger.info(pprint.pformat(model))

    writer_dict = {
        'writer': SummaryWriter(log_dir=tb_log_dir),
        'train_global_steps': 0,
        'valid_global_steps': 0,
    }

    dump_input = torch.rand(
        (1, 3, cfg.MODEL.IMAGE_SIZE[0], cfg.MODEL.IMAGE_SIZE[1]))

    try:
        writer_dict['writer'].add_graph(model, (dump_input, ))
    except Exception as e:
        logger.info(e)

    try:
        logger.info(get_model_summary(model, dump_input))
    except:
        pass

    model = torch.nn.DataParallel(model, device_ids=list(range(len(
        cfg.GPUS)))).cuda()

    # define loss function (criterion) and optimizer
    criterion = eval(cfg.LOSS.NAME)(cfg).cuda()

    if cfg.LOSS.NAME == 'ModMSE_KL_CC_NSS_Loss':
        criterion_val = ModMSE_KL_CC_Loss(cfg).cuda()
    else:
        criterion_val = criterion

    # Data loading code
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_dataset = eval('dataset.' + cfg.DATASET.DATASET)(
        cfg, cfg.DATASET.ROOT, cfg.DATASET.TRAIN_SET, True,
        transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ]))
    valid_dataset = eval('dataset.' + cfg.DATASET.DATASET)(
        cfg, cfg.DATASET.ROOT, cfg.DATASET.TEST_SET, False,
        transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ]))

    logger.info(os.linesep +
                'train_set : {:d} entries'.format(len(train_dataset)))
    logger.info('val_set   : {:d} entries'.format(len(valid_dataset)) +
                os.linesep)

    if cfg.DATASET.SAMPLER == "":
        train_loader = torch.utils.data.DataLoader(
            train_dataset,
            batch_size=cfg.TRAIN.BATCH_SIZE_PER_GPU * len(cfg.GPUS),
            shuffle=cfg.TRAIN.SHUFFLE,
            num_workers=cfg.WORKERS,
            pin_memory=cfg.PIN_MEMORY)
        valid_loader = torch.utils.data.DataLoader(
            valid_dataset,
            batch_size=cfg.TEST.BATCH_SIZE_PER_GPU * len(cfg.GPUS),
            shuffle=False,
            num_workers=cfg.WORKERS,
            pin_memory=cfg.PIN_MEMORY)
    elif cfg.DATASET.SAMPLER == "RandomIdentitySampler":
        train_loader = torch.utils.data.DataLoader(
            train_dataset,
            sampler=dataset.RandomIdentitySampler(
                train_dataset.images,
                cfg.TRAIN.BATCH_SIZE_PER_GPU * len(cfg.GPUS),
                cfg.DATASET.NUM_INSTANCES),
            batch_size=cfg.TRAIN.BATCH_SIZE_PER_GPU * len(cfg.GPUS) //
            cfg.DATASET.NUM_INSTANCES,
            shuffle=False,
            num_workers=cfg.WORKERS,
            pin_memory=cfg.PIN_MEMORY)
        valid_loader = torch.utils.data.DataLoader(
            valid_dataset,
            sampler=dataset.RandomIdentitySampler(
                valid_dataset.images,
                cfg.TEST.BATCH_SIZE_PER_GPU * len(cfg.GPUS),
                cfg.DATASET.NUM_INSTANCES),
            batch_size=cfg.TEST.BATCH_SIZE_PER_GPU * len(cfg.GPUS) //
            cfg.DATASET.NUM_INSTANCES,
            shuffle=False,
            num_workers=cfg.WORKERS,
            pin_memory=cfg.PIN_MEMORY)
    else:
        assert False

    best_perf = None
    best_model = False
    last_epoch = -1
    optimizer = get_optimizer(cfg, model)
    begin_epoch = cfg.TRAIN.BEGIN_EPOCH
    checkpoint_file = os.path.join(final_output_dir, 'checkpoint.pth')

    if cfg.AUTO_RESUME and os.path.exists(checkpoint_file):
        logger.info("=> loading checkpoint '{}'".format(checkpoint_file))
        checkpoint = torch.load(checkpoint_file)
        begin_epoch = checkpoint['epoch']
        best_perf = checkpoint['perf']
        last_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])

        optimizer.load_state_dict(checkpoint['optimizer'])
        logger.info("=> loaded checkpoint '{}' (epoch {})".format(
            checkpoint_file, checkpoint['epoch']))

    if cfg.TRAIN.WARMUP_EPOCHS > 0:
        lr_scheduler = WarmupMultiStepLR(optimizer,
                                         cfg.TRAIN.LR_STEP,
                                         cfg.TRAIN.LR_FACTOR,
                                         warmup_iters=cfg.TRAIN.WARMUP_EPOCHS,
                                         last_epoch=last_epoch)
    else:
        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer,
            cfg.TRAIN.LR_STEP,
            cfg.TRAIN.LR_FACTOR,
            last_epoch=last_epoch)

    for epoch in range(begin_epoch, cfg.TRAIN.END_EPOCH):
        lr_scheduler.step()

        # torch.cuda.empty_cache()

        # train for one epoch
        train(cfg, train_loader, model, criterion, optimizer, epoch,
              final_output_dir, tb_log_dir, writer_dict)

        # torch.cuda.empty_cache()

        # evaluate on validation set
        perf_indicator, is_larger_better = validate(cfg, valid_loader,
                                                    valid_dataset, model,
                                                    criterion_val,
                                                    final_output_dir,
                                                    tb_log_dir, writer_dict)

        if is_larger_better:
            if best_perf is None or perf_indicator >= best_perf:
                best_perf = perf_indicator
                best_model = True
            else:
                best_model = False
        else:
            if best_perf is None or perf_indicator <= best_perf:
                best_perf = perf_indicator
                best_model = True
            else:
                best_model = False

        logger.info('=> saving checkpoint to {}'.format(final_output_dir))
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'model': cfg.MODEL.NAME,
                'state_dict': model.state_dict(),
                'best_state_dict': model.module.state_dict(),
                'perf': perf_indicator,
                'optimizer': optimizer.state_dict(),
            }, best_model, final_output_dir)

    final_model_state_file = os.path.join(final_output_dir, 'final_state.pth')
    logger.info(
        '=> saving final model state to {}'.format(final_model_state_file))
    torch.save(model.module.state_dict(), final_model_state_file)
    writer_dict['writer'].close()
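
The WarmupMultiStepLR branch above can be pictured as a plain function; a
sketch assuming linear warmup followed by step decay (semantics assumed,
not taken from the class itself):

def warmup_multistep_lr(base_lr, epoch, steps, factor, warmup_epochs):
    if warmup_epochs > 0 and epoch < warmup_epochs:
        return base_lr * float(epoch + 1) / warmup_epochs  # linear ramp
    decays = sum(1 for s in steps if epoch >= s)           # steps passed
    return base_lr * (factor ** decays)

# e.g. base_lr=1e-3, steps=[90, 120], factor=0.1, 5 warmup epochs
print([warmup_multistep_lr(1e-3, e, [90, 120], 0.1, 5)
       for e in (0, 4, 10, 95, 130)])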
def getprunemodel(percent=0.6, index=0):
    args = parse_args()
    update_config(cfg, args)

    logger, final_output_dir, tb_log_dir = create_logger(
        cfg, args.cfg, 'valid')
    final_output_dir += ('/' + str(index))
    tb_log_dir += ('/' + str(index))
    logger.info(pprint.pformat(args))
    logger.info(cfg)

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(cfg,
                                                               is_train=False)
    #oldmodel = eval('models.'+cfg.MODEL.NAME+'.get_pose_net')(
    #cfg, is_train=False
    #)
    if cfg.TEST.MODEL_FILE:
        logger.info('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
        model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=False)
        #oldmodel.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=False)
    else:
        model_state_file = os.path.join(final_output_dir, 'final_state.pth')
        logger.info('=> loading model from {}'.format(model_state_file))
        model.load_state_dict(torch.load(model_state_file))
        #oldmodel.load_state_dict(torch.load(model_state_file))
    model = torch.nn.DataParallel(model, device_ids=cfg.GPUS).cuda()
    #oldmodel = torch.nn.DataParallel(oldmodel, device_ids=cfg.GPUS).cuda()
    # print('old model: ', model)
    modules = list(model.modules())

    purn_bn = []
    for k, m in enumerate(modules):
        name = m._get_name()
        if name == 'Bottleneck':
            purn_bn.append(k + 2)
            purn_bn.append(k + 4)
        if name == 'BasicBlock':
            purn_bn.append(k + 2)

    # for m in modules:
    #     m = list(m.modules)
    #     for mi in m:
    #         print(1)
    # modules = list(model.modules())
    # model_channel_dir = {}
    # for key in model.modules():
    #     while(len(key._modules)!=0):
    #         model_channel_dir['key._get_name()'] = {}

    #     if (len(key._modules)==0):
    #         print(key)
    #     else:
    #         print(key._get_name())
    #         for keyi in key.modules():
    #             print(keyi)
    total = 0
    i = 0
    for m in model.modules():
        if i not in purn_bn:
            i += 1
            continue
        if isinstance(m, nn.BatchNorm2d):
            total += m.weight.data.shape[0]
            i += 1
        else:
            print("error! please check layer type")
            i += 1  # keep the module index in step even for unexpected layers
    bn = torch.zeros(total)
    bnb = torch.zeros(total)
    index = 0
    i = 0
    for m in model.modules():
        if i not in purn_bn:
            i += 1
            continue
        if isinstance(m, nn.BatchNorm2d):
            size = m.weight.data.shape[0]
            bn[index:(index + size)] = m.weight.data.abs().clone()
            bnb[index:(index + size)] = m.bias.data.abs().clone()
            index += size
            i += 1
    y, i = torch.sort(bnb, descending=True)
    thre_index = int(total * percent)
    thre = y[thre_index]

    yw, iw = torch.sort(bn)
    thre_indexw = int(total * percent)
    threw = yw[thre_indexw]

    pruned = 0
    ncfg = []
    allcfg = []
    allcfg_mask = []
    ncfg_mask = []
    old_index = []
    for k, m in enumerate(model.modules()):
        if isinstance(m, nn.BatchNorm2d):
            if k not in purn_bn:
                bias_copy = m.bias.data.abs().clone()
                weight_copy = m.weight.data.abs().clone()
                maskb = bias_copy.gt(0).float().cuda()
                maskw = weight_copy.gt(0).float().cuda()
                mask_copy = maskb.clone() + maskw.clone()
                mask = mask_copy.gt(0).float().cuda()
                allcfg.append(int(mask.shape[0]))
                allcfg_mask.append(mask.clone())
                #ncfg.append(int(mask.shape[0]))
                #ncfg_mask.append(mask.clone())
                print(
                    'layer index: {:d} \t total channel: {:d} \t remaining channel: {:d}'
                    .format(k, mask.shape[0], mask.shape[0]))
                continue
            bias_copy = m.bias.data.abs().clone()
            weight_copy = m.weight.data.abs().clone()
            #weight_temp = weight_copy.gt(float(weight_copy.data.max)).float()
            maskb = bias_copy.le(thre).float().cuda()
            maskw = weight_copy.gt(threw).float().cuda()
            mask = maskb.clone() * maskw.clone()
            #mask_copy = maskb.clone() + maskw.clone()
            minchannels = int(bias_copy.size(0) * (1 - percent) / 4)
            if (int(torch.sum(mask)) <= minchannels):
                index = minremainindex(bias_copy,
                                       minchannels,
                                       descending=False)
                for iii in index:
                    mask[iii] = 1
            pruned = pruned + mask.shape[0] - max(int(torch.sum(mask)), 1)
            m.weight.data.mul_(mask)
            m.bias.data.mul_(mask)
            ncfg.append(max(1, int(torch.sum(mask))))
            ncfg_mask.append(mask.clone())
            allcfg.append(max(1, int(torch.sum(mask))))
            allcfg_mask.append(mask.clone())
            print(
                'layer index: {:d} \t total channel: {:d} \t remaining channel: {:d}'
                .format(k, mask.shape[0], int(torch.sum(mask))))
        elif isinstance(m, nn.MaxPool2d):
            ncfg.append('M')
    pruned_ratio = pruned / total
    print('Pre-processing Successful!')

    #perf_indicator = test(model)
    print("new cfg:")
    print(ncfg)
    newmodel = eval('models.' + 'purnpose_hrnet' + '.get_pose_net')(
        cfg, ncfg, is_train=False)  #PosePurnHighResolutionNet(cfg,ncfg)
    newmodel = torch.nn.DataParallel(newmodel, device_ids=cfg.GPUS).cuda()
    newmodules = list(newmodel.modules())
    print("newmodelcreate!")

    print("init newmodel weight:")
    num_parameters = sum([param.nelement() for param in newmodel.parameters()])
    savepath = os.path.join(args.save, "prune.txt")
    with open(savepath, "w") as fp:
        fp.write("Configuration: \n" + str(ncfg) + "\n")
        fp.write("Number of parameters: \n" + str(num_parameters) + "\n")
    layer_id_in_cfg = 0
    start_mask = torch.ones(3)
    end_mask = allcfg_mask[layer_id_in_cfg]
    conv_count = 0
    for layer_id in range(len(modules)):
        m0 = modules[layer_id]
        m1 = newmodules[layer_id]
        if isinstance(m0, nn.BatchNorm2d):
            idx1 = np.squeeze(np.argwhere(np.asarray(end_mask.cpu().numpy())))
            if idx1.size == 1:
                idx1 = np.resize(idx1, (1, ))
            if layer_id not in purn_bn:
                m1.weight.data = m0.weight.data.clone()
                m1.bias.data = m0.bias.data.clone()
                m1.running_mean = m0.running_mean.clone()
                m1.running_var = m0.running_var.clone()
                layer_id_in_cfg += 1
                start_mask = end_mask.clone()
                if layer_id_in_cfg < len(
                        allcfg_mask):  # do not change in Final FC
                    end_mask = allcfg_mask[layer_id_in_cfg]
            else:
                m1.weight.data = m0.weight.data[idx1.tolist()].clone()
                m1.bias.data = m0.bias.data[idx1.tolist()].clone()
                m1.running_mean = m0.running_mean[idx1.tolist()].clone()
                m1.running_var = m0.running_var[idx1.tolist()].clone()
                layer_id_in_cfg += 1
                start_mask = end_mask.clone()
                if layer_id_in_cfg < len(
                        allcfg_mask):  # do not change in Final FC
                    end_mask = allcfg_mask[layer_id_in_cfg]
        elif isinstance(m0, nn.Conv2d):
            if conv_count == 0:
                m1.weight.data = m0.weight.data.clone()
                conv_count += 1
                continue
            if ((layer_id + 1) in purn_bn):  # output-only and output+input channel pruning
                conv_count += 1
                idx0 = np.squeeze(
                    np.argwhere(np.asarray(start_mask.cpu().numpy())))
                idx1 = np.squeeze(
                    np.argwhere(np.asarray(end_mask.cpu().numpy())))
                if idx0.size == 1:
                    idx0 = np.resize(idx0, (1, ))
                if idx1.size == 1:
                    idx1 = np.resize(idx1, (1, ))
                w1 = m0.weight.data[idx1.tolist(), :, :, :].clone()
                #w1 = m0.weight.data[:, idx0.tolist(), :, :].clone()
                if ((layer_id - 1) in purn_bn) or ((layer_id - 2)
                                                   in purn_bn):  # input channel pruning
                    w1 = w1[:, idx0.tolist(), :, :].clone()
                    print('In shape {:d}.'.format(idx0.size))
                else:
                    print('In shape {:d}.'.format(w1.shape[1]))
                #w2 = w1[idx1.tolist(), :, :, :].clone()
                print('Out shape {:d}.'.format(idx1.size))
                m1.weight.data = w1.clone()
                continue
            if ((layer_id - 1) in purn_bn) or ((layer_id - 2)
                                               in purn_bn):  # input-only channel pruning
                conv_count += 1
                idx0 = np.squeeze(
                    np.argwhere(np.asarray(start_mask.cpu().numpy())))
                if idx0.size == 1:
                    idx0 = np.resize(idx0, (1, ))
                w1 = m0.weight.data[:, idx0.tolist(), :, :].clone()
                m1.weight.data = w1.clone()
                print('In shape {:d}.'.format(idx0.size))
                print("Out shape {:d}".format(w1.shape[0]))
                continue
            m1.weight.data = m0.weight.data.clone()
    torch.save({
        'cfg': cfg,
        'ncfg': ncfg,
        'state_dict': newmodel.state_dict()
    }, os.path.join(args.save,
                    'pruned' + '_pecnet' + str(percent) + '.pth.tar'))
    #torch.save({'cfg': cfg ,'state_dict': oldmodel.state_dict()}, os.path.join(args.save, 'oldpruned.pth.tar'))
    print('new model saved')
    print("test new model:")
    model = newmodel
    return model
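
The core of the pruning pass above is the global BatchNorm threshold; a
compact sketch of that step on an illustrative model (network-slimming
style, not the exact bias/weight masking used above):

import torch
import torch.nn as nn

def bn_prune_threshold(model, percent):
    # Gather all BN scale magnitudes and take the percent-quantile value
    # as a global pruning threshold.
    scales = torch.cat([
        m.weight.data.abs().flatten()
        for m in model.modules() if isinstance(m, nn.BatchNorm2d)
    ])
    k = int(scales.numel() * percent)
    return torch.sort(scales).values[k]

net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
print(bn_prune_threshold(net, 0.6))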
    print("recall per class:")
    print(fusion_matrix.get_rec_per_class())
    print("precision per class:")
    print(fusion_matrix.get_pre_per_class())
    # print("loss per class:")
    # print(loss_vector.get_avg_loss_per_class())

    return top1_acc, fusion_matrix.get_rec_per_class()


if __name__ == "__main__":

    args = parse_args()
    update_config(cfg, args)
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_ids
    print(cfg.DATASET.DATASET)

    train_set = eval(cfg.DATASET.DATASET)("train", cfg)
    test_set = eval(cfg.DATASET.DATASET)("valid", cfg)
    num_classes = test_set.get_num_classes()

    annotations_train = train_set.get_annotations()
    num_class_list_train, cat_list_train = get_category_list(annotations_train, num_classes, cfg)
    annotations_test = test_set.get_annotations()
    num_class_list_test, cat_list_test = get_category_list(annotations_test, num_classes, cfg)

    device = torch.device("cpu" if cfg.CPU_MODE else "cuda")

    para_dict_train = {
Exemplo n.º 31
0
def main():
    args = parse_args()
    update_config(cfg, args)

    logger, final_output_dir, tb_log_dir = create_logger(
        cfg, args.cfg, 'train')

    logger.info(pprint.pformat(args))
    logger.info(cfg)

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    model_p, model_d = eval('models.' + cfg.MODEL.NAME +
                            '.get_adaptive_pose_net')(cfg, is_train=True)

    if cfg.TRAIN.CHECKPOINT:
        logger.info('=> loading model from {}'.format(cfg.TRAIN.CHECKPOINT))
        model_p.load_state_dict(torch.load(cfg.TRAIN.CHECKPOINT))

    # copy model file
    this_dir = os.path.dirname(__file__)
    shutil.copy2(
        os.path.join(this_dir, '../lib/models', cfg.MODEL.NAME + '.py'),
        final_output_dir)

    writer_dict = {
        'writer': SummaryWriter(log_dir=tb_log_dir),
        'pre_train_global_steps': 0,
        'train_global_steps': 0,
        'valid_global_steps': 0,
    }

    dump_input = torch.rand(
        (1, 3, cfg.MODEL.IMAGE_SIZE[1], cfg.MODEL.IMAGE_SIZE[0]))
    writer_dict['writer'].add_graph(model_p, (dump_input, ), verbose=False)

    logger.info(get_model_summary(model_p, dump_input))

    model_p = torch.nn.DataParallel(model_p, device_ids=cfg.GPUS).cuda()
    model_d = torch.nn.DataParallel(model_d, device_ids=cfg.GPUS).cuda()

    # define loss function (criterion) and optimizer for pose_net
    criterion_p = JointsMSELoss(
        use_target_weight=cfg.LOSS.USE_TARGET_WEIGHT).cuda()

    optimizer_p = get_optimizer(cfg, model_p)

    # define loss function (criterion) and optimizer for domain
    criterion_d = torch.nn.BCEWithLogitsLoss().cuda()
    optimizer_d = get_optimizer(cfg, model_d)

    # Data loading code
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_pre_dataset = eval('dataset.' + cfg.DATASET.DATASET)(
        cfg, cfg.DATASET.ROOT, cfg.DATASET.TRAIN_PRE_SET, True,
        transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ]))

    train_dataset = eval('dataset.' + cfg.DATASET.DATASET)(
        cfg, cfg.DATASET.ROOT, cfg.DATASET.TRAIN_SET, True,
        transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ]))

    valid_dataset = eval('dataset.' + cfg.DATASET.DATASET)(
        cfg, cfg.DATASET.ROOT, cfg.DATASET.TEST_SET, False,
        transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ]))

    train_pre_loader = torch.utils.data.DataLoader(
        train_pre_dataset,
        batch_size=cfg.TRAIN.PRE_BATCH_SIZE_PER_GPU * len(cfg.GPUS),
        shuffle=cfg.TRAIN.SHUFFLE,
        num_workers=cfg.WORKERS,
        pin_memory=cfg.PIN_MEMORY)

    syn_labels = train_dataset._load_syrip_syn_annotations()
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        sampler=BalancedBatchSampler(train_dataset, syn_labels),
        batch_size=cfg.TRAIN.BATCH_SIZE_PER_GPU * len(cfg.GPUS),
        num_workers=cfg.WORKERS,
        pin_memory=cfg.PIN_MEMORY)

    valid_loader = torch.utils.data.DataLoader(
        valid_dataset,
        batch_size=cfg.TEST.BATCH_SIZE_PER_GPU * len(cfg.GPUS),
        shuffle=False,
        num_workers=cfg.WORKERS,
        pin_memory=cfg.PIN_MEMORY)

    best_perf = 0.0
    best_model = False
    last_epoch = -1
    begin_epoch = cfg.TRAIN.BEGIN_EPOCH
    checkpoint_file = os.path.join(final_output_dir, 'checkpoint.pth')

    if cfg.AUTO_RESUME and os.path.exists(checkpoint_file):
        logger.info("=> loading checkpoint '{}'".format(checkpoint_file))
        checkpoint = torch.load(checkpoint_file)
        begin_epoch = checkpoint['epoch']
        best_perf = checkpoint['perf']
        last_epoch = checkpoint['epoch']
        model_p.load_state_dict(checkpoint['state_dict'])

        optimizer_p.load_state_dict(checkpoint['optimizer'])
        logger.info("=> loaded checkpoint '{}' (epoch {})".format(
            checkpoint_file, checkpoint['epoch']))

    # freeze some layers
    idx = 0
    for param in model_p.parameters():

        if idx <= 108:  #fix 108 for stage 2 + bottleneck  or fix 483 for stage 3 + stage 2+ bottleneck
            param.requires_grad = False
            #print(param.data.shape)
        idx = idx + 1

    lr_scheduler_p = torch.optim.lr_scheduler.MultiStepLR(
        optimizer_p,
        cfg.TRAIN.LR_STEP,
        cfg.TRAIN.LR_FACTOR,
        last_epoch=last_epoch)

    lr_scheduler_d = torch.optim.lr_scheduler.MultiStepLR(
        optimizer_d, cfg.TRAIN.LR_STEP, cfg.TRAIN.LR_FACTOR)

    epoch_D = cfg.TRAIN.PRE_EPOCH
    losses_D_list = []
    acces_D_list = []
    acc_num_total = 0
    num = 0
    losses_d = AverageMeter()

    # Pretrained Stage
    print('Pretrained Stage:')
    print('Start to train Domain Classifier-------')
    for epoch_d in range(epoch_D):  # epoch
        model_d.train()
        model_p.train()

        for i, (input, target, target_weight,
                meta) in enumerate(train_pre_loader):  # iteration
            # compute output for pose_net
            feature_outputs, outputs = model_p(input)
            #print(feature_outputs.size())
            # compute for domain classifier
            domain_logits = model_d(feature_outputs.detach())
            domain_label = (meta['synthetic'].unsqueeze(-1) *
                            1.0).cuda(non_blocking=True)
            # print(domain_label)

            loss_d = criterion_d(domain_logits, domain_label)
            loss_d.backward(retain_graph=True)
            optimizer_d.step()

            # compute accuracy of classifier
            acc_num = 0
            for j in range(len(domain_label)):
                if (domain_logits[j] > 0 and domain_label[j] == 1.0) or (
                        domain_logits[j] < 0 and domain_label[j] == 0.0):
                    acc_num += 1
                    acc_num_total += 1
                num += 1
            acc_d = acc_num * 1.0 / input.size(0)
            acces_D_list.append(acc_d)

            optimizer_d.zero_grad()
            losses_d.update(loss_d.item(), input.size(0))

            if i % cfg.PRINT_FREQ == 0:
                msg = 'Epoch: [{0}][{1}/{2}]\t' \
                      'Accuracy_d: {3} ({4})\t' \
                      'Loss_d: {loss_d.val:.5f} ({loss_d.avg:.5f})'.format(
                          epoch_d, i, len(train_pre_loader), acc_d, acc_num_total * 1.0 / num, loss_d=losses_d)
                logger.info(msg)

                writer = writer_dict['writer']
                pre_global_steps = writer_dict['pre_train_global_steps']
                writer.add_scalar('pre_train_loss_D', losses_d.val,
                                  pre_global_steps)
                writer.add_scalar('pre_train_acc_D', acc_d, pre_global_steps)
                writer_dict['pre_train_global_steps'] = pre_global_steps + 1

            losses_D_list.append(losses_d.val)

    print('Training Stage (Step I and II):')
    losses_P_list = []
    acces_P_list = []
    losses_p = AverageMeter()
    acces_p = AverageMeter()
    for epoch in range(begin_epoch, cfg.TRAIN.END_EPOCH):
        lr_scheduler_p.step()

        # train for one epoch
        losses_P_list, losses_D_list, acces_P_list, acces_D_list = train_adaptive(
            cfg, train_loader, model_p, model_d, criterion_p, criterion_d,
            optimizer_p, optimizer_d, epoch, final_output_dir, tb_log_dir,
            writer_dict, losses_P_list, losses_D_list, acces_P_list,
            acces_D_list, acc_num_total, num, losses_p, acces_p, losses_d)

        # evaluate on validation set
        perf_indicator = validate_adaptive(cfg, valid_loader, valid_dataset,
                                           model_p, criterion_p,
                                           final_output_dir, tb_log_dir,
                                           writer_dict)

        if perf_indicator > best_perf:
            best_perf = perf_indicator
            best_model = True
        else:
            best_model = False

        logger.info('=> saving checkpoint to {}'.format(final_output_dir))
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'model': cfg.MODEL.NAME,
                'state_dict': model_p.state_dict(),
                'best_state_dict': model_p.module.state_dict(),
                'perf': perf_indicator,
                'optimizer': optimizer_p.state_dict(),
            }, best_model, final_output_dir)

    final_model_state_file = os.path.join(final_output_dir, 'final_state.pth')
    logger.info(
        'saving final model state to {}'.format(final_model_state_file))
    torch.save(model_p.module.state_dict(), final_model_state_file)
    writer_dict['writer'].close()

    np.save('./losses_D.npy', np.array(losses_D_list))  # Adversarial-D
    np.save('./losses_P.npy', np.array(losses_P_list))  # P
    np.save('./acces_P.npy', np.array(acces_P_list))  # P
    np.save('./acces_D.npy', np.array(acces_D_list))  # D
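
The per-sample sign check on the domain logits above can be vectorized; a
small sketch (assumes binary domain labels in {0.0, 1.0} and one logit per
sample, as in the loop):

import torch

def domain_accuracy(logits, labels):
    preds = (logits > 0).float()  # sigmoid(x) > 0.5  <=>  x > 0
    return (preds == labels).float().mean().item()

print(domain_accuracy(torch.tensor([[1.2], [-0.3]]),
                      torch.tensor([[1.0], [0.0]])))  # 1.0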
Exemplo n.º 32
0
def parse_args():
    parser = argparse.ArgumentParser(description='Visualize GradCAM')

    parser.add_argument('--cfg',
                        help='experiment configure file name',
                        required=True,
                        type=str)
    parser.add_argument('--vis-mode',
                        type=str,
                        default='GradCAM',
                        choices=METHODS.keys(),
                        help='Type of gradient visualization')
    parser.add_argument('--image-index',
                        type=int,
                        default=0,
                        help='Index of input image for GradCAM')
    parser.add_argument('--image-index-range',
                        type=int,
                        nargs=3,
                        help='Expects [start, end) and step.')
    parser.add_argument('--crop-size',
                        type=int,
                        default=-1,
                        help='Size of crop around the center pixel')
    parser.add_argument(
        '--pixel-max-num-random',
        type=int,
        default=10,
        help='Maximum number of pixels to randomly sample from '
        'an image, when fetching all predictions for a class.')
    parser.add_argument(
        '--pixel-i',
        type=int,
        default=0,
        nargs='*',
        help='i coordinate of pixel from which to compute GradCAM')
    parser.add_argument(
        '--pixel-j',
        type=int,
        default=0,
        nargs='*',
        help='j coordinate of pixel from which to compute GradCAM')
    parser.add_argument(
        '--pixel-i-range',
        type=int,
        nargs=3,
        help='Range for pixel i. Expects [start, end) and step.')
    parser.add_argument(
        '--pixel-j-range',
        type=int,
        nargs=3,
        help='Range for pixel j. Expects [start, end) and step.')
    parser.add_argument('--pixel-cartesian-product',
                        action='store_true',
                        help='Compute cartesian product between all is and js '
                        'for the full list of pixels.')
    parser.add_argument('--suffix',
                        default='',
                        help='Appended to each image filename.')
    parser.add_argument(
        '--target-layers',
        type=str,
        help='List of target layers from which to compute GradCAM')
    parser.add_argument(
        '--nbdt-node-wnids-for',
        type=str,
        help='Class NAME. Automatically computes nodes leading '
        'up to particular class leaf.')
    parser.add_argument('--crop-for', type=str, help='Class to crop for')
    parser.add_argument(
        '--nbdt-node-wnid',
        type=str,
        default='',
        nargs='*',
        help='WNID of NBDT node from which to compute output logits')
    parser.add_argument('opts',
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)
    parser.add_argument('--skip-save-npy',
                        action='store_true',
                        help="Don't save the npy file.")

    args = parser.parse_args()
    update_config(config, args)

    return args
Exemplo n.º 33
0
def scan_credentials():
    credentials = scan_attempts(4)

    if credentials:
        if ("lnd-config" in credentials) and ("lnd.config" in credentials):
            logger.info("BTCPayServer LND Credentials detected.")
            # lstrip() strips a character set, not a prefix; remove the
            # literal "config=" prefix instead (see the note below).
            r = requests.get(credentials.replace("config=", "", 1))
            data = r.json()
            data = data["configurations"][0]

            config.update_config("btcpay", "url", data["uri"] + "v1")
            config.update_config("lnd", "macaroon", data["adminMacaroon"])
            config.update_config("atm", "activewallet", "btcpay_lnd")

        elif ("lntxbot" in credentials) and ("==@" in credentials):
            logger.info("Lntxbot Credentials detected.")

            config.update_config("lntxbot", "creds", credentials.split("@")[0])
            config.update_config("lntxbot", "url", credentials.split("@")[1])
            config.update_config("atm", "activewallet", "lntxbot")

        else:
            logger.error("No credentials to a known wallet could be detected.")
    else:
        logger.error("No credentials to a known wallet could be detected.")
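
Why lstrip() needed replacing above: it strips a set of characters, not a
prefix, so any URL that happens to begin with a character from that set
gets mangled:

print("config=gateway.example".lstrip("config="))          # 'ateway.example'
print("config=gateway.example".replace("config=", "", 1))  # 'gateway.example'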
def main():
    args = parse_args()
    update_config(cfg, args)

    logger, final_output_dir, tb_log_dir = create_logger(
        cfg, args.cfg, 'valid')

    logger.info(pprint.pformat(args))
    logger.info(cfg)

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(cfg,
                                                               is_train=False)

    if cfg.TEST.MODEL_FILE:
        logger.info('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
        model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=False)
    else:
        model_state_file = os.path.join(final_output_dir, 'final_state.pth')
        logger.info('=> loading model from {}'.format(model_state_file))
        model.load_state_dict(torch.load(model_state_file))

    model = torch.nn.DataParallel(model, device_ids=cfg.GPUS).cuda()

    # define loss function (criterion) and optimizer
    criterion = JointsMSELoss(
        use_target_weight=cfg.LOSS.USE_TARGET_WEIGHT).cuda()

    # Loading an image
    image_file = args.img_file
    data_numpy = cv2.imread(image_file,
                            cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
    if data_numpy is None:
        logger.error('=> fail to read {}'.format(image_file))
        raise ValueError('=> fail to read {}'.format(image_file))

    # object detection box
    box = [450, 160, 350, 560]
    c, s = _box2cs(box, cfg.MODEL.IMAGE_SIZE[0], cfg.MODEL.IMAGE_SIZE[1])
    r = 0

    trans = get_affine_transform(c, s, r, cfg.MODEL.IMAGE_SIZE)
    input = cv2.warpAffine(
        data_numpy,
        trans, (int(cfg.MODEL.IMAGE_SIZE[0]), int(cfg.MODEL.IMAGE_SIZE[1])),
        flags=cv2.INTER_LINEAR)
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])

    input = transform(input).unsqueeze(0)
    # switch to evaluate mode
    model.eval()
    with torch.no_grad():
        # compute output heatmap
        output = model(input)
        preds, maxvals = get_final_preds(cfg,
                                         output.clone().cpu().numpy(),
                                         np.asarray([c]), np.asarray([s]))

        image = data_numpy.copy()
        for mat in preds[0]:
            x, y = int(mat[0]), int(mat[1])
            cv2.circle(image, (x, y), 2, (255, 0, 0), 2)

            # vis result
        cv2.imwrite("test_h36m.jpg", image)
        cv2.imshow('res', image)
        cv2.waitKey(10000)
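
_box2cs above converts an [x, y, w, h] detection box into the (center,
scale) pair the affine transform expects; a hedged sketch following the
common SimpleBaseline convention (pixel_std=200, aspect ratio forced to
the model input); the project's exact details may differ:

import numpy as np

def box_to_center_scale(box, model_w, model_h, pixel_std=200.0):
    x, y, w, h = box
    center = np.array([x + w * 0.5, y + h * 0.5], dtype=np.float32)
    aspect = model_w * 1.0 / model_h
    # Pad the box to the model's aspect ratio before scaling.
    if w > aspect * h:
        h = w / aspect
    elif w < aspect * h:
        w = h * aspect
    scale = np.array([w / pixel_std, h / pixel_std], dtype=np.float32) * 1.25
    return center, scale

center, scale = box_to_center_scale([450, 160, 350, 560], 288, 384)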
Exemplo n.º 35
0
def main():
    args = parse_args()
    update_config(cfg, args)

    logger, final_output_dir, tb_log_dir = create_logger(
        cfg, args.cfg, 'train')

    logger.info(pprint.pformat(args))
    logger.info(cfg)

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(cfg,
                                                               is_train=True)

    # copy model file
    this_dir = os.path.dirname(__file__)
    shutil.copy2(
        os.path.join(this_dir, '../lib/models', cfg.MODEL.NAME + '.py'),
        final_output_dir)
    # logger.info(pprint.pformat(model))
    dump_input = torch.rand(
        (1, 3, cfg.MODEL.IMAGE_SIZE[1], cfg.MODEL.IMAGE_SIZE[0]))
    logger.info(get_model_summary(model, dump_input))

    #model = torch.nn.DataParallel(model, device_ids= (0)).cuda()
    model = model.cuda()
    # define loss function (criterion) and optimizer
    criterion = JointsMSELoss(
        use_target_weight=cfg.LOSS.USE_TARGET_WEIGHT).cuda()

    # Data loading code
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_dataset = eval('dataset.' + cfg.DATASET.DATASET)(
        cfg, cfg.DATASET.ROOT, cfg.DATASET.TRAIN_SET, True,
        transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ]))
    valid_dataset = eval('dataset.' + cfg.DATASET.DATASET)(
        cfg, cfg.DATASET.ROOT, cfg.DATASET.TEST_SET, False,
        transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ]))

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=cfg.TRAIN.BATCH_SIZE_PER_GPU * 1,
        shuffle=cfg.TRAIN.SHUFFLE,
        num_workers=cfg.WORKERS,
        pin_memory=cfg.PIN_MEMORY)
    valid_loader = torch.utils.data.DataLoader(
        valid_dataset,
        batch_size=cfg.TEST.BATCH_SIZE_PER_GPU * 1,
        shuffle=False,
        num_workers=cfg.WORKERS,
        pin_memory=cfg.PIN_MEMORY)

    best_perf = 0.0
    best_model = False
    last_epoch = -1
    optimizer = get_optimizer(cfg, model)
    begin_epoch = cfg.TRAIN.BEGIN_EPOCH
    checkpoint_file = os.path.join(final_output_dir, 'checkpoint.pth')

    if cfg.AUTO_RESUME and os.path.exists(checkpoint_file):
        logger.info("=> loading checkpoint '{}'".format(checkpoint_file))
        checkpoint = torch.load(checkpoint_file)
        begin_epoch = checkpoint['epoch']
        best_perf = checkpoint['perf']
        last_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])

        optimizer.load_state_dict(checkpoint['optimizer'])
        logger.info("=> loaded checkpoint '{}' (epoch {})".format(
            checkpoint_file, checkpoint['epoch']))

    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                        cfg.TRAIN.LR_STEP,
                                                        cfg.TRAIN.LR_FACTOR,
                                                        last_epoch=last_epoch)

    for epoch in range(begin_epoch, cfg.TRAIN.END_EPOCH):
        lr_scheduler.step()

        # train for one epoch
        train(cfg, train_loader, model, criterion, optimizer, epoch,
              final_output_dir, tb_log_dir)

        # evaluate on validation set
        perf_indicator = validate(cfg, valid_loader, valid_dataset, model,
                                  criterion, final_output_dir, tb_log_dir)

        if perf_indicator >= best_perf:
            best_perf = perf_indicator
            best_model = True
        else:
            best_model = False

        logger.info('=> saving checkpoint to {}'.format(final_output_dir))
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'model': cfg.MODEL.NAME,
                'state_dict': model.state_dict(),
                # model is not wrapped in DataParallel here, so there is
                # no .module attribute to unwrap.
                'best_state_dict': model.state_dict(),
                'perf': perf_indicator,
                'optimizer': optimizer.state_dict(),
            }, best_model, final_output_dir)

    final_model_state_file = os.path.join(final_output_dir, 'final_state.pth')
    logger.info(
        '=> saving final model state to {}'.format(final_model_state_file))
    torch.save(model.state_dict(), final_model_state_file)
def main():
    # transformation
    pose_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    args = parse_args()
    update_config(cfg, args)
    pose_dir = prepare_output_dirs(args.outputDir)
    csv_output_rows = []

    pose_model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(
        cfg, is_train=False)

    if cfg.TEST.MODEL_FILE:
        print('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
        pose_model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE),
                                   strict=False)
    else:
        print('expected model defined in config at TEST.MODEL_FILE')

    pose_model.to(CTX)
    pose_model.eval()

    image_files = glob.glob(os.path.join(args.imageDir, "*"))
    count = 0
    for image_file in image_files:
        total_now = time.time()
        image_bgr = cv2.imread(image_file)
        count += 1

        image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)

        # Clone 2 image for person detection and pose estimation
        if cfg.DATASET.COLOR_RGB:
            image_per = image_rgb.copy()
            image_pose = image_rgb.copy()
        else:
            image_per = image_bgr.copy()
            image_pose = image_bgr.copy()

        # Clone 1 image for debugging purpose
        image_debug = image_bgr.copy()

        # object detection box
        now = time.time()
        w = image_rgb.shape[1]
        h = image_rgb.shape[0]
        # pred_boxes = [[[int(w * 0.1), int(h * 0.1)], [int(w * 0.9), int(h * 0.9)]]]
        pred_boxes = [[[int(w * 0.), int(h * 0.)], [int(w * 1.), int(h * 1.)]]]
        then = time.time()
        print("Find person bbox in: {} sec".format(then - now))

        # pose estimation : for multiple people
        centers = []
        scales = []
        for box in pred_boxes:
            center, scale = box_to_center_scale(box, cfg.MODEL.IMAGE_SIZE[0],
                                                cfg.MODEL.IMAGE_SIZE[1])
            centers.append(center)
            scales.append(scale)

        now = time.time()
        pose_preds = get_pose_estimation_prediction(pose_model,
                                                    image_pose,
                                                    centers,
                                                    scales,
                                                    transform=pose_transform)
        then = time.time()
        print("Find person pose in: {} sec".format(then - now))
        print(pose_preds)
        new_csv_row = []
        cs = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]
        for coords in pose_preds:
            # Draw each point on image
            for cidx, coord in enumerate(coords):
                x_coord, y_coord = int(coord[0]), int(coord[1])
                c = cs[cidx % len(cs)]  # cycle colors: more keypoints than colors
                cv2.circle(image_debug, (x_coord, y_coord), 40, c, 20)
                new_csv_row.extend([x_coord, y_coord])

        total_then = time.time()

        text = "{:03.2f} sec".format(total_then - total_now)
        cv2.putText(image_debug, text, (100, 50), cv2.FONT_HERSHEY_SIMPLEX, 1,
                    (0, 0, 255), 2, cv2.LINE_AA)

        csv_output_rows.append(new_csv_row)
        img_file = os.path.join(pose_dir, 'pose_{:08d}.jpg'.format(count))
        cv2.imwrite(img_file, image_debug)

    # write csv
    csv_headers = ['frame']
    for keypoint in COCO_KEYPOINT_INDEXES.values():
        csv_headers.extend([keypoint + '_x', keypoint + '_y'])

    csv_output_filename = os.path.join(args.outputDir, 'pose-data.csv')
    with open(csv_output_filename, 'w', newline='') as csvfile:
        csvwriter = csv.writer(csvfile)
        csvwriter.writerow(csv_headers)
        csvwriter.writerows(csv_output_rows)

    cv2.destroyAllWindows()
Exemplo n.º 37
0
def main():
    args = parse_args()
    update_config(cfg, args)

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    # Set the random seed manually for reproducibility.
    np.random.seed(cfg.SEED)
    torch.manual_seed(cfg.SEED)
    torch.cuda.manual_seed_all(cfg.SEED)

    # Loss
    criterion = CrossEntropyLoss(cfg.MODEL.NUM_CLASSES).cuda()

    # model and optimizer
    model = Network(cfg.MODEL.INIT_CHANNELS,
                    cfg.MODEL.NUM_CLASSES,
                    cfg.MODEL.LAYERS,
                    criterion,
                    primitives_2,
                    drop_path_prob=cfg.TRAIN.DROPPATH_PROB)
    model = model.cuda()

    # weight params
    arch_params = list(map(id, model.arch_parameters()))
    weight_params = filter(lambda p: id(p) not in arch_params,
                           model.parameters())

    # Optimizer
    optimizer = optim.Adam(weight_params, lr=cfg.TRAIN.LR)

    # resume && make log dir and logger
    if args.load_path and os.path.exists(args.load_path):
        checkpoint_file = os.path.join(args.load_path, 'Model',
                                       'checkpoint_best.pth')
        assert os.path.exists(checkpoint_file)
        checkpoint = torch.load(checkpoint_file)

        # load checkpoint
        begin_epoch = checkpoint['epoch']
        last_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        best_acc1 = checkpoint['best_acc1']
        optimizer.load_state_dict(checkpoint['optimizer'])
        args.path_helper = checkpoint['path_helper']

        logger = create_logger(args.path_helper['log_path'])
        logger.info("=> loaded checkpoint '{}'".format(checkpoint_file))
    else:
        exp_name = args.cfg.split('/')[-1].split('.')[0]
        args.path_helper = set_path('logs_search', exp_name)
        logger = create_logger(args.path_helper['log_path'])
        begin_epoch = cfg.TRAIN.BEGIN_EPOCH
        best_acc1 = 0.0
        last_epoch = -1

    logger.info(args)
    logger.info(cfg)

    # copy model file
    this_dir = os.path.dirname(__file__)
    shutil.copy2(os.path.join(this_dir, 'models', cfg.MODEL.NAME + '.py'),
                 args.path_helper['ckpt_path'])

    # dataloader
    train_dataset = DeepSpeakerDataset(Path(cfg.DATASET.DATA_DIR),
                                       cfg.DATASET.SUB_DIR,
                                       cfg.DATASET.PARTIAL_N_FRAMES, 'train')
    val_dataset = DeepSpeakerDataset(Path(cfg.DATASET.DATA_DIR),
                                     cfg.DATASET.SUB_DIR,
                                     cfg.DATASET.PARTIAL_N_FRAMES, 'val')
    train_loader = torch.utils.data.DataLoader(
        dataset=train_dataset,
        batch_size=cfg.TRAIN.BATCH_SIZE,
        num_workers=cfg.DATASET.NUM_WORKERS,
        pin_memory=True,
        shuffle=True,
        drop_last=True,
    )
    val_loader = torch.utils.data.DataLoader(
        dataset=val_dataset,
        batch_size=cfg.TRAIN.BATCH_SIZE,
        num_workers=cfg.DATASET.NUM_WORKERS,
        pin_memory=True,
        shuffle=True,
        drop_last=True,
    )
    test_dataset = DeepSpeakerDataset(Path(cfg.DATASET.DATA_DIR),
                                      cfg.DATASET.SUB_DIR,
                                      cfg.DATASET.PARTIAL_N_FRAMES,
                                      'test',
                                      is_test=True)
    test_loader = torch.utils.data.DataLoader(
        dataset=test_dataset,
        batch_size=1,
        num_workers=cfg.DATASET.NUM_WORKERS,
        pin_memory=True,
        shuffle=True,
        drop_last=True,
    )

    # training setting
    writer_dict = {
        'writer': SummaryWriter(args.path_helper['log_path']),
        'train_global_steps': begin_epoch * len(train_loader),
        'valid_global_steps': begin_epoch // cfg.VAL_FREQ,
    }

    # training loop
    architect = Architect(model, cfg)
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer,
        cfg.TRAIN.END_EPOCH,
        cfg.TRAIN.LR_MIN,
        last_epoch=last_epoch)

    for epoch in tqdm(range(begin_epoch, cfg.TRAIN.END_EPOCH),
                      desc='search progress'):
        model.train()

        genotype = model.genotype()
        logger.info('genotype = %s', genotype)

        if cfg.TRAIN.DROPPATH_PROB != 0:
            model.drop_path_prob = cfg.TRAIN.DROPPATH_PROB * epoch / (
                cfg.TRAIN.END_EPOCH - 1)

        train(cfg, model, optimizer, train_loader, val_loader, criterion,
              architect, epoch, writer_dict)

        if epoch % cfg.VAL_FREQ == 0:
            # get threshold and evaluate on validation set
            acc = validate_identification(cfg, model, test_loader, criterion)

            # remember best acc@1 and save checkpoint
            is_best = acc > best_acc1
            best_acc1 = max(acc, best_acc1)

            # save
            logger.info('=> saving checkpoint to {}'.format(
                args.path_helper['ckpt_path']))
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'best_acc1': best_acc1,
                    'optimizer': optimizer.state_dict(),
                    'arch': model.arch_parameters(),
                    'genotype': genotype,
                    'path_helper': args.path_helper
                }, is_best, args.path_helper['ckpt_path'],
                'checkpoint_{}.pth'.format(epoch))

        lr_scheduler.step(epoch)
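
The drop-path schedule above is a simple linear ramp over training; written
out as a function (this mirrors the expression in the loop):

def drop_path_prob(base_prob, epoch, end_epoch):
    # 0 at epoch 0, base_prob at the final epoch.
    return base_prob * epoch / (end_epoch - 1)

print(drop_path_prob(0.2, 0, 50), drop_path_prob(0.2, 49, 50))  # 0.0 0.2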
Exemplo n.º 38
0
    def get(self):
        config.update_config()
Exemplo n.º 39
0
                 epoch, model, optimizer)
            with torch.no_grad():
                log_dict_val, preds = trainer.val(epoch, val_loader)
                mAP = val_dataset.run_eval(preds, cfg.OUTPUT_DIR)
                print('mAP is: ', mAP)
            for k, v in log_dict_val.items():
                logger.scalar_summary('val_{}'.format(k), v, epoch)
                logger.write('{} {:8f} | '.format(k, v))
            if mAP > best:
                best = mAP
                save_model(os.path.join(cfg.OUTPUT_DIR, 'model_best.pth'), 
                           epoch, model)
        else:
            save_model(os.path.join(cfg.OUTPUT_DIR, 'model_last.pth'), 
                     epoch, model, optimizer)
        logger.write('\n')
        if epoch in cfg.TRAIN.LR_STEP:
            save_model(os.path.join(cfg.OUTPUT_DIR, 'model_{}.pth'.format(epoch)), 
                     epoch, model, optimizer)
            lr = cfg.TRAIN.LR * (0.1 ** (cfg.TRAIN.LR_STEP.index(epoch) + 1))
            print('Drop LR to', lr)
            for param_group in optimizer.param_groups:
              param_group['lr'] = lr
    logger.close()

if __name__ == '__main__':
    args = parse_args()
    update_config(cfg, args.cfg)
    local_rank = args.local_rank
    main(cfg, local_rank)
Exemplo n.º 40
0
    def update_config(self, *args, **kwds):
        config.update_config(*args, **kwds)

    def render_template(self, *args, **kwds):
Exemplo n.º 41
0
def main():
    args = parse_args()
    update_config(cfg, args)
    if args.load_path is None:
        raise AttributeError("Please specify load path.")

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    # Set the random seed manually for reproducibility.
    np.random.seed(cfg.SEED)
    torch.manual_seed(cfg.SEED)
    torch.cuda.manual_seed_all(cfg.SEED)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # model and optimizer
    if cfg.MODEL.NAME == 'model':
        if args.load_path and os.path.exists(args.load_path):
            checkpoint = torch.load(args.load_path)
            # genotype = checkpoint['genotype']
            genotype = eval(
                "Genotype(normal=[('dil_conv_5x5', 1), ('dil_conv_3x3', 0), ('dil_conv_5x5', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 1), ('sep_conv_3x3', 2), ('dil_conv_3x3', 2), ('max_pool_3x3', 1)], normal_concat=range(2, 6), reduce=[('max_pool_3x3', 1), ('max_pool_3x3', 0), ('dil_conv_5x5', 2), ('max_pool_3x3', 1), ('dil_conv_5x5', 3), ('dil_conv_3x3', 2), ('dil_conv_5x5', 4), ('dil_conv_5x5', 2)], reduce_concat=range(2, 6))"
            )
        else:
            raise AssertionError('Please specify the model to evaluate')
        model = IR_50(1)
        # model = Network(cfg.MODEL.INIT_CHANNELS, cfg.MODEL.NUM_CLASSES, cfg.MODEL.LAYERS, genotype)
        model.drop_path_prob = 0.0
    else:
        model = eval('resnet.{}(num_classes={})'.format(
            cfg.MODEL.NAME, cfg.MODEL.NUM_CLASSES))
    model = model.to(device)

    nb_params = sum([param.view(-1).size()[0] for param in model.parameters()])
    print('nb_params: {}'.format(nb_params))

    # resume && make log dir and logger
    if args.load_path and os.path.exists(args.load_path):
        checkpoint = torch.load(args.load_path, map_location="cpu")

        # load checkpoint
        del checkpoint['state_dict']['classifier.weight']
        del checkpoint['state_dict']['classifier.bias']
        model.load_state_dict(checkpoint['state_dict'], strict=False)
        args.path_helper = checkpoint['path_helper']

        logger = create_logger(os.path.dirname(args.load_path))
        logger.info("=> loaded checkpoint '{}'".format(args.load_path))
    else:
        raise AssertionError('Please specify the model to evaluate')
    logger.info(args)
    logger.info(cfg)

    # dataloader
    # test_dataset_verification = VoxcelebTestset(
    #     Path(cfg.DATASET.DATA_DIR), cfg.DATASET.PARTIAL_N_FRAMES
    # )
    test_dataset_verification = VoxcelebTestsetZalo(
        Path(cfg.DATASET.DATA_DIR), cfg.DATASET.PARTIAL_N_FRAMES)
    test_loader_verification = torch.utils.data.DataLoader(
        dataset=test_dataset_verification,
        batch_size=1,
        num_workers=cfg.DATASET.NUM_WORKERS,
        pin_memory=True,
        shuffle=False,
        drop_last=False,
    )

    validate_verification(cfg, model, test_loader_verification)
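Several of the mains in this collection share the update_config(cfg, args) idiom; a minimal sketch of the yacs-style implementation they are assumed to rely on (illustrative, not any one repo's exact code):

from yacs.config import CfgNode

def update_config(cfg: CfgNode, args):
    # Overlay the experiment YAML, then any extra KEY VALUE pairs from the
    # command line, onto the default config, and freeze the result.
    cfg.defrost()
    cfg.merge_from_file(args.cfg)
    if getattr(args, 'opts', None):
        cfg.merge_from_list(args.opts)
    cfg.freeze()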
Exemplo n.º 42
0
def set_runner(runner_id):
    # Persist the selected runner so later invocations reuse it.
    if runner_id is not None:
        config.update_config('runner', runner_id)

    return runner_id
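This flavor of config.update_config takes a key and a value; a minimal sketch of a JSON-backed key-value store it could plausibly wrap (the file path and layout are assumptions):

import json
import os

CONFIG_PATH = os.path.expanduser('~/.myapp/config.json')  # hypothetical location

def update_config(key, value):
    # Read-modify-write a flat JSON config file.
    data = {}
    if os.path.isfile(CONFIG_PATH):
        with open(CONFIG_PATH) as f:
            data = json.load(f)
    data[key] = value
    os.makedirs(os.path.dirname(CONFIG_PATH), exist_ok=True)
    with open(CONFIG_PATH, 'w') as f:
        json.dump(data, f, indent=2)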
Exemplo n.º 43
0
def main():
    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    args = parse_args()
    update_config(cfg, args)
    pose_dir, box_dir = prepare_output_dirs(args.outputDir)
    csv_output_filename = args.outputDir + 'pose-data.csv'
    csv_output_rows = []

    box_model = torchvision.models.detection.fasterrcnn_resnet50_fpn(
        pretrained=True)
    box_model.eval()

    pose_model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(
        cfg, is_train=False)

    if cfg.TEST.MODEL_FILE:
        print('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
        pose_model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE),
                                   strict=False)
    else:
        print('expected model defined in config at TEST.MODEL_FILE')

    pose_model = torch.nn.DataParallel(pose_model, device_ids=cfg.GPUS).cuda()

    # Load the input video
    vidcap = cv2.VideoCapture(args.videoFile)
    fps = vidcap.get(cv2.CAP_PROP_FPS)
    if fps < args.inferenceFps:
        print('desired inference fps is ' + str(args.inferenceFps) +
              ' but video fps is ' + str(fps))
        exit()
    every_nth_frame = round(fps / args.inferenceFps)

    success, image_bgr = vidcap.read()
    count = 0

    while success:
        if count % every_nth_frame != 0:
            success, image_bgr = vidcap.read()
            count += 1
            continue

        image = image_bgr[:, :, [2, 1, 0]]  # BGR -> RGB
        count_str = str(count).zfill(32)

        # object detection box
        pred_boxes = get_person_detection_boxes(box_model,
                                                image,
                                                threshold=0.8)
        if args.writeBoxFrames:
            image_bgr_box = image_bgr.copy()
            for box in pred_boxes:
                cv2.rectangle(
                    image_bgr_box,
                    box[0],
                    box[1],
                    color=(0, 255, 0),
                    thickness=3)  # Draw Rectangle with the coordinates
            cv2.imwrite(box_dir + 'box%s.jpg' % count_str, image_bgr_box)
        if not pred_boxes:
            success, image_bgr = vidcap.read()
            count += 1
            continue

        # pose estimation
        box = pred_boxes[0]  # assume there is only 1 person
        center, scale = box_to_center_scale(box, cfg.MODEL.IMAGE_SIZE[0],
                                            cfg.MODEL.IMAGE_SIZE[1])
        image_pose = image.copy() if cfg.DATASET.COLOR_RGB else image_bgr.copy()
        pose_preds = get_pose_estimation_prediction(pose_model, image_pose,
                                                    center, scale)

        new_csv_row = []
        for _, mat in enumerate(pose_preds[0]):
            x_coord, y_coord = int(mat[0]), int(mat[1])
            cv2.circle(image_bgr, (x_coord, y_coord), 4, (255, 0, 0), 2)
            new_csv_row.extend([x_coord, y_coord])

        csv_output_rows.append(new_csv_row)
        cv2.imwrite(pose_dir + 'pose%s.jpg' % count_str, image_bgr)

        # get next frame
        success, image_bgr = vidcap.read()
        count += 1

    # write csv
    csv_headers = ['frame']
    for keypoint in COCO_KEYPOINT_INDEXES.values():
        csv_headers.extend([keypoint + '_x', keypoint + '_y'])

    with open(csv_output_filename, 'w', newline='') as csvfile:
        csvwriter = csv.writer(csvfile)
        csvwriter.writerow(csv_headers)
        csvwriter.writerows(csv_output_rows)

    os.system("ffmpeg -y -r " + str(args.inferenceFps) +
              " -pattern_type glob -i '" + pose_dir +
              "/*.jpg' -c:v libx264 -vf fps=" + str(args.inferenceFps) +
              " -pix_fmt yuv420p /output/movie.mp4")
Exemplo n.º 44
0
    def auto_manage_changed(self, enable):
        logging.info("Turning auto_manage %s", "on" if enable else "off")
        self.config["auto_manage"] = enable
        update_config(auto_manage=enable)  # Update config.
Exemplo n.º 45
0
def main():
    device = 'cuda'

    parser = argparse.ArgumentParser(description='Train segmentation network')

    parser.add_argument('--cfg',
                        help='experiment configure file name',
                        required=True,
                        type=str)
    parser.add_argument('opts',
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args(
        # forward slashes avoid the invalid "\c" escape in the original literal
        args=["--cfg", "experiments/cityscapes/seg_hrnet_ocr_w48_demo.yaml"])
    update_config(config, args)

    # build model
    if torch.__version__.startswith('1'):
        module = eval('models.'+config.MODEL.NAME)
        module.BatchNorm2d_class = module.BatchNorm2d = torch.nn.BatchNorm2d
    model = eval('models.'+config.MODEL.NAME +
                 '.get_seg_model')(config)

    model.to(device)


    weightFile = '../weight/hrnet_ocr_cs_trainval_8227_torch11.pth'
    # weightFile = 'pspnet_46_6.pth.tar'

    if os.path.isfile(weightFile):
        # print(("=> loading checkpoint '{}'".format('pspnet_46_6.pth.tar')))
        checkpoint = torch.load(weightFile)
        # torch.nn.Module.load_state_dict(model, checkpoint['state_dict'])
        compatible_state_dict = {}
        for k, v in checkpoint.items():
            if 'model.' in k:
                compatible_state_dict[k[6:]] = v
            elif 'loss.' in k:
                pass
            else:
                compatible_state_dict[k] = v

        model.load_state_dict(compatible_state_dict, strict=True)
        model.eval()

    cudnn.benchmark = True
    cudnn.fastest = True

    showCvUi = False
    embeddingDict = {}
    spp = models.spp.SPPLayer(4, pool_type='avg_pool')
    train_loader = getCulaneLoader()
    for imgs, _, fileNames in tqdm(train_loader):
        imgs = imgs.cuda()

        with torch.no_grad():
            segOutput = model(imgs)[1]
            sppOut = torch.argmax(segOutput, dim=0)  # spp(torch.sigmoid(segOutput[1:15]))
            sppOut = torch.flatten(sppOut, start_dim=1)

        for embedding, fileName in zip(sppOut, fileNames):
            embeddingDict[fileName] = embedding.cpu().numpy()

        if showCvUi:
            # imageBgrCV = cv2.cvtColor(np.asarray(resizeImage), cv2.COLOR_RGB2BGR)
            # imageBgrCV = np.zeros((512,1024,3),dtype=np.uint8)
            imageBgrCV = tensorToCvBgr(imgs[0])
            segOutput = segOutput[0]
            # segOutput: [class,h,w]
            # t = torch.sigmoid(segOutput)
            t = torch.argmax(segOutput, dim=0)
            segOutput = t.byte().cpu().numpy()
            # segOutput: [1,1,h,w]

            # colorMapMat = np.array([lb.color for lb in labels],dtype=np.uint8)[...,::-1] # RGB to BGR
            segImage = Cityscapes.decode_target(
                segOutput).astype(np.uint8)[..., ::-1]
            segImage = cv2.resize(segImage, (1024, 512),
                                  interpolation=cv2.INTER_NEAREST)
            # segImage = colorMapMat[segOutput]
            imageBgrCV = cv2.addWeighted(imageBgrCV, 0.5, segImage, 0.5, 0)
            # imageBgrCV = segImage
            # imageBgrCV = cv2.resize(imageBgrCV,(3384//4,2710//4))

            cv2.imshow('L', imageBgrCV)
            # The following frees up resources and closes all windows
            k = cv2.waitKey(1) & 0xff
            if k == 27:
                break
    if showCvUi:
        cv2.destroyWindow('L')

    with open('DatasetEmbedding.pkl', 'wb') as f:
        pickle.dump(embeddingDict, f)
    return
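tensorToCvBgr is another project helper; a plausible sketch under the usual ImageNet-normalization assumption (values and names are assumptions, not the project's actual code):

import numpy as np

def tensorToCvBgr(img_tensor):
    # Undo ImageNet normalization, convert CHW float -> HWC uint8, RGB -> BGR.
    mean = np.array([0.485, 0.456, 0.406]).reshape(3, 1, 1)
    std = np.array([0.229, 0.224, 0.225]).reshape(3, 1, 1)
    img = img_tensor.detach().cpu().numpy() * std + mean
    img = np.clip(img * 255.0, 0, 255).astype(np.uint8)
    return np.ascontiguousarray(img.transpose(1, 2, 0)[..., ::-1])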
Exemplo n.º 46
0
def main():
    # transformation
    pose_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    args = parse_args()
    update_config(cfg, args)
    pose_dir = prepare_output_dirs(args.outputDir)
    csv_output_rows = []

    box_model = torchvision.models.detection.fasterrcnn_resnet50_fpn(
        pretrained=True)
    box_model.to(CTX)
    box_model.eval()
    pose_model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(
        cfg, is_train=False)

    if cfg.TEST.MODEL_FILE:
        print('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
        pose_model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE),
                                   strict=False)
    else:
        print('expected model defined in config at TEST.MODEL_FILE')

    pose_model.to(CTX)
    pose_model.eval()

    # Load the input video
    vidcap = cv2.VideoCapture(args.videoFile)
    fps = vidcap.get(cv2.CAP_PROP_FPS)
    if fps < args.inferenceFps:
        print('desired inference fps is ' + str(args.inferenceFps) +
              ' but video fps is ' + str(fps))
        exit()
    skip_frame_cnt = round(fps / args.inferenceFps)
    frame_width = int(vidcap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # outcap = cv2.VideoWriter('{}/{}_pose.avi'.format(args.outputDir, os.path.splitext(os.path.basename(args.videoFile))[0]),
    #                          cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), int(skip_frame_cnt), (frame_width, frame_height))
    # outcap = cv2.VideoWriter('{}/{}_pose.avi'.format(args.outputDir, 'human'),
    #                          cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), int(skip_frame_cnt), (frame_width, frame_height))

    # calculate the network parameter amount and operation amount
    # TODO: test the thop function
    # _, image_bgr = vidcap.read()
    # image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
    # image_per = image_rgb.copy()
    # image_per = Image.fromarray(image_per)
    # image_per = transforms.Compose([transforms.ToTensor()])(image_per)
    # box_macs, box_params = profile(box_model, inputs=(image_per, ))
    # print("The human box parameter amount: {}".format(box_params))
    # print("The human box operation amount: {}".format(box_macs))

    # predict the human box and human pose
    count = 0
    while vidcap.isOpened():
        total_now = time.time()
        ret, image_bgr = vidcap.read()
        count += 1

        if not ret:
            break  # end of stream; continuing here would loop forever

        if count % skip_frame_cnt != 0:
            continue

        image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)

        # Clone two copies of the frame: one for person detection, one for pose estimation
        if cfg.DATASET.COLOR_RGB:
            image_per = image_rgb.copy()
            image_pose = image_rgb.copy()
        else:
            image_per = image_bgr.copy()
            image_pose = image_bgr.copy()

        # Clone one more copy of the frame for debug drawing
        image_debug = image_bgr.copy()

        # object detection box
        now = time.time()
        pred_boxes = get_person_detection_boxes(box_model,
                                                image_per,
                                                threshold=0.9)
        then = time.time()
        print("Find person bbox in: {} sec".format(then - now))

        # No people found; move on to the next frame (count was already
        # incremented after the read above)
        if not pred_boxes:
            continue

        # draw the bounding box
        if args.writeBoxFrames:
            for box in pred_boxes:
                cv2.rectangle(
                    image_debug,
                    box[0],
                    box[1],
                    color=(0, 255, 0),
                    thickness=1)  # Draw Rectangle with the coordinates

        # pose estimation : for multiple people
        centers = []
        scales = []
        for box in pred_boxes:
            center, scale = box_to_center_scale(box, cfg.MODEL.IMAGE_SIZE[0],
                                                cfg.MODEL.IMAGE_SIZE[1])
            centers.append(center)
            scales.append(scale)

        now = time.time()
        pose_preds = get_pose_estimation_prediction(pose_model,
                                                    image_pose,
                                                    centers,
                                                    scales,
                                                    transform=pose_transform)
        then = time.time()
        print("Find person pose in: {} sec".format(then - now))

        # draw joint keypoints and skeletons for every detected people
        new_csv_row = []
        for coords in pose_preds:
            # Draw each point on image and connect them for skeletons
            for idx, coord in enumerate(coords):
                x_coord, y_coord = int(coord[0]), int(coord[1])
                cv2.circle(image_debug, (x_coord, y_coord), 1, (0, 0, 255), 5)
                cv2.putText(image_debug, str(idx), (x_coord, y_coord),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2,
                            cv2.LINE_AA)
                new_csv_row.extend([x_coord, y_coord])

            for skeleton in COCO_SKELETON_INDEXES:
                cv2.line(
                    image_debug,
                    (int(coords[skeleton[0]][0]), int(coords[skeleton[0]][1])),
                    (int(coords[skeleton[1]][0]), int(coords[skeleton[1]][1])),
                    skeleton[2], 2)

        # calculate the estimation time and FPS
        total_then = time.time()
        detect_time = total_then - total_now
        detect_fps = 1.0 / detect_time
        print("2D Human Pose Estimation in FPS: {:.2f}".format(detect_fps))

        # overlay the FPS / latency text on the frame
        text = "FPS:{0:0>5.2f}/{1:0>6.2f}ms".format(detect_fps,
                                                    detect_time * 1000)
        cv2.putText(image_debug, text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6,
                    (0, 0, 255), 2, cv2.LINE_AA)

        # show the image stream in window
        cv2.namedWindow("2D Human Pose Estimation", cv2.WINDOW_NORMAL)
        cv2.imshow("2D Human Pose Estimation", image_debug)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

        # save the images and video
        csv_output_rows.append(new_csv_row)
        img_file = os.path.join(pose_dir, 'pose_{:08d}.jpg'.format(count))
        cv2.imwrite(img_file, image_debug)
        # outcap.write(image_debug)

    # write csv
    csv_headers = ['frame']
    for keypoint in COCO_KEYPOINT_INDEXES.values():
        csv_headers.extend([keypoint + '_x', keypoint + '_y'])

    csv_output_filename = os.path.join(args.outputDir, 'pose-data.csv')
    with open(csv_output_filename, 'w', newline='') as csvfile:
        csvwriter = csv.writer(csvfile)
        csvwriter.writerow(csv_headers)
        csvwriter.writerows(csv_output_rows)

    vidcap.release()
    # outcap.release()

    cv2.destroyAllWindows()
    print("Live Video Done.")
Exemplo n.º 47
0
    def __init__(self, config_file, weight_file):
        update_config(cfg, config_file)
        self.cfg = cfg
        self.trtmodel = TRTModel(weight_file)