Example #1
# NumPy, TensorFlow and PyTorch are assumed to be available; load_yaml and
# get_fpn_net are project helpers whose import path depends on the repo layout.
import numpy as np
import tensorflow as tf
import torch


def tf_model_checker(torch_ckpt, tf_model):
    """Check that the converted TF model reproduces the PyTorch model's outputs."""
    cfg = load_yaml('config.yaml')
    torch_model = get_fpn_net(cfg['net'])
    torch_model.load_state_dict(torch_ckpt)
    torch_model.eval()

    # Feed the same random batch to both frameworks: TF expects NHWC, PyTorch NCHW.
    x = np.random.rand(2, 3, 320, 320).astype(np.float32)
    x_tf = tf.convert_to_tensor(np.transpose(x, [0, 2, 3, 1]))
    x_torch = torch.from_numpy(x)

    y_tf = tf_model(x_tf)
    y_torch = torch_model(x_torch)

    # Compare each pair of output feature maps after moving the TF tensors back to NCHW.
    for f, c in zip(y_tf[0], y_torch[0]):
        f = np.transpose(f.numpy(), [0, 3, 1, 2])
        c = c.detach().numpy()
        np.testing.assert_allclose(c, f, rtol=1e-4, atol=1e-5)

    print('All Good!')
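
As a usage note, torch_ckpt is expected to be a PyTorch state dict (e.g. checkpoint['net_state_dict']) and tf_model an already built TF version of the same net with the converted weights loaded. A minimal hypothetical call, following the conventions of the other examples on this page:

# Hypothetical usage sketch; the checkpoint path is illustrative.
sd = torch.load('Epoch_111.pth', map_location='cpu')['net_state_dict']
tf_net = get_fpn_net(load_yaml('config.yaml')['net'], framework='tf')
tf_net.build((1, 320, 320, 3))
# ...load the converted weights into tf_net here (e.g. via convert() in Example #4)...
tf_model_checker(sd, tf_net)  # raises an AssertionError on mismatch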
Example #2
import cv2
import numpy as np
import torch
from PIL import Image
from torchvision.transforms.functional import to_pil_image, to_tensor, normalize

from api import decode
# load_yaml and get_fpn_net are project helpers; their import path depends on the repo layout.

if __name__ == "__main__":
    '''paths and input size'''
    f_model = '/home/core4/Documents/logs/train_virat/fullvoc2/checkpoints/Epoch_111.pth'
    # fnames = glob.glob('/home/core4/data/S1-20210405T161314Z-001/S1/Videos/*.avi')
    #fnames = [r'/home/core4/data/virat/videos/VIRAT_S_010201_01_000125_000152.mp4']
    fnames = [r'/home/core4/Downloads/The CCTV People Demo 2.mp4']
    height = 320
    width = 320
    '''model'''
    cfg = load_yaml('config.yaml')
    net = get_fpn_net(cfg['net'])
    sd = torch.load(f_model, map_location="cuda")['net_state_dict']
    net.load_state_dict(sd)
    net.eval()
    net.cuda()

    for i_file, fname in enumerate(fnames):
        # Open the source video and read its properties.
        cap = cv2.VideoCapture(str(fname))
        fps = int(cap.get(cv2.CAP_PROP_FPS))
        n_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        h_frame = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        w_frame = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        print(h_frame, w_frame)
        i = 0
        # Output path and codec for the result video, derived from the input filename.
        f_res = fname[:-4] + '_res.avi'
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
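
The snippet is cut off at this point; a rough sketch of how such a frame loop typically continues (the writer call and per-frame processing below are assumptions, not the original code):

        # Hypothetical continuation of the loop above.
        out = cv2.VideoWriter(f_res, fourcc, fps, (w_frame, h_frame))
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            # Resize to the network input size and convert BGR -> RGB.
            img = cv2.cvtColor(cv2.resize(frame, (width, height)), cv2.COLOR_BGR2RGB)
            img_t = to_tensor(img).unsqueeze(0).cuda()
            with torch.no_grad():
                preds = net(img_t)
            # ...decode the predictions (see `decode` from api) and draw them on `frame`...
            out.write(frame)
            i += 1
        cap.release()
        out.release()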
Example #3
    def train(self):
        torch.backends.cudnn.benchmark = True
        torch.backends.cudnn.deterministic = True

        batch_size = self.train_params['batch_size']
        self.batch_size = batch_size
        num_workers = self.train_params['num_workers']
        pin_memory = self.train_params['pin_memory']
        print('Batch-size = {}'.format(batch_size))

        train_loader = DataLoader(self.train_dataset,
                                  batch_size=batch_size,
                                  shuffle=True,
                                  num_workers=num_workers,
                                  pin_memory=pin_memory,
                                  drop_last=True)
        val_loader = DataLoader(self.val_dataset,
                                batch_size=batch_size,
                                shuffle=False,
                                num_workers=num_workers,
                                pin_memory=pin_memory,
                                drop_last=False)

        # net setup
        print('Preparing net: ')
        net = get_fpn_net(self.net_params)
        # train setup
        lr = self.train_params['lr']
        epochs = self.train_params['epochs']
        weight_decay = self.train_params['weight_decay']

        self.optimizer = optim.Adam(net.parameters(),
                                    lr=lr,
                                    weight_decay=weight_decay,
                                    eps=1e-4)
        if self.net_params['pretrained']:
            # Resume: restore net and optimizer state, reset the learning rate,
            # and move the optimizer's state tensors onto the GPU.
            checkpoint = torch.load(self.net_params['pretrained_model'],
                                    map_location="cuda")
            net.load_state_dict(checkpoint['net_state_dict'])
            self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
            for p in self.optimizer.param_groups:
                p['lr'] = lr
            for state in self.optimizer.state.values():
                for k, v in state.items():
                    if torch.is_tensor(v):
                        state[k] = v.cuda()
            print('CHECKPOINT LOADED')
        net.cuda()

        first_epoch = 0
        # scheduler
        if self.sched_type == 'ocp':
            last_epoch = -1 if first_epoch == 0 else first_epoch * len(
                train_loader)
            self.scheduler = OneCycleLR(
                self.optimizer,
                max_lr=lr,
                epochs=epochs,
                last_epoch=last_epoch,
                steps_per_epoch=len(train_loader),
                pct_start=self.train_params['ocp_params']['max_lr_pct'])
        elif self.sched_type == 'multi_step':
            last_epoch = -1 if first_epoch == 0 else first_epoch
            self.scheduler = MultiStepLR(
                self.optimizer,
                milestones=self.train_params['multi_params']['milestones'],
                gamma=self.train_params['multi_params']['gamma'],
                last_epoch=last_epoch)

        # start training

        net.train()
        val_rate = self.train_params['val_rate']
        test_rate = self.train_params['test_rate']
        for epoch in range(first_epoch, epochs):
            self.train_epoch(net, train_loader, epoch)

            # OneCycleLR ('ocp') is presumably stepped per batch inside train_epoch,
            # so only epoch-wise schedulers are stepped here.
            if self.sched_type != 'ocp':
                self.writer.log_lr(epoch, self.scheduler.get_last_lr()[0])
                self.scheduler.step()

            if (epoch + 1) % val_rate == 0 or epoch == epochs - 1:
                self.eval(net, val_loader, epoch * len(train_loader))
            if (epoch + 1) % (val_rate *
                              test_rate) == 0 or epoch == epochs - 1:
                self.test_ap(net, epoch)
                self.save_checkpoints(epoch, net)
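
The config keys this method relies on can be read off the code above; a sketch of the corresponding entries with illustrative values (not taken from the original config.yaml):

# Illustrative values only; the keys mirror what train() reads from
# self.train_params and self.net_params.
train_params = {
    'batch_size': 32, 'num_workers': 4, 'pin_memory': True,
    'lr': 1e-3, 'epochs': 120, 'weight_decay': 1e-4,
    'val_rate': 5,   # validate every 5 epochs
    'test_rate': 2,  # run test_ap every val_rate * test_rate = 10 epochs
    'ocp_params': {'max_lr_pct': 0.3},                        # for sched_type == 'ocp'
    'multi_params': {'milestones': [60, 90], 'gamma': 0.1},   # for sched_type == 'multi_step'
}
net_params = {
    'pretrained': False,
    'pretrained_model': 'checkpoints/Epoch_111.pth',  # only read when pretrained is True
}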
Example #4
    # (snippet starts mid-parser; the first argument is presumably '--ckpt',
    #  inferred from the use of args.ckpt below)
    parser.add_argument('--ckpt',
                        type=str,
                        default='../tf_convert/sample_torch.pth',
                        help='torch checkpoint path')
    parser.add_argument('--name',
                        type=str,
                        default='test_tf',
                        help='name of output model')
    parser.add_argument('--map',
                        type=str,
                        default='./models/tf/mapping_table.json',
                        help='Mapping table json file')
    args = parser.parse_args()

    cfg = load_yaml('config.yaml')
    models_path = cfg['paths']['converted_models']

    model_name = args.name
    if model_name is None:
        raise Exception('Must enter a name, --name')

    torch_ckpt = args.ckpt
    torch_ckpt = torch.load(torch_ckpt, map_location='cpu')['net_state_dict']

    with open(args.map, 'r') as j:
        mapping_table = json.load(j)

    tf_model = get_fpn_net(cfg['net'], framework='tf')
    tf_model.build((1, 320, 320, 3))

    convert(torch_ckpt, tf_model, mapping_table, models_path, model_name)