Example No. 1
    def __init__(self, opt):
        if opt.gpus[0] >= 0:
            opt.device = torch.device('cuda')
        else:
            opt.device = torch.device('cpu')

        print('Creating model...')
        self.model = create_model(opt.arch, opt.heads, opt.head_conv, opt=opt)
        self.model = load_model(self.model, opt.load_model, opt)
        self.model = self.model.to(opt.device)
        self.model.eval()
        # inp = (torch.ones([1, 3, 320, 320]).cuda(),
        #        torch.ones([1, 3, 320, 320]).cuda(),
        #        torch.ones([1, 1, 320, 320]).cuda())
        # pytorch_to_caffe.trans_net(self.model, inp, 'res18')
        # pytorch_to_caffe.save_prototxt('{}.prototxt'.format('res18'))
        # pytorch_to_caffe.save_caffemodel('{}.caffemodel'.format('res18'))

        self.opt = opt
        self.trained_dataset = get_dataset(opt.dataset)
        self.mean = np.array(self.trained_dataset.mean,
                             dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(self.trained_dataset.std,
                            dtype=np.float32).reshape(1, 1, 3)
        self.pause = not opt.no_pause
        self.rest_focal_length = self.trained_dataset.rest_focal_length \
            if self.opt.test_focal_length < 0 else self.opt.test_focal_length
        self.flip_idx = self.trained_dataset.flip_idx
        self.cnt = 0
        self.pre_images = None
        self.pre_image_ori = None
        self.tracker = Tracker(opt)
        self.debugger = Debugger(opt=opt, dataset=self.trained_dataset)
Example No. 2
    def __init__(self, opt):
        if opt.gpus[0] >= 0:
            opt.device = torch.device('cuda')
        else:
            opt.device = torch.device('cpu')

        print('Creating model...')
        self.model = create_model(opt.arch, opt.heads, opt.head_conv, opt=opt)
        self.model = load_model(self.model, opt.load_model, opt)
        self.model = self.model.to(opt.device)
        self.model.eval()

        self.opt = opt
        self.trained_dataset = get_dataset(opt.dataset)
        self.mean = np.array(self.trained_dataset.mean,
                             dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(self.trained_dataset.std,
                            dtype=np.float32).reshape(1, 1, 3)
        self.pause = not opt.no_pause
        self.rest_focal_length = self.trained_dataset.rest_focal_length \
          if self.opt.test_focal_length < 0 else self.opt.test_focal_length
        self.flip_idx = self.trained_dataset.flip_idx
        self.cnt = 0
        self.pre_images = None
        self.pre_image_ori = None
        self.tracker = Tracker(opt)
        self.debugger = Debugger(opt=opt, dataset=self.trained_dataset)
Example No. 3
def model_thread_fn(q_in, q_out, path, full_precision=False):
    video_id, camera_id, max_frames, width, height = get_video_params(path)
    model = create_model()
    model = load_model(model, 'checkpoints/coco_tracking.pth')
    model.to(torch.device('cuda'))
    model.eval()

    pre_img = None

    for i in range(max_frames):
        img = q_in.get()

        img = torch.from_numpy(img).to(torch.device('cuda'))

        if pre_img is None:
            pre_img = img

        with torch.no_grad():
            with torch.cuda.amp.autocast(enabled=not full_precision):
                out = model(img, pre_img, None)[-1]
                out = sigmoid_output(out)
                dets = generic_decode(out)
        pre_img = img

        q_out.put(dets)
Example No. 4
def run(path, batch_size=4, debug=0):
    pre_model_load = time.time()

    model = create_model()
    model = load_model(model, 'checkpoints/coco_tracking.pth')
    model.to(torch.device('cuda'))
    model.eval()
    del model

    model_loading_time = time.time() - pre_model_load
    multi_vid_list = get_sorted_list(path=path)

    q_in_model = Queue(10)
    q_out_model = Queue(10)
    q_times = Queue(batch_size + 10)

    reader_thread = Thread(target=reader_thread_fn,
                           args=(q_in_model, q_times, batch_size, path,
                                 multi_vid_list))
    model_thread = Thread(target=model_thread_fn,
                          args=(q_in_model, q_out_model))
    tracker_thread = Thread(target=tracker_thread_fn,
                            args=(q_out_model, q_times, multi_vid_list,
                                  model_loading_time, batch_size, debug))

    reader_thread.start()
    model_thread.start()
    tracker_thread.start()

    reader_thread.join()
    model_thread.join()
    tracker_thread.join()

    print("Total time: {}".format(time.time() - pre_model_load),
          file=sys.stderr)
Example No. 5
def convert_onnx(opt):
  os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
  opt.model_output_list = True
  if opt.gpus[0] >= 0:
    opt.device = torch.device('cuda')
  else:
    opt.device = torch.device('cpu')
  Dataset = dataset_factory[opt.test_dataset]
  opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
  print(opt)
  model = create_model(
      opt.arch, opt.heads, opt.head_conv, opt=opt)
  if opt.load_model != '':
    model = load_model(model, opt.load_model, opt)
  model = model.to(opt.device)
  model.eval()
  dummy_input1 = torch.randn(1, 3, opt.input_h, opt.input_w).to(opt.device)

  if opt.tracking:
    dummy_input2 = torch.randn(1, 3, opt.input_h, opt.input_w).to(opt.device)
    if opt.pre_hm:
      dummy_input3 = torch.randn(1, 1, opt.input_h, opt.input_w).to(opt.device)
      torch.onnx.export(
        model, (dummy_input1, dummy_input2, dummy_input3), 
        "../models/{}.onnx".format(opt.exp_id))
    else:
      torch.onnx.export(
        model, (dummy_input1, dummy_input2), 
        "../models/{}.onnx".format(opt.exp_id))
  else:
    torch.onnx.export(
      model, (dummy_input1, ), 
      "../models/{}.onnx".format(opt.exp_id))
Example No. 6
    def __init__(self, opt):
        if opt.gpus[0] >= 0:
            opt.device = torch.device('cuda')
        else:
            opt.device = torch.device('cpu')

        print('Creating model...')
        self.model = create_model(opt.arch, opt.heads, opt.head_conv, opt=opt)
        self.model = load_model(self.model, opt.load_model, opt)
        self.model = self.model.to(opt.device)
        self.model.eval()

        self.opt = opt
        self.trained_dataset = get_dataset(opt.dataset)
        self.mean = np.array(self.trained_dataset.mean,
                             dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(self.trained_dataset.std,
                            dtype=np.float32).reshape(1, 1, 3)
        self.pause = not opt.no_pause
        self.rest_focal_length = self.trained_dataset.rest_focal_length \
          if self.opt.test_focal_length < 0 else self.opt.test_focal_length
        self.flip_idx = self.trained_dataset.flip_idx
        self.cnt = 0
        self.pre_images = None
        self.pre_image_ori = None
        self.tracker = Tracker(opt)
        self.debugger = Debugger(opt=opt, dataset=self.trained_dataset)

        self.motion = opt.motion
        if self.motion == 'transformer':
            import sys
            M3_PATH = '/u/jozhang/code/motion3d/'
            sys.path.insert(0, M3_PATH)
            from models.transformer import DPTransformer
            # motion = DPTransformer(2, 64, {'depth': 3, 'heads': 8, 'dim_head': 8, 'mlp_dim': 64, 'dropout': 0.})
            # trans_path = '/scratch/cluster/jozhang/logs/hydra/2021-01-30/15-36-54/models/ckpt-latest.dat'
            ckpt = torch.load(opt.transformer_load_path)
            self.transformer = ckpt['model'].cuda()
            print(
                f'Using transformer motion loaded from {opt.transformer_load_path}'
            )
        elif self.motion == 'zero':
            print(f'Using no motion model')
        elif self.motion == 'cttrack':
            print(f'Using cttrack motion model')
        else:
            assert False, f'Unrecognized motion model: {self.motion}'

        self.negate_motion = opt.negate_motion
        if self.negate_motion:
            logging.warning('Motion is being negated! Are you sure?')

        self.all_pre_images = []
Example No. 7
def load_best_val_loss(opt, model, optimizer, val_loader):
    best_model_path = os.path.join(opt.save_dir, "model_best.pth")
    if os.path.isfile(best_model_path):
        print('Loading Best Model...')
        model, optimizer, start_epoch = load_model(model, best_model_path, opt,
                                                   optimizer)
        trainer = Trainer(opt, model, optimizer, tb_writer=False)
        trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)
        with torch.no_grad():
            log_dict_val, preds = trainer.val(start_epoch, val_loader)
        return log_dict_val['tot']
    else:
        return 1e8
Example No. 8
    def __init__(self, opt):

        self.model = DecoderRNN(128, opt.dataset)
        if opt.load_model_traj != "":
            self.model = load_model(self.model, opt.load_model_traj, opt)
            if not opt.not_set_cuda_env:
                os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpus_str
            opt.device = torch.device("cuda" if opt.gpus[0] >= 0 else "cpu")
            self.model = self.model.to(opt.device)
            self.model.eval()
        self.opt = opt
        if opt.dataset == "nuscenes":
            self.MAX_dis_fut = 4
        else:
            self.MAX_dis_fut = 5
Example No. 9
def run(path, batch_size=4, debug=0):
    pre_model_load = time.time()

    model = create_model()
    model = load_model(model, 'checkpoints/coco_tracking.pth')
    model.to(torch.device('cuda'))
    model.eval()

    model_loading_time = time.time() - pre_model_load

    multi_vid_list = get_sorted_list(path=path)
    vid_managers = [VideoManager(path, vid_list, model_loading_time) for vid_list in multi_vid_list[:batch_size]]

    next_video_id = len(vid_managers)
    processed_frames = 0

    done = False
    while len(vid_managers) > 0:
        imgs = [manager.get_img() for manager in vid_managers]
        cur_imgs = torch.cat([x[0] for x in imgs], dim=0)
        prev_imgs = torch.cat([x[1] for x in imgs], dim=0)
        with torch.no_grad():
            with torch.cuda.amp.autocast(enabled=True):
                out = model(cur_imgs, prev_imgs, None)[-1]
                out = sigmoid_output(out)
                dets = generic_decode(out)

        for k in dets:
            dets[k] = dets[k].detach().cpu().numpy()

        processed_frames += len(vid_managers)

        for i, manager in reversed(list(enumerate(vid_managers))):
            single_dets = {k: v[np.newaxis, i] for k, v in dets.items()}
            manager.process_output(single_dets)

            if manager.is_done():
                manager.finalize()
                if next_video_id < len(multi_vid_list):
                    vid_managers[i] = VideoManager(path, multi_vid_list[next_video_id], model_loading_time)
                    next_video_id += 1
                else:
                    del vid_managers[i]

        if debug:
            frame_time = time.time() - pre_model_load
            FPS = processed_frames / frame_time
            print("At frame {} FPS {}".format(processed_frames, FPS), file=sys.stderr)
Example No. 10
def model_thread_fn(q_in, q_out):
    model = create_model()
    model = load_model(model, 'checkpoints/coco_tracking.pth')
    model.to(torch.device('cuda'))
    model.eval()

    while True:
        cur_imgs, prev_imgs = q_in.get(timeout=30)
        if cur_imgs is None:
            break
        with torch.no_grad():
            with torch.cuda.amp.autocast(enabled=True):
                out = model(cur_imgs, prev_imgs, None)[-1]
                out = sigmoid_output(out)
                dets = generic_decode(out)
        q_out.put(dets)
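A minimal driver sketch for the worker above (an assumption, not code from the original project): frames are pushed onto q_in as (current, previous) pairs, and a (None, None) sentinel matches the `if cur_imgs is None: break` exit condition.

from queue import Queue
from threading import Thread

q_in, q_out = Queue(maxsize=10), Queue(maxsize=10)
worker = Thread(target=model_thread_fn, args=(q_in, q_out))
worker.start()

prev = None
for cur in frame_tensors:  # frame_tensors: assumed iterable of preprocessed CUDA tensors
    q_in.put((cur, cur if prev is None else prev))
    dets = q_out.get()     # consume detections so the output queue never fills up
    prev = cur

q_in.put((None, None))     # sentinel: tells the worker to leave its loop
worker.join()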
Example No. 11
def main(opt):
  print('Creating model...')
  torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
  Dataset = get_dataset(opt.dataset)
  opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
  print(opt)
  if not opt.not_set_cuda_env:
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
  opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')
  model = create_model(opt.arch, opt.heads, opt.head_conv, opt=opt)

  optimizer = get_optimizer(opt, model)

  if opt.load_model != '':
      model, optimizer, start_epoch = load_model(
        model, opt.load_model, opt, optimizer)
Example No. 12
def train():
    device = torch.device('cuda' if cfg.GPU[0] >= 0 else 'cpu')

    start_epoch = 1
    if start_epoch == 1:
        train_log = open(os.path.join(cfg.LOG_DIR, "train_log.csv"), 'w')
        train_log_title = "epoch,total_loss,classify_loss,angle_loss,iou_loss\n"
        train_log.write(train_log_title)
        train_log.flush()
    else:
        train_log = open(os.path.join(cfg.LOG_DIR, "train_log.csv"), 'a')

    print('Creating model...')
    model = create_model()
    if start_epoch != 1:
        model = load_model(
            model, 'logs/weights/model_epoch_{}.pth'.format(start_epoch - 1))
    optimizer = torch.optim.Adam(model.parameters(), cfg.LR)

    trainer = Trainer(model, optimizer)
    trainer.set_device(device)
    print('Setting up data...')
    train_loader = DataLoader(LatexDataset(),
                              batch_size=cfg.BATCH_SIZE,
                              shuffle=True,
                              num_workers=cfg.NUM_WORKERS,
                              pin_memory=True,
                              drop_last=True)
    print('Starting training...')
    epoch = start_epoch
    for epoch in range(start_epoch, start_epoch + cfg.EPOCHS):
        trainer.train(epoch, train_loader, train_log)
        if epoch % 5 == 0:
            save_model('logs/weights/model_epoch_{}.pth'.format(epoch), epoch,
                       model)

    save_model(os.path.join(cfg.WEIGHTS_DIR, 'model_last.pth'), epoch, model)
Example No. 13
    def __init__(self, opt):
        if opt.gpus[0] >= 0:
            opt.device = torch.device("cuda")
        else:
            opt.device = torch.device("cpu")

        print("Creating model...")
        self.model = create_model(opt.arch, opt.heads, opt.head_conv, opt=opt)
        self.model = load_model(self.model, opt.load_model, opt)
        self.model = self.model.to(opt.device)
        self.model.eval()

        self.opt = opt
        self.trained_dataset = get_dataset(opt.dataset)
        self.mean = np.array(self.trained_dataset.mean,
                             dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(self.trained_dataset.std,
                            dtype=np.float32).reshape(1, 1, 3)
        #     self.pause = not opt.no_pause
        self.rest_focal_length = (self.trained_dataset.rest_focal_length
                                  if self.opt.test_focal_length < 0 else
                                  self.opt.test_focal_length)
        self.flip_idx = self.trained_dataset.flip_idx
        self.cnt = 0
        self.pre_images = None
        self.pre_image_ori = None
        self.dataset = opt.dataset
        if self.dataset == "nuscenes":
            self.tracker = {}
            for class_name in NUSCENES_TRACKING_NAMES:
                self.tracker[class_name] = Tracker(opt, self.model)
        else:
            self.tracker = Tracker(opt, self.model)
        self.debugger = Debugger(opt=opt, dataset=self.trained_dataset)
        self.img_height = 100
        self.img_width = 100
Example No. 14
def main(opt):
    torch.manual_seed(opt.seed)
    torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
    Dataset = get_dataset(opt.dataset)
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)
    if not opt.not_set_cuda_env:
        os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')
    logger = Logger(opt)

    print('Creating model...')
    model = create_model(opt.arch, opt.heads, opt.head_conv, opt=opt)
    optimizer = get_optimizer(opt, model)
    start_epoch = 0
    if opt.load_model != '':
        model, optimizer, start_epoch = load_model(model, opt.load_model, opt,
                                                   optimizer)

    ######################################################
    # Freeze all parameters, then re-enable gradients only for the BDD heads
    for param in model.parameters():
        # print(param)
        param.requires_grad = False

    req_grad = ["model.hm_bdd", "model.wh_bdd", "model.reg_bdd"]
    # for hd in model.reg_tl:
    for custom_head in req_grad:
        for hd in eval(custom_head):
            # print(hd.parameters())
            for wt in hd.parameters():
                # print(wt)
                wt.requires_grad = True

    ######################################################

    trainer = Trainer(opt, model, optimizer)
    trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)

    if opt.val_intervals < opt.num_epochs or opt.test:
        print('Setting up validation data...')
        val_loader = torch.utils.data.DataLoader(Dataset(opt, 'val'),
                                                 batch_size=1,
                                                 shuffle=False,
                                                 num_workers=1,
                                                 pin_memory=True)

        if opt.test:
            _, preds = trainer.val(0, val_loader)
            val_loader.dataset.run_eval(preds, opt.save_dir)
            return

    print('Setting up train data...')
    train_loader = torch.utils.data.DataLoader(Dataset(opt, 'train'),
                                               batch_size=opt.batch_size,
                                               shuffle=True,
                                               num_workers=opt.num_workers,
                                               pin_memory=True,
                                               drop_last=True)

    print('Starting training...')
    for epoch in range(start_epoch + 1, opt.num_epochs + 1):
        mark = epoch if opt.save_all else 'last'
        log_dict_train, _ = trainer.train(epoch, train_loader)
        logger.write('epoch: {} |'.format(epoch))
        for k, v in log_dict_train.items():
            logger.scalar_summary('train_{}'.format(k), v, epoch)
            logger.write('{} {:8f} | '.format(k, v))
        if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
            save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)),
                       epoch, model, optimizer)
            with torch.no_grad():
                log_dict_val, preds = trainer.val(epoch, val_loader)
                if opt.eval_val:
                    val_loader.dataset.run_eval(preds, opt.save_dir)
            for k, v in log_dict_val.items():
                logger.scalar_summary('val_{}'.format(k), v, epoch)
                logger.write('{} {:8f} | '.format(k, v))
        else:
            save_model(os.path.join(opt.save_dir, 'model_last.pth'), epoch,
                       model, optimizer)
        logger.write('\n')
        # if epoch in opt.save_point:
        if epoch % opt.save_point[0] == 0:
            save_model(
                os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
                epoch, model, optimizer)
        if epoch in opt.lr_step:
            lr = opt.lr * (0.1**(opt.lr_step.index(epoch) + 1))
            print('Drop LR to', lr)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
    logger.close()
Example No. 15
def main(opt):
    torch.manual_seed(opt.seed)
    torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
    Dataset = get_dataset(opt.dataset)
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)
    if not opt.not_set_cuda_env:
        os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpus_str
    opt.device = torch.device("cuda" if opt.gpus[0] >= 0 else "cpu")
    logger = Logger(opt)

    print("Creating model...")
    model = create_model(opt.arch, opt.heads, opt.head_conv, opt=opt)
    optimizer = get_optimizer(opt, model)
    start_epoch = 0
    if opt.load_model != "":
        model, optimizer, start_epoch = load_model(
            model, opt.load_model, opt, optimizer
        )

    for i, param in enumerate(model.parameters()):
        param.requires_grad = True
    trainer = Trainer(opt, model, optimizer)
    trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)

    if opt.val_intervals < opt.num_epochs or opt.test:
        print("Setting up validation data...")
        val_loader = torch.utils.data.DataLoader(
            Dataset(opt, "val"),
            batch_size=1,
            shuffle=False,
            num_workers=1,
            pin_memory=True,
        )

        if opt.test:
            _, preds = trainer.val(0, val_loader)
            val_loader.dataset.run_eval(preds, opt.save_dir)
            return

    print("Setting up train data...")
    train_loader = torch.utils.data.DataLoader(
        Dataset(opt, "train"),
        batch_size=opt.batch_size,
        shuffle=True,
        num_workers=opt.num_workers,
        pin_memory=True,
        drop_last=True,
    )

    print("Starting training...")
    for epoch in range(start_epoch + 1, opt.num_epochs + 1):
        save_model(
            os.path.join(opt.save_dir, "model_{}.pth".format(epoch)),
            epoch,
            model,
            optimizer,
        )
        mark = epoch if opt.save_all else "last"
        log_dict_train, _ = trainer.train(epoch, train_loader)
        logger.write("epoch: {} |".format(epoch))
        for k, v in log_dict_train.items():
            logger.scalar_summary("train_{}".format(k), v, epoch)
            logger.write("{} {:8f} | ".format(k, v))
        if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
            save_model(
                os.path.join(opt.save_dir, "model_{}.pth".format(mark)),
                epoch,
                model,
                optimizer,
            )
            with torch.no_grad():
                log_dict_val, preds = trainer.val(epoch, val_loader)
                if opt.eval_val:
                    val_loader.dataset.run_eval(preds, opt.save_dir)
            for k, v in log_dict_val.items():
                logger.scalar_summary("val_{}".format(k), v, epoch)
                logger.write("{} {:8f} | ".format(k, v))
        else:
            save_model(
                os.path.join(opt.save_dir, "model_last.pth"), epoch, model, optimizer
            )
        logger.write("\n")
        #     if epoch in opt.save_point:
        save_model(
            os.path.join(opt.save_dir, "model_{}.pth".format(epoch)),
            epoch,
            model,
            optimizer,
        )
        if epoch in opt.lr_step:
            lr = opt.lr * (0.1 ** (opt.lr_step.index(epoch) + 1))
            print("Drop LR to", lr)
            for param_group in optimizer.param_groups:
                param_group["lr"] = lr
    logger.close()
Example No. 16
def run_single_video_serial(path, debug=0, full_precision=False):
    init_time = time.time()
    if debug >= 1:
        print("Starting for video: {}".format(path), file=sys.stderr)

    video_id, camera_id, max_frames, width, height = get_video_params(path)

    cap = cv2.VideoCapture(path)

    model = create_model()
    model = load_model(model, 'checkpoints/coco_tracking.pth')
    model.to(torch.device('cuda'))
    model.eval()

    tracker = Tracker(init_time,
                      video_id,
                      max_frames,
                      camera_id,
                      width,
                      height,
                      debug=debug)

    preprocess_function = get_img_transform(height, width, new_size=512)
    postprocess_trans = get_postprocess_trans(height, width)
    region_mask = get_region_mask(camera_id, height, width)
    region_mask = np.where(region_mask, 255, 0).astype(np.uint8)

    if debug > 2:
        cv2.imwrite("mask.png", region_mask)

    pre_img = None

    for i in range(max_frames):
        ret, frame = cap.read()
        if debug >= 2:
            cv2.imshow("Frame", frame)
            cv2.waitKey(1)
            tracker.frame = np.copy(frame)

        frame = cv2.bitwise_and(frame, frame, mask=region_mask)

        img = preprocess_function(frame)
        img = torch.from_numpy(img).to(torch.device('cuda'))

        if pre_img is None:
            pre_img = img

        with torch.no_grad():
            with torch.cuda.amp.autocast(enabled=not full_precision):
                out = model(img, pre_img, None)[-1]
                out = sigmoid_output(out)
                dets = generic_decode(out)

        pre_img = img

        for k in dets:
            dets[k] = dets[k].detach().cpu().numpy()

        dets = post_process(dets, postprocess_trans)[0]
        tracker.step(dets)

        if debug >= 1 and i % 100 == 99:
            frame_time = time.time() - init_time
            FPS = (i + 1) / frame_time
            print("At frame {} FPS {}".format(i + 1, FPS), file=sys.stderr)

    tracker.finalize()

    if debug >= 1:
        print("Finished video: {}".format(path), file=sys.stderr)
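A small batch-driver sketch (assumed, not part of the original file): run the serial function above over every .mp4 in a directory; the directory argument and the file pattern are illustrative only.

import glob
import os
import sys

video_dir = sys.argv[1] if len(sys.argv) > 1 else 'videos'  # assumed CLI convention
for video_path in sorted(glob.glob(os.path.join(video_dir, '*.mp4'))):
    run_single_video_serial(video_path, debug=1)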
Example No. 17
def main(opt):
    torch.manual_seed(opt.seed)
    torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
    Dataset = get_dataset(opt.dataset, prediction_model=True)
    if not opt.not_set_cuda_env:
        os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpus_str
    opt.device = torch.device("cuda" if opt.gpus[0] >= 0 else "cpu")
    device = opt.device
    logger = Logger(opt)

    print("Creating model...")

    model = DecoderRNN(128, opt)
    optimizer = get_optimizer(opt, model)
    start_epoch = 0
    if opt.load_model_traj != "":
        model, optimizer, start_epoch = load_model(model, opt.load_model, opt,
                                                   optimizer)
    loss_function = torch.nn.SmoothL1Loss()

    for i, param in enumerate(model.parameters()):
        param.requires_grad = True

    train_loader = torch.utils.data.DataLoader(
        Dataset(opt, "train"),
        batch_size=1,
        shuffle=True,
        num_workers=16,
        pin_memory=True,
        drop_last=True,
    )

    for state in optimizer.state.values():
        for k, v in state.items():
            if isinstance(v, torch.Tensor):
                state[k] = v.to(device=device, non_blocking=True)
    model = model.to(device)
    loss_function = loss_function.to(device)

    print("Starting training...")
    for epoch in range(start_epoch + 1, opt.num_epochs + 1):
        mark = epoch if opt.save_all else "last"
        for iter_id, (inputs, targets) in enumerate(train_loader):
            inputs = inputs.to(device=device).float()
            targets = targets.to(device=device).view(1, -1).float()
            outputs = model(inputs)
            loss = loss_function(outputs, targets)
            if 100 * loss.item() < 20:
                loss = 100 * loss
            else:
                loss = 10 * loss
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            del outputs, loss

        save_model(os.path.join(opt.save_dir, "model_last.pth"), epoch, model,
                   optimizer)
        logger.write("\n")
        save_model(
            os.path.join(opt.save_dir, "model_{}.pth".format(epoch)),
            epoch,
            model,
            optimizer,
        )
        if epoch in opt.lr_step:
            lr = opt.lr * (0.1**(opt.lr_step.index(epoch) + 1))
            for param_group in optimizer.param_groups:
                param_group["lr"] = lr
    logger.close()
Example No. 18
import os
from io import BytesIO

from flask import Flask, jsonify, request
from flask_cors import CORS
from PIL import Image
from model.model import make_prediction, load_model, load_cat_to_name, pretty_print_prediction
from model.image import decode_image, transform_image, image_to_tensor

# Start app
# Change default location of index.html from ./templates to ./static
app = Flask(__name__, static_url_path='', template_folder='static')
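
# The setup of the globals used in root() below is not shown in this snippet;
# a minimal sketch, assuming load_model() and load_cat_to_name() take no
# arguments here (their real signatures live in model.model and may differ):
model = load_model()
cat_to_name = load_cat_to_name()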

# Activate CORS.
CORS(app, resources={r'/*': {'origins': 'http://*****:*****'}})


@app.route("/api/predict/", methods=['POST'])
def root():
    image = decode_image(request.data)
    image = transform_image(image)
    image = image_to_tensor(image)

    # Run the prediction
    prediction, prob_rounded = make_prediction(image, model, cat_to_name)
    prediction = pretty_print_prediction(prediction)

    return jsonify({'data': prediction, 'prob_rounded': prob_rounded})
Example No. 19
def online_fit(num_timesteps, num_targets, num_tweets=300):

    stabilize_logs()

    dir_path = os.path.dirname(os.path.abspath(__file__))

    conn = sqlite3.connect(os.path.join(dir_path, 'historical.db'))

    cursor = conn.cursor()

    # For debugging: delete the newest record to simulate an hour having passed
    #cursor.execute("DELETE FROM historical ORDER BY date DESC LIMIT 1")

    cursor.execute("SELECT * FROM historical ORDER BY date DESC LIMIT 1")
    last_record = cursor.fetchall()
    from_date = arrow.get((float(last_record[0][0]) + 3600000) /
                          1000).format('YYYY-MM-DD HH:mm:ss')

    combined_length = num_timesteps + num_targets
    cursor.execute(
        "SELECT * FROM historical ORDER BY date DESC LIMIT {}".format(
            combined_length - 1)
    )  # include rows already in the DB: the previous fit did not train on them
    precomputed_data = np.asarray(cursor.fetchall(), dtype=np.float32)
    precomputed_data = precomputed_data[::-1]

    file = open(os.path.join(dir_path, "logs/context_prices.txt"), "a")
    file.write(str(precomputed_data[-1][-1]) + "\n")
    file.close()

    conn.commit()
    conn.close()

    unseen_data = get_historical(num_tweets,
                                 from_date=from_date,
                                 is_online=True)

    # Actual price for the last prediction, used when logging to Twitter
    actual_price = unseen_data[0][-1]
    file = open(os.path.join(dir_path, "logs/actuals.txt"), "a")
    file.write(str(actual_price) + "\n")
    file.close()

    all_data = np.concatenate((precomputed_data, unseen_data), axis=0)

    # store recent data so that we can get a live prediction
    recent_reference = []
    recent_data = all_data[-num_timesteps:, 1:]
    recent_data = normalize_timestep(recent_data, recent_reference)

    timesteps = split_into_timeseries(all_data, combined_length)

    reference = []
    for i in range(0, len(timesteps)):
        timesteps[i] = normalize_timestep(timesteps[i], reference)

    split_index = len(timesteps[0]) - num_targets
    X_train = timesteps[:, :split_index]
    y_train = timesteps[:, split_index:, -1]

    model = load_model()

    #train the model
    print("TRAINING")
    model.fit(X_train,
              y_train,
              batch_size=512,
              epochs=10,
              validation_split=0,
              verbose=2)
    save_model(model)

    recent_data = np.asarray([recent_data.tolist()])

    future = model.predict(recent_data)
    predictions = (future[0] + 1) * recent_reference[0]
    recent_data[0] = (recent_data[0] + 1) * recent_reference[0]

    # document results in file
    print("WRITING TO LOG")
    file = open(os.path.join(dir_path, "logs/log_online.txt"), "w")
    for timestep in recent_data:
        file.write(str(timestep) + "\n")
    file.write(str(future[0]) + "\n")
    file.close()

    file = open(os.path.join(dir_path, "logs/predictions.txt"), "a")
    file.write(str(predictions[0]) + "\n")
    file.close()

    log_to_twitter(predictions)

    return predictions
Example No. 20
def main(opt):
    torch.manual_seed(opt.seed)
    torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
    Dataset = get_dataset(opt.dataset)
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)

    # Log our parameters into mlflow
    for key, value in vars(opt).items():
        mlflow.log_param(key, value)

    if not opt.not_set_cuda_env:
        os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')
    logger = Logger(opt)

    print('Creating model...')
    model = create_model(opt.arch, opt.heads, opt.head_conv, opt=opt)
    optimizer = get_optimizer(opt, model)
    start_epoch = 0
    if opt.load_model != '':
        model, optimizer, start_epoch = load_model(model, opt.load_model, opt,
                                                   optimizer)

    trainer = Trainer(opt, model, optimizer)
    trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)

    if opt.val_intervals < opt.num_epochs or opt.test:
        print('Setting up validation data...')
        val_loader = torch.utils.data.DataLoader(Dataset(
            opt, 'val', opt.data_name),
                                                 batch_size=1,
                                                 shuffle=False,
                                                 num_workers=1,
                                                 pin_memory=True)

        if opt.test:
            _, preds = trainer.val(0, val_loader)
            val_loader.dataset.run_eval(preds, opt.save_dir)
            return

    print('Setting up train data...')
    train_loader = torch.utils.data.DataLoader(Dataset(opt, 'train',
                                                       opt.data_name),
                                               batch_size=opt.batch_size,
                                               shuffle=True,
                                               num_workers=opt.num_workers,
                                               pin_memory=True,
                                               drop_last=True)

    print('Starting training...')
    best = 1e10
    best_epoch = 1e10
    for epoch in range(start_epoch + 1, opt.num_epochs + 1):
        mark = epoch if opt.save_all else 'last'
        log_dict_train, _ = trainer.train(epoch, train_loader)
        logger.write('epoch: {} |'.format(epoch))
        for k, v in log_dict_train.items():
            logger.scalar_summary('train_{}'.format(k), v, epoch)
            logger.write('{} {:8f} | '.format(k, v))
            mlflow.log_metric('train_{}'.format(k), v, step=epoch)
        if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
            save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)),
                       epoch, model, optimizer)
            with torch.no_grad():
                log_dict_val, preds = trainer.val(epoch, val_loader)
                if opt.eval_val:
                    val_loader.dataset.run_eval(preds, opt.save_dir)
            for k, v in log_dict_val.items():
                logger.scalar_summary('val_{}'.format(k), v, epoch)
                logger.write('{} {:8f} | '.format(k, v))
                mlflow.log_metric('val_{}'.format(k), v, step=epoch)
            if log_dict_val[opt.metric] < best:
                best = log_dict_val[opt.metric]
                best_epoch = epoch
                save_model(os.path.join(opt.save_dir, 'model_best.pth'), epoch,
                           model)
        else:
            save_model(os.path.join(opt.save_dir, 'model_last.pth'), epoch,
                       model, optimizer)
        logger.write('\n')
        if epoch in opt.save_point:
            save_model(
                os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
                epoch, model, optimizer)
        # early stopping
        if isinstance(opt.early_stopping, int):
            if epoch - best_epoch > opt.early_stopping:
                msg = 'Stopped {} epoch. Best epoch is {}, score is {}.'.format(
                    epoch, best_epoch, best)
                print(msg)
                logger.write(msg)
                break
        if epoch in opt.lr_step:
            lr = opt.lr * (0.1**(opt.lr_step.index(epoch) + 1))
            print('Drop LR to', lr)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr

    logger.close()
Example No. 21
    parser.add_argument('-b', action='store_true', help='Use basic model?')
    parser.add_argument('-s', type=str, help='Sessions data (jsonl)')
    parser.add_argument('-o', type=str, help='Output file name?')
    args = parser.parse_args()

    users = pd.read_csv(user_preprocessed_path)
    users.set_index('user_id', inplace=True)
    sessions = pd.read_json(args.s, lines=True)
    data = None
    features = []
    model = None

    if args.b:
        products_preprocessed = pd.read_csv(products_basic_preprocessed_path)
        products_preprocessed.set_index('product_id', inplace=True)
        model = load_model(model_basic_path)
        data = make_basic_data(sessions, products_preprocessed)
        features = BASE_MODEL_ATTRIBUTES
    else:
        products_preprocessed = pd.read_csv(products_mature_preprocessed_path)
        products_preprocessed.set_index('product_id', inplace=True)
        model = load_model(model_mature_path)
        data = make_mature_data(products_preprocessed, sessions, users,
                                get_categories(), category_weights)
        features = MATURE_MODEL_ATTRIBUTES

    indexes, predictions = data.index.tolist(), model.predict(data[features])
    with open(args.o if args.o is not None else 'predictions.csv',
              mode='w',
              newline='') as prediction_file:
        writer = csv.writer(prediction_file,
Example No. 22
import bottle
from beaker.middleware import SessionMiddleware

from model import model
from bottle_jwt import (JWTProviderPlugin, jwt_auth_required)
from bottle_jwt.backends import AuthBackend

app = bottle.app()
app.config.load_config('./etc/config.conf')
print(app.config)
server_secret = app.config['server_secret']
ssid = app.config['ssid']
twilio = app.config['twilio']

model = Model()
model.load_model()
"""@api {post} /login Login to the service with email and password.
   @apiVersion 0.0.1
   @apiName Login
   @apiGroup User

   @apiParam {String} email the user's email.
   @apiParam {String} password the user's password.

   @apiSuccess {String} email  email of the User.


 """
"""@api {post} /signup Signup to the service with email and password.
   @apiVersion 0.0.1
   @apiName Signup
Example No. 23
    def __init__(self, camera_stream, obstacle_tracking_stream, flags,
                 camera_setup):
        from dataset.dataset_factory import get_dataset
        from model.model import create_model, load_model
        from opts import opts
        from utils.tracker import Tracker

        camera_stream.add_callback(self.on_frame_msg,
                                   [obstacle_tracking_stream])
        self._flags = flags
        self._logger = erdos.utils.setup_logging(self.config.name,
                                                 self.config.log_file_name)
        self._csv_logger = erdos.utils.setup_csv_logging(
            self.config.name + '-csv', self.config.csv_log_file_name)
        self._camera_setup = camera_setup
        # TODO(ionel): Might have to filter labels when running with a coco
        # and a nuscenes model.
        num_classes = {
            'kitti_tracking': 3,
            'coco': 90,
            'mot': 1,
            'nuscenes': 10
        }
        # Other flags:
        # 1) --K ; max number of output objects.
        # 2) --fix_short ; resizes the height of the image to fix short, and
        # the width such the aspect ratio is maintained.
        # 3) --pre_hm ; pre heat map.
        # 4) --input_w; str(camera_setup.width)
        # 5) --input_h; str(camera_setup.height)
        args = [
            'tracking', '--load_model', flags.center_track_model_path,
            '--dataset', flags.center_track_model, '--test_focal_length',
            str(int(camera_setup.get_focal_length())), '--out_thresh',
            str(flags.obstacle_detection_min_score_threshold), '--pre_thresh',
            str(flags.obstacle_detection_min_score_threshold), '--new_thresh',
            str(flags.obstacle_detection_min_score_threshold),
            '--track_thresh',
            str(flags.obstacle_detection_min_score_threshold), '--max_age',
            str(flags.obstacle_track_max_age), '--num_classes',
            str(num_classes[flags.center_track_model]), '--tracking',
            '--hungarian'
        ]
        opt = opts().init(args)
        gpu = True
        if gpu:
            opt.device = torch.device('cuda')
        else:
            opt.device = torch.device('cpu')
        self.opt = opt
        self.model = create_model(opt.arch, opt.heads, opt.head_conv, opt=opt)
        self.model = load_model(self.model, opt.load_model, opt)
        self.model = self.model.to(self.opt.device)
        self.model.eval()

        self.trained_dataset = get_dataset(opt.dataset)
        self.mean = np.array(self.trained_dataset.mean,
                             dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(self.trained_dataset.std,
                            dtype=np.float32).reshape(1, 1, 3)
        self.rest_focal_length = self.trained_dataset.rest_focal_length \
            if self.opt.test_focal_length < 0 else self.opt.test_focal_length
        self.flip_idx = self.trained_dataset.flip_idx
        self.cnt = 0
        self.pre_images = None
        self.pre_image_ori = None
        self.tracker = Tracker(opt)
Example No. 24
def retrain_model():
    load_model(force_retrain=True)
Example No. 25
import torch
import torch.utils.data
from opts import opts
from model.model import create_model, load_model, save_model
from model.data_parallel import DataParallel
from logger import Logger
from dataset.dataset_factory import get_dataset
from trainer import Trainer
from main import get_optimizer

if __name__ == '__main__':
    opt = opts().parse()
    torch.manual_seed(opt.seed)
    Dataset = get_dataset(opt.dataset)
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    path_1 = '/mnt/3dvision-cpfs/zhuoyu/CenterTrack/exp/ddd/nu_3d_det_uni/model_last.pth'
    path_2 = '/mnt/3dvision-cpfs/zhuoyu/CenterTrack/exp/ddd/nu_3d_det_fix_param/model_last.pth'

    model_1 = create_model(opt.arch, opt.heads, opt.head_conv, opt=opt)
    model_2 = create_model(opt.arch, opt.heads, opt.head_conv, opt=opt)
    optimizer = get_optimizer(opt, model_1)

    model_1, _, _ = load_model(model_1, path_1, opt, optimizer)

    model_2, _, _ = load_model(model_2, path_2, opt, optimizer)

    for p1, p2 in zip(model_1.parameters(), model_2.parameters()):
        if p1.data.ne(p2.data).sum() > 0:
            print(False)
        else:
            print(True)
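An alternative sketch (an assumption, not part of the original script): compare the two checkpoints key by key with torch.equal, without rebuilding the models; it assumes each .pth file stores its weights under a 'state_dict' key.

import torch

sd1 = torch.load(path_1, map_location='cpu')['state_dict']
sd2 = torch.load(path_2, map_location='cpu')['state_dict']
for name in sd1:
    same = name in sd2 and torch.equal(sd1[name], sd2[name])
    print(name, same)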
Example No. 26
def main(opt):
  torch.manual_seed(opt.seed)
  torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.eval
  Dataset = get_dataset(opt.dataset)
  opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
  print(opt)
  if not opt.not_set_cuda_env:
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
  opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')
  logger = Logger(opt)

  print('Creating model...')
  model = create_model(opt.arch, opt.heads, opt.head_conv, opt=opt)
  optimizer = get_optimizer(opt, model)
  start_epoch = 0
  lr = opt.lr

  if opt.load_model != '':
    model, optimizer, start_epoch = load_model(
      model, opt.load_model, opt, optimizer)

  trainer = Trainer(opt, model, optimizer)
  trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)
  
  if opt.val_intervals < opt.num_epochs or opt.eval:
    print('Setting up validation data...')
    val_loader = torch.utils.data.DataLoader(
      Dataset(opt, opt.val_split), batch_size=1, shuffle=False, 
              num_workers=1, pin_memory=True)

    if opt.eval:
      _, preds = trainer.val(0, val_loader)
      val_loader.dataset.run_eval(preds, opt.save_dir, n_plots=opt.eval_n_plots, 
                                  render_curves=opt.eval_render_curves)
      return

  print('Setting up train data...')
  train_loader = torch.utils.data.DataLoader(
      Dataset(opt, opt.train_split), batch_size=opt.batch_size, 
        shuffle=opt.shuffle_train, num_workers=opt.num_workers, 
        pin_memory=True, drop_last=True
  )

  print('Starting training...')
  for epoch in range(start_epoch + 1, opt.num_epochs + 1):
    mark = epoch if opt.save_all else 'last'

    # log learning rate
    for param_group in optimizer.param_groups:
      lr = param_group['lr']
      logger.scalar_summary('LR', lr, epoch)
      break
    
    # train one epoch
    log_dict_train, _ = trainer.train(epoch, train_loader)
    logger.write('epoch: {} |'.format(epoch))
    
    # log train results
    for k, v in log_dict_train.items():
      logger.scalar_summary('train_{}'.format(k), v, epoch)
      logger.write('{} {:8f} | '.format(k, v))
    
    # evaluate
    if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
      save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)), 
                 epoch, model, optimizer)
      with torch.no_grad():
        log_dict_val, preds = trainer.val(epoch, val_loader)
        
        # evaluate val set using dataset-specific evaluator
        if opt.run_dataset_eval:
          out_dir = val_loader.dataset.run_eval(preds, opt.save_dir, 
                                                n_plots=opt.eval_n_plots, 
                                                render_curves=opt.eval_render_curves)
          
          # log dataset-specific evaluation metrics
          with open('{}/metrics_summary.json'.format(out_dir), 'r') as f:
            metrics = json.load(f)
          logger.scalar_summary('AP/overall', metrics['mean_ap']*100.0, epoch)
          for k,v in metrics['mean_dist_aps'].items():
            logger.scalar_summary('AP/{}'.format(k), v*100.0, epoch)
          for k,v in metrics['tp_errors'].items():
            logger.scalar_summary('Scores/{}'.format(k), v, epoch)
          logger.scalar_summary('Scores/NDS', metrics['nd_score'], epoch)
      
      # log eval results
      for k, v in log_dict_val.items():
        logger.scalar_summary('val_{}'.format(k), v, epoch)
        logger.write('{} {:8f} | '.format(k, v))
    
    # save this checkpoint
    else:
      save_model(os.path.join(opt.save_dir, 'model_last.pth'), 
                 epoch, model, optimizer)
    logger.write('\n')
    if epoch in opt.save_point:
      save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)), 
                 epoch, model, optimizer)
    
    # update learning rate
    if epoch in opt.lr_step:
      lr = opt.lr * (0.1 ** (opt.lr_step.index(epoch) + 1))
      print('Drop LR to', lr)
      for param_group in optimizer.param_groups:
          param_group['lr'] = lr

  logger.close()
Example No. 27
def main(opt):
    torch.manual_seed(opt.seed)
    torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
    Dataset = get_dataset(opt.dataset)
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)
    if not opt.not_set_cuda_env:
        os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')
    logger = Logger(opt)

    print('Creating model...')
    model = create_model(opt.arch, opt.heads, opt.head_conv, opt=opt)
    optimizer = get_optimizer(opt, model)
    start_epoch = 0
    if opt.load_model != '':
        model, optimizer, start_epoch = load_model(model, opt.load_model, opt,
                                                   optimizer)

    trainer = Trainer(opt, model, optimizer)
    trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)

    print('Setting up train data...')
    train_loader = torch.utils.data.DataLoader(Dataset(opt, 'train'),
                                               batch_size=opt.batch_size,
                                               shuffle=True,
                                               num_workers=opt.num_workers,
                                               pin_memory=True,
                                               drop_last=True)

    print('Starting training...')
    # for each epoch, record scale
    bestmota = 0
    bestepoch = 0

    for epoch in range(start_epoch + 1, opt.num_epochs + 1):
        mark = epoch if opt.save_all else 'last'
        log_dict_train, _ = trainer.train(epoch, train_loader)
        logger.write('epoch: {} |'.format(epoch))
        for k, v in log_dict_train.items():
            logger.scalar_summary('train_{}'.format(k), v, epoch)
            logger.write('{} {:8f} | '.format(k, v))
        save_model(os.path.join(opt.save_dir, 'model_last.pth'), epoch, model,
                   optimizer)
        if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
            save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)),
                       epoch, model, optimizer)
            # with torch.no_grad():
            #     log_dict_val, preds = trainer.val(epoch, val_loader)
            #     if opt.eval_val:
            #         val_loader.dataset.run_eval(preds, opt.save_dir)
            # for k, v in log_dict_val.items():
            #     logger.scalar_summary('val_{}'.format(k), v, epoch)
            #     logger.write('{} {:8f} | '.format(k, v))
            valset = '17halfval'
            mota, motp = prefetch_test(opt, valset)
            if mota > bestmota:
                bestmota = mota
                bestepoch = epoch
            print('mota = {}, motp = {}, bestmota = {}, bestepoch = {}'.format(
                mota, motp, bestmota, bestepoch))

        logger.write('\n')
        if epoch in opt.save_point:
            save_model(
                os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
                epoch, model, optimizer)
        if epoch in opt.lr_step:
            lr = opt.lr * (0.1**(opt.lr_step.index(epoch) + 1))
            print('Drop LR to', lr)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
    logger.close()
Example No. 28
def main(opt):
    torch.manual_seed(opt.seed)
    torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
    Dataset = get_dataset(opt.dataset)
    print(Dataset)
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)
    if not opt.not_set_cuda_env:
        os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')
    logger = Logger(opt)

    print('Creating model...')
    model = create_model(opt.arch, opt.heads, opt.head_conv, opt=opt)
    if opt.fix_backbone:
        for param in model.backbone.parameters():
            param.requires_grad = False
    if opt.fix_dla_up:
        for param in model.neck.dla_up.parameters():
            param.requires_grad = False
    if opt.fix_ida_up:
        for param in model.neck.ida_up.parameters():
            param.requires_grad = False
    optimizer = get_optimizer(opt, model)
    start_epoch = 0
    if opt.load_model != '':
        model, optimizer, start_epoch = load_model(model, opt.load_model, opt,
                                                   optimizer)

    trainer = Trainer(opt, model, optimizer, logger)
    trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)

    if opt.val_intervals < opt.num_epochs or opt.test:
        print('Setting up validation data...')
        val_loader = torch.utils.data.DataLoader(Dataset(opt, 'val'),
                                                 batch_size=1,
                                                 shuffle=False,
                                                 num_workers=1,
                                                 pin_memory=True)

        if opt.test:
            _, preds = trainer.val(0, val_loader)
            val_loader.dataset.run_eval(preds, opt.save_dir)
            return

    print('Setting up train data...')
    if opt.using_randomly_half:
        test_data = Dataset(opt, 'train')
        length = len(test_data)
        torch.random.manual_seed(opt.seed)
        actual_dataset, _ = torch.utils.data.random_split(
            test_data, [
                int(length * opt.use_percent),
                length - int(length * opt.use_percent)
            ])
    else:
        actual_dataset = Dataset(opt, 'train')

    train_loader = torch.utils.data.DataLoader(actual_dataset,
                                               batch_size=opt.batch_size,
                                               shuffle=True,
                                               num_workers=opt.num_workers,
                                               pin_memory=True,
                                               drop_last=True)

    print('Starting training...')
    for epoch in range(start_epoch + 1, opt.num_epochs + 1):
        mark = epoch if opt.save_all else 'last'
        log_dict_train, _ = trainer.train(epoch, train_loader)
        logger.write('epoch: {} |'.format(epoch))
        for k, v in log_dict_train.items():
            logger.scalar_summary('train_{}'.format(k), v, epoch)
            logger.write('{} {:8f} | '.format(k, v))
        if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
            save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)),
                       epoch, model, optimizer)
            with torch.no_grad():
                log_dict_val, preds = trainer.val(epoch, val_loader)
                if opt.eval_val:
                    val_loader.dataset.run_eval(preds, opt.save_dir)
            for k, v in log_dict_val.items():
                logger.scalar_summary('val_{}'.format(k), v, epoch)
                logger.write('{} {:8f} | '.format(k, v))
        else:
            save_model(os.path.join(opt.save_dir, 'model_last.pth'), epoch,
                       model, optimizer)
        logger.write('\n')
        if epoch in opt.save_point:
            save_model(
                os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
                epoch, model, optimizer)
        if epoch in opt.lr_step:
            lr = opt.lr * (0.1**(opt.lr_step.index(epoch) + 1))
            print('Drop LR to', lr)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
    logger.close()
Example No. 29
    # print("hyper parameter information: ")
    # for key in hp.keys():
    #     print(key, hp[key])

    time_str = time.strftime("%m%d-%H%M", time.localtime(time.time()))
    rootdir = "{}/{}/{}-semi-{}-fixed-{}-ratio-{}-lr-{}/".format(
        "/data/yangy/data_prepare/result", hp['dataname'], time_str,
        str(hp['semi']), str(hp['fixed']), str(hp['ratio']), str(args.lr))
    os.makedirs(rootdir, exist_ok=True)
    hp['rootdir'] = rootdir

    np.save('{}parameter.npy'.format(rootdir), hp)

    # Load the model
    my_models = load_model(hp)

    # Load the data
    train_data, test_data = load_data(hp)

    # Pre-train the model
    #my_models = pre_train(hp, my_models, train_data, test_data)

    # Pre-training results
    #result = test(test_data,hp,my_models,'pretrain')

    # Train the model
    my_models = train(hp, my_models, train_data)

    # Save the model
    save_model(my_models, rootdir)