コード例 #1
0
def train(pretext_model="Pretext_1593000476"):
    """Fine-tune the face-verification model from pretext-task weights.

    Parameters
    ----------
    pretext_model : str
        Basename (without ``.pt``) of the pretext checkpoint under
        ``results/train/weights/``; used as the ``--pretrained_weight``
        default and overridable from the command line.

    Returns
    -------
    The trained model's name as reported by the ``Trainer`` instance.
    """
    # Fix every random seed for reproducibility.
    random_seed()

    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--epochs", type=int, default=110)
    arg_parser.add_argument("--lr", type=float, default=0.001)
    arg_parser.add_argument("--batch_size", type=int, default=8)
    arg_parser.add_argument("--num_instances", type=int, default=16)
    arg_parser.add_argument("--pretrained_weight", type=str, default=f"results/train/weights/{pretext_model}.pt")
    args = arg_parser.parse_args()

    # The number of distinct identities defines the classifier width.
    train_meta = pd.read_csv("/datasets/objstrgzip/03_face_verification_angle/train/train_meta.csv")
    num_classes = len(set(train_meta["face_id"].values))

    # Build the model in training mode.
    model = Triarchy(args, num_classes, train=True)

    # Load the pretext-model weights (trained on expression / camera-angle
    # prediction).  The pretext classifier head (fc.*) is discarded because
    # the fine-tuning head has a different class count; pop(..., None)
    # tolerates checkpoints saved without those keys instead of raising
    # KeyError, consistent with the lenient strict=False load below.
    pretrained_weight = torch.load(args.pretrained_weight)
    pretrained_weight.pop("fc.weight", None)
    pretrained_weight.pop("fc.bias", None)
    model.load_state_dict(pretrained_weight, strict=False)

    # Create the trainer and run the training loop.
    trainer = Trainer(model, args)
    trainer.train()

    return trainer.model_name
コード例 #2
0
def evaluate(model_weight=None):
    """Evaluate a trained face-verification model on the test split.

    ``model_weight`` is the checkpoint basename (without ``.pt``) used to
    build the default ``--model_weight`` path; the path may also be given
    explicitly on the command line.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--batch_size", type=int, default=128)
    parser.add_argument("--num_instances", type=int, default=1)
    parser.add_argument("--model_weight",
                        type=str,
                        default=f"results/train/weights/{model_weight}.pt")
    args = parser.parse_args()

    # The classifier width is derived from the training metadata.
    meta_path = "/datasets/objstrgzip/03_face_verification_angle/train/train_meta.csv"
    train_meta = pd.read_csv(meta_path)
    num_classes = len(set(train_meta["face_id"].values))

    # Build the model in evaluation mode and restore its weights.
    model = Triarchy(args, num_classes, train=False)
    state_dict = torch.load(args.model_weight)
    model.load_state_dict(state_dict)
    print(f"Loaded weight {args.model_weight}")
    print(f"Number of Parameters: {count_parameters(model)}")

    # The Trainer supplies the test data loader (no logging during eval).
    trainer = Trainer(model, args, logging=False)
    loader = trainer.model.get_test_data_loader("test", "test_label.csv")

    # Run evaluation and persist the results.
    trainer.eval(loader, train=False, save=True)
コード例 #3
0
def main(train: bool, gpu: bool, idx: str):
    """Optionally train, then test, the model configured by ``Parameters``.

    Parameters
    ----------
    train : bool
        When True, run a training pass (with loss plotting) before testing.
    gpu : bool
        Whether to run on GPU (stored on ``Parameters.use_gpu``).
    idx : str
        Optional run identifier; stored on ``Parameters.idx`` when given.
    """
    print("Starting time: {}".format(time.asctime()))

    # To have a more verbose output in case of an exception
    faulthandler.enable()

    Parameters.train_model = train
    Parameters.use_gpu = gpu
    if idx is not None:
        Parameters.idx = idx

    # Plain truthiness test (was `is True`, a PEP 8 anti-pattern that only
    # matches the exact True singleton).
    if Parameters.train_model:
        # Instantiating the trainer
        trainer = Trainer(Parameters)
        # Training the model
        avg_losses = trainer.train_model()
        # Plot the per-epoch average training loss
        plotlosses(
            avg_losses,
            title="Average Loss per Epoch",
            xlabel="Epoch",
            ylabel="Average Loss",
        )

    # Instantiating the tester
    tester = Tester(Parameters)
    # Testing the model
    tester.test_random_sample()
    # Full accuracy evaluation intentionally left disabled:
    # test_losses = tester.test_model()

    print("Finishing time: {}".format(time.asctime()))
コード例 #4
0
File: test.py  Project: wx-b/VR3Dense
    # --- Test-time setup for VR3Dense (interior of a larger script: `args`,
    # `pose_fields`, `label_map`, `device`, `exp_str`, and `disp_cloud` are
    # defined earlier, outside this view). ---

    # define model
    # Label vector layout: pose fields first, then one slot per object class.
    obj_label_len = len(pose_fields) + len(
        label_map)  # 9 for poses, rest for object classes
    model = VR3Dense(in_channels=1, n_xgrids=args.n_xgrids, n_ygrids=args.n_ygrids, obj_label_len=obj_label_len, \
                    dense_depth=args.dense_depth, train_depth_only=args.train_depth_only, train_obj_only=args.train_obj_only, \
                    concat_latent_vector=args.concat_latent_vector)
    model = model.to(device)

    # load weights
    # Presumably restores the checkpoint matching `exp_str` from
    # args.modeldir — TODO confirm against load_pretrained_weights().
    model = load_pretrained_weights(model, args.modeldir, exp_str)

    # define trainer
    # dataset=None and mode='test' suggest the Trainer is used here only as
    # a holder of inference-time configuration, not for training — confirm.
    trainer = Trainer(dataroot=args.dataroot, model=model, dataset=None, mode='test', dense_depth=args.dense_depth, \
                      n_xgrids=args.n_xgrids, n_ygrids=args.n_ygrids, exp_str=exp_str, \
                      epochs=args.epochs, batch_size=args.batch_size, learning_rate=args.learning_rate, \
                      xmin=args.xmin, xmax=args.xmax, ymin=args.ymin, ymax=args.ymax, zmin=args.zmin, zmax=args.zmax, \
                      max_depth=args.max_depth, vol_size_x=args.vol_size_x, vol_size_y=args.vol_size_y, vol_size_z=args.vol_size_z, \
                      img_size_x=args.img_size_x, img_size_y=args.img_size_y, loss_weights=[], \
                      modeldir=args.modeldir, logdir=args.logdir, plotdir=args.plotdir, \
                      model_save_steps=args.model_save_steps, early_stop_steps=args.early_stop_steps)

    # get a list of point-cloud bin files
    pc_filenames = sorted(glob.glob(os.path.join(args.pc_dir, '*.bin')))

    # visualization window
    cv2.namedWindow('VR3Dense', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('VR3Dense', 900, 1440)
    pcd = o3d.geometry.PointCloud()
    vis = None
    # Optional Open3D viewer, created only when point-cloud display is on.
    # NOTE(review): prefer `if disp_cloud:` over `== True`.
    if disp_cloud == True:
        vis = o3d.visualization.Visualizer()
        vis.create_window()
コード例 #5
0
    # --- VR3Dense ground-truth visualization (interior of a larger script:
    # `args`, `pose_fields`, `label_map`, `device`, `exp_str`, and `mean_lwh`
    # are defined earlier, outside this view). ---

    # Label vector layout: pose fields first, then one slot per object class.
    obj_label_len = len(pose_fields) + len(
        label_map)  # 9 for poses, rest for object classes
    model = VR3Dense(in_channels=1, n_xgrids=args.n_xgrids, n_ygrids=args.n_ygrids, obj_label_len=obj_label_len, \
                    dense_depth=args.dense_depth, train_depth_only=args.train_depth_only, train_obj_only=args.train_obj_only, \
                    concat_latent_vector=args.concat_latent_vector)
    model = model.to(device)

    # load weights
    # Presumably restores the checkpoint matching `exp_str` from
    # args.modeldir — TODO confirm against load_pretrained_weights().
    model = load_pretrained_weights(model, args.modeldir, exp_str)

    # define trainer
    # Unlike the pure-inference variant, this Trainer is given the
    # KITTIObjectDataset class so `trainer.dataset` is indexable below.
    trainer = Trainer(dataroot=args.dataroot, model=model, dataset=KITTIObjectDataset, dense_depth=args.dense_depth, \
                      n_xgrids=args.n_xgrids, n_ygrids=args.n_ygrids, exp_str=exp_str, \
                      epochs=args.epochs, batch_size=args.batch_size, learning_rate=args.learning_rate, \
                      xmin=args.xmin, xmax=args.xmax, ymin=args.ymin, ymax=args.ymax, zmin=args.zmin, zmax=args.zmax, \
                      max_depth=args.max_depth, vol_size_x=args.vol_size_x, vol_size_y=args.vol_size_y, vol_size_z=args.vol_size_z, \
                      img_size_x=args.img_size_x, img_size_y=args.img_size_y, loss_weights=[], \
                      mean_lwh=mean_lwh, modeldir=args.modeldir, logdir=args.logdir, plotdir=args.plotdir, \
                      model_save_steps=args.model_save_steps, early_stop_steps=args.early_stop_steps, \
                      train_depth_only=args.train_depth_only, train_obj_only=args.train_obj_only)

    # show 100 samples
    cv2.namedWindow('VR3Dense', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('VR3Dense', 800, 1440)
    for i in range(100):
        sample = trainer.dataset[i]
        ## get true labels visualization
        # Render the sample's point cloud with its ground-truth boxes,
        # clipped to the trainer's configured axis limits.
        pc_bbox_img_true = draw_point_cloud_w_bbox(sample['cloud'], sample['label_dict'], \
                                                    xlim=trainer.xlim, ylim=trainer.ylim, zlim=trainer.zlim)
        # OpenCV display expects BGR channel order.
        pc_bbox_img_true_bgr = cv2.cvtColor(pc_bbox_img_true,
                                            cv2.COLOR_RGB2BGR)
コード例 #6
0
    # --- POS-tagger training setup (interior of a larger function:
    # `sentences`, `indexer`, and `device` are defined earlier, outside
    # this view). ---

    # Build the token/tag vocabularies from the full corpus.
    indexer.index_dataset(sentences)

    # Hold out the last 1000 sentences as the dev set.
    train_sentences = sentences[:-1000]
    dev_sentences = sentences[-1000:]

    # Training batches are reshuffled each epoch via RandomSampler.
    train_dataset = RuPosDataset(train_sentences, indexer, device)
    train_sampler = RandomSampler(train_dataset)
    train_iterator = DataLoader(train_dataset,
                                batch_size=256,
                                sampler=train_sampler,
                                collate_fn=train_dataset.collate_fn)

    # Dev batches keep a fixed order so metrics are comparable across epochs.
    dev_dataset = RuPosDataset(dev_sentences, indexer, device)
    dev_sampler = SequentialSampler(dev_dataset)
    dev_iterator = DataLoader(dev_dataset,
                              batch_size=256,
                              sampler=dev_sampler,
                              collate_fn=dev_dataset.collate_fn)

    # Pretrained Russian word vectors ('cc.ru.300.vec' — presumably 300-d
    # fastText Common Crawl embeddings; confirm against load_embeddings).
    embeddings = load_embeddings(indexer.token_vocab, 'data/cc.ru.300.vec')
    # Output dimension equals the POS-tag vocabulary size.
    model = SimpleTagger(output_dim=len(indexer.pos_vocab),
                         embedding_matrix=embeddings)
    model.to(device)

    trainer = Trainer(model, train_iterator, dev_iterator)

    # 20 epochs of training, each followed by a dev-set evaluation.
    for i in range(20):
        print('Epoch: %d' % (i + 1))
        trainer.train_epoch()
        trainer.test_epoch()
コード例 #7
0
from src import Trainer

if __name__ == "__main__":
    # Script entry point: fine-tune the project's Trainer model and
    # persist the resulting weights (save location is decided inside
    # Trainer.save_model — not visible here).
    trainer = Trainer()
    trainer.fine_tune()
    trainer.save_model()