# Example 1 (score: 0)
def evaluate(model_weight=None):
    """Load a trained Triarchy checkpoint and run evaluation on the test split.

    Args:
        model_weight: Checkpoint stem used to build the default weight path
            (``results/train/weights/<model_weight>.pt``); a ``--model_weight``
            CLI flag overrides it.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--batch_size", type=int, default=128)
    parser.add_argument("--num_instances", type=int, default=1)
    parser.add_argument(
        "--model_weight",
        type=str,
        default=f"results/train/weights/{model_weight}.pt",
    )
    args = parser.parse_args()

    # The classifier width equals the number of distinct face identities.
    meta = pd.read_csv(
        "/datasets/objstrgzip/03_face_verification_angle/train/train_meta.csv")
    num_classes = len(set(meta["face_id"].values))

    # Build the model and restore the requested checkpoint.
    model = Triarchy(args, num_classes, train=False)
    model.load_state_dict(torch.load(args.model_weight))
    print(f"Loaded weight {args.model_weight}")
    print(f"Number of Parameters: {count_parameters(model)}")

    # Create a Trainer instance and fetch the test data loader.
    trainer = Trainer(model, args, logging=False)
    loader = trainer.model.get_test_data_loader("test", "test_label.csv")

    # Run evaluation and persist the results.
    trainer.eval(loader, train=False, save=True)
 def fit(self,
         initial_theta,
         stochastic=False,
         adapt=False,
         batch_size=1,
         learning_rate=0.1,
         momentum=0.9,
         epoch=1000,
         threshold=1e-4,
         regularization_lambda=1.0):
     """Fit parameters with mini-batch or full-batch gradient descent.

     Args:
         initial_theta: Starting parameter vector.
         stochastic: When True use ``Trainer.batch_gradient_descent``,
             otherwise ``Trainer.gradient_descent``.
         adapt: Adaptive-rate flag forwarded to the batch optimizer.
         batch_size: Mini-batch size (stochastic path only).
         learning_rate: NOTE(review): accepted but currently ignored —
             each branch hard-codes its own rate (0.05 / 0.5). Kept for
             interface compatibility; confirm before wiring it through.
         momentum: Momentum coefficient (full-batch path only).
         epoch: Iteration/epoch budget.
         threshold: Convergence threshold (full-batch path only).
         regularization_lambda: L2 strength (full-batch path only; the
             stochastic path passes l=0.0 as in the original).

     Returns:
         (result, costs): optimizer result dict (with final 'theta')
         and the recorded cost history.
     """
     if stochastic:
         # Bug fix: the keyword was misspelled 'apapt', so the adaptive
         # flag never reached the optimizer (cf. fit_multi_class, which
         # spells it 'adapt').
         (result, costs) = Trainer.batch_gradient_descent(
             learning_rate=0.05,
             adapt=adapt,
             batch_size=batch_size,
             epoch=epoch,
             costFunction=self.costFunctionReg,
             theta=initial_theta,
             X=self.X,
             Y=self.Y,
             l=0.0)
     else:
         (result, costs) = Trainer.gradient_descent(
             maxiter=epoch,
             learning_rate=0.5,
             momentum=momentum,
             threshold=threshold,
             costFunction=self.costFunctionReg,
             theta=initial_theta,
             X=self.X,
             Y=self.Y,
             l=regularization_lambda)
     # Cache the solution so predict-style methods can reuse it.
     self.trained_theta = result['theta']
     return result, costs
# Example 3 (score: 0)
def train(pretext_model="Pretext_1593000476"):
    """Fine-tune Triarchy from a pretext checkpoint and return the run name.

    Args:
        pretext_model: Checkpoint stem of the pretext model whose weights
            (minus the classification head) initialize training.

    Returns:
        The ``model_name`` attribute of the finished Trainer.
    """
    # Fix all random seeds for reproducibility.
    random_seed()

    parser = argparse.ArgumentParser()
    parser.add_argument("--epochs", type=int, default=110)
    parser.add_argument("--lr", type=float, default=0.001)
    parser.add_argument("--batch_size", type=int, default=8)
    parser.add_argument("--num_instances", type=int, default=16)
    parser.add_argument("--pretrained_weight", type=str,
                        default=f"results/train/weights/{pretext_model}.pt")
    args = parser.parse_args()

    # The classifier width equals the number of distinct face identities.
    meta = pd.read_csv("/datasets/objstrgzip/03_face_verification_angle/train/train_meta.csv")
    num_classes = len(set(meta["face_id"].values))

    # Build the model.
    model = Triarchy(args, num_classes, train=True)

    # Load weights from the pretext model (trained on expression and camera
    # angle), dropping its classification head before the transfer.
    state = torch.load(args.pretrained_weight)
    state.pop("fc.weight")
    state.pop("fc.bias")
    model.load_state_dict(state, strict=False)

    # Create a Trainer instance and run training.
    trainer = Trainer(model, args)
    trainer.train()

    return trainer.model_name
    def fit_multi_class(self, initial_theta, stochastic=False, adapt=False, batch_size=1, learning_rate=0.1,
                        momentum=0.0,
                        epoch=1000, threshold=1e-4, regularization_lambda=1.0):
        """One-vs-all training: fit one theta column per class label.

        Returns the (n_params x n_labels) matrix of fitted parameters and
        also stores it in ``self.trained_theta``.
        """
        all_theta = np.matrix(np.zeros((len(initial_theta), len(self.labels))))
        if stochastic:
            for col, label in enumerate(self.labels):
                # Binary target: 1.0 for the current class, 0.0 otherwise.
                binary_y = (self.Y == label).astype(float)
                result, _ = Trainer.batch_gradient_descent(
                    learning_rate=learning_rate, adapt=adapt,
                    batch_size=batch_size, epoch=epoch,
                    costFunction=self.costFunctionReg, theta=initial_theta,
                    X=self.X, Y=binary_y, l=0.0)
                all_theta[:, col] = result['theta']

            self.trained_theta = all_theta
        else:
            for col, label in enumerate(self.labels):
                binary_y = (self.Y == label).astype(float)
                result, _ = Trainer.gradient_descent(
                    maxiter=epoch, learning_rate=learning_rate,
                    momentum=momentum, threshold=threshold,
                    costFunction=self.costFunctionReg, theta=initial_theta,
                    X=self.X, Y=binary_y, l=regularization_lambda)
                all_theta[:, col] = result['theta']

            self.trained_theta = all_theta
        return all_theta
# Example 5 (score: 0)
def main(train: bool, gpu: bool, idx: str):
    """Optionally train, then test a model, driven by the global Parameters.

    Args:
        train: Whether to run the training phase before testing.
        gpu: Whether to request GPU execution.
        idx: Optional run identifier; when None the existing value is kept.
    """
    print("Starting time: {}".format(time.asctime()))

    # Dump a traceback even on hard crashes (e.g. segfaults in extensions).
    faulthandler.enable()

    # Propagate the CLI choices into the shared Parameters object.
    Parameters.train_model = train
    Parameters.use_gpu = gpu
    if idx is not None:
        Parameters.idx = idx

    if Parameters.train_model is True:
        trainer = Trainer(Parameters)
        avg_losses = trainer.train_model()
        # Visualize how the loss evolved over training.
        plotlosses(
            avg_losses,
            title="Average Loss per Epoch",
            xlabel="Epoch",
            ylabel="Average Loss",
        )

    # Evaluate the (possibly freshly trained) model on a random sample.
    tester = Tester(Parameters)
    tester.test_random_sample()
    # Full accuracy evaluation is disabled:
    # test_losses = tester.test_model()

    print("Finishing time: {}".format(time.asctime()))
 def fit(self, initial_theta, stochastic=False, adapt=False, batch_size=1, learning_rate=0.1, momentum=0.9,
         epoch=1000, threshold=1e-4, regularization_lambda=1.0):
     """Fit parameters with mini-batch or full-batch gradient descent.

     NOTE(review): ``learning_rate`` is accepted but currently ignored —
     each branch hard-codes its own rate (0.05 / 0.5). Kept for interface
     compatibility; confirm before wiring it through.

     Returns:
         (result, costs): optimizer result dict (with final 'theta') and
         the recorded cost history. The solution is also cached in
         ``self.trained_theta``.
     """
     if stochastic:
         # Bug fix: the keyword was misspelled 'apapt', so the adaptive
         # flag never reached the optimizer (cf. fit_multi_class).
         (result, costs) = Trainer.batch_gradient_descent(learning_rate=0.05, adapt=adapt, batch_size=batch_size,
                                                          epoch=epoch, costFunction=self.costFunctionReg,
                                                          theta=initial_theta, X=self.X, Y=self.Y, l=0.0)
     else:
         (result, costs) = Trainer.gradient_descent(maxiter=epoch, learning_rate=0.5, momentum=momentum,
                                                    threshold=threshold, costFunction=self.costFunctionReg,
                                                    theta=initial_theta, X=self.X, Y=self.Y, l=regularization_lambda)
     self.trained_theta = result['theta']
     return result, costs
    def fit_multi_class(self,
                        initial_theta,
                        stochastic=False,
                        adapt=False,
                        batch_size=1,
                        learning_rate=0.1,
                        momentum=0.0,
                        epoch=1000,
                        threshold=1e-4,
                        regularization_lambda=1.0):
        """One-vs-all training: fit one theta column per class label.

        Returns the (n_params x n_labels) matrix of fitted parameters and
        also stores it in ``self.trained_theta``.
        """
        all_theta = np.matrix(np.zeros((len(initial_theta), len(self.labels))))
        if stochastic:
            for class_idx, target in enumerate(self.labels):
                # Binary target: 1.0 for the current class, 0.0 otherwise.
                indicator = (self.Y == target).astype(float)
                outcome, _ = Trainer.batch_gradient_descent(
                    learning_rate=learning_rate,
                    adapt=adapt,
                    batch_size=batch_size,
                    epoch=epoch,
                    costFunction=self.costFunctionReg,
                    theta=initial_theta,
                    X=self.X,
                    Y=indicator,
                    l=0.0)
                all_theta[:, class_idx] = outcome['theta']

            self.trained_theta = all_theta
        else:
            for class_idx, target in enumerate(self.labels):
                indicator = (self.Y == target).astype(float)
                outcome, _ = Trainer.gradient_descent(
                    maxiter=epoch,
                    learning_rate=learning_rate,
                    momentum=momentum,
                    threshold=threshold,
                    costFunction=self.costFunctionReg,
                    theta=initial_theta,
                    X=self.X,
                    Y=indicator,
                    l=regularization_lambda)
                all_theta[:, class_idx] = outcome['theta']

            self.trained_theta = all_theta
        return all_theta
# Example 8 (score: 0)
    # NOTE(review): fragment of a larger test/demo function — `args`, `exp_str`,
    # `device` and `disp_cloud` come from the enclosing scope not shown here.
    # define model
    # Output vector per grid cell: 9 pose fields plus one score per object class.
    obj_label_len = len(pose_fields) + len(
        label_map)  # 9 for poses, rest for object classes
    model = VR3Dense(in_channels=1, n_xgrids=args.n_xgrids, n_ygrids=args.n_ygrids, obj_label_len=obj_label_len, \
                    dense_depth=args.dense_depth, train_depth_only=args.train_depth_only, train_obj_only=args.train_obj_only, \
                    concat_latent_vector=args.concat_latent_vector)
    model = model.to(device)

    # load weights
    model = load_pretrained_weights(model, args.modeldir, exp_str)

    # define trainer
    # dataset=None and loss_weights=[] — this Trainer runs in 'test' mode, so
    # no training dataset or loss weighting is supplied.
    trainer = Trainer(dataroot=args.dataroot, model=model, dataset=None, mode='test', dense_depth=args.dense_depth, \
                      n_xgrids=args.n_xgrids, n_ygrids=args.n_ygrids, exp_str=exp_str, \
                      epochs=args.epochs, batch_size=args.batch_size, learning_rate=args.learning_rate, \
                      xmin=args.xmin, xmax=args.xmax, ymin=args.ymin, ymax=args.ymax, zmin=args.zmin, zmax=args.zmax, \
                      max_depth=args.max_depth, vol_size_x=args.vol_size_x, vol_size_y=args.vol_size_y, vol_size_z=args.vol_size_z, \
                      img_size_x=args.img_size_x, img_size_y=args.img_size_y, loss_weights=[], \
                      modeldir=args.modeldir, logdir=args.logdir, plotdir=args.plotdir, \
                      model_save_steps=args.model_save_steps, early_stop_steps=args.early_stop_steps)

    # get a list of point-cloud bin files
    pc_filenames = sorted(glob.glob(os.path.join(args.pc_dir, '*.bin')))

    # visualization window
    cv2.namedWindow('VR3Dense', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('VR3Dense', 900, 1440)
    pcd = o3d.geometry.PointCloud()
    vis = None
    # Open the Open3D interactive viewer only when cloud display was requested.
    if disp_cloud == True:
        vis = o3d.visualization.Visualizer()
        vis.create_window()
# Example 9 (score: 0)
    # NOTE(review): fragment of a larger training function — `args`,
    # `model_exp_dir`, `exp_str` and `loss_weights` come from the enclosing
    # scope not shown here.
    # load weights
    # Checkpoint preference order when --use_pretrained_weights is set:
    #   1. an explicit --pretrained_weights path (if it exists),
    #   2. the experiment's own best checkpoint,
    #   3. otherwise warn and train from scratch.
    best_ckpt_model = os.path.join(model_exp_dir, 'checkpoint_best.pt')
    if (args.use_pretrained_weights == True) and (args.pretrained_weights !=
                                                  'none') and os.path.exists(
                                                      args.pretrained_weights):
        # map_location keeps loading CPU-safe regardless of the saving device.
        model.load_state_dict(
            torch.load(args.pretrained_weights,
                       map_location=lambda storage, loc: storage))
        print('Loaded pre-trained weights: {}'.format(args.pretrained_weights))
    elif (args.use_pretrained_weights
          == True) and os.path.exists(best_ckpt_model):
        model.load_state_dict(
            torch.load(best_ckpt_model,
                       map_location=lambda storage, loc: storage))
        print('Loaded pre-trained weights: {}'.format(best_ckpt_model))
    elif (args.use_pretrained_weights == True):
        print('Pre-trained weights not found.')

    # define trainer
    trainer = Trainer(dataroot=args.dataroot, model=model, dataset=KITTIObjectDataset, dense_depth=args.dense_depth, \
                      n_xgrids=args.n_xgrids, n_ygrids=args.n_ygrids, exp_str=exp_str, \
                      epochs=args.epochs, batch_size=args.batch_size, learning_rate=args.learning_rate, \
                      xmin=args.xmin, xmax=args.xmax, ymin=args.ymin, ymax=args.ymax, zmin=args.zmin, zmax=args.zmax, \
                      max_depth=args.max_depth, vol_size_x=args.vol_size_x, vol_size_y=args.vol_size_y, vol_size_z=args.vol_size_z, \
                      img_size_x=args.img_size_x, img_size_y=args.img_size_y, loss_weights=loss_weights, \
                      modeldir=args.modeldir, logdir=args.logdir, plotdir=args.plotdir, \
                      model_save_steps=args.model_save_steps, early_stop_steps=args.early_stop_steps, \
                      train_depth_only=args.train_depth_only, train_obj_only=args.train_obj_only)

    # train the model
    trainer.train()
# Example 10 (score: 0)
    # NOTE(review): fragment of a larger visualization function — `args`,
    # `exp_str`, `device` and `mean_lwh` come from the enclosing scope not
    # shown here.
    # Output vector per grid cell: 9 pose fields plus one score per object class.
    obj_label_len = len(pose_fields) + len(
        label_map)  # 9 for poses, rest for object classes
    model = VR3Dense(in_channels=1, n_xgrids=args.n_xgrids, n_ygrids=args.n_ygrids, obj_label_len=obj_label_len, \
                    dense_depth=args.dense_depth, train_depth_only=args.train_depth_only, train_obj_only=args.train_obj_only, \
                    concat_latent_vector=args.concat_latent_vector)
    model = model.to(device)

    # load weights
    model = load_pretrained_weights(model, args.modeldir, exp_str)

    # define trainer
    # The Trainer is used here only as a convenient holder for the dataset
    # and the axis limits (xlim/ylim/zlim) used below.
    trainer = Trainer(dataroot=args.dataroot, model=model, dataset=KITTIObjectDataset, dense_depth=args.dense_depth, \
                      n_xgrids=args.n_xgrids, n_ygrids=args.n_ygrids, exp_str=exp_str, \
                      epochs=args.epochs, batch_size=args.batch_size, learning_rate=args.learning_rate, \
                      xmin=args.xmin, xmax=args.xmax, ymin=args.ymin, ymax=args.ymax, zmin=args.zmin, zmax=args.zmax, \
                      max_depth=args.max_depth, vol_size_x=args.vol_size_x, vol_size_y=args.vol_size_y, vol_size_z=args.vol_size_z, \
                      img_size_x=args.img_size_x, img_size_y=args.img_size_y, loss_weights=[], \
                      mean_lwh=mean_lwh, modeldir=args.modeldir, logdir=args.logdir, plotdir=args.plotdir, \
                      model_save_steps=args.model_save_steps, early_stop_steps=args.early_stop_steps, \
                      train_depth_only=args.train_depth_only, train_obj_only=args.train_obj_only)

    # show 100 samples
    cv2.namedWindow('VR3Dense', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('VR3Dense', 800, 1440)
    for i in range(100):
        sample = trainer.dataset[i]
        ## get true labels visualization
        pc_bbox_img_true = draw_point_cloud_w_bbox(sample['cloud'], sample['label_dict'], \
                                                    xlim=trainer.xlim, ylim=trainer.ylim, zlim=trainer.zlim)
        # OpenCV expects BGR channel order for display.
        pc_bbox_img_true_bgr = cv2.cvtColor(pc_bbox_img_true,
                                            cv2.COLOR_RGB2BGR)
# Example 11 (score: 0)
    # NOTE(review): fragment of a larger function — `sentences`, `indexer`
    # and `device` come from the enclosing scope not shown here.
    indexer.index_dataset(sentences)

    # Hold out the last 1000 sentences for validation.
    train_sentences = sentences[:-1000]
    dev_sentences = sentences[-1000:]

    # Training pipeline: shuffled sampling, batches of 256.
    train_dataset = RuPosDataset(train_sentences, indexer, device)
    train_sampler = RandomSampler(train_dataset)
    train_iterator = DataLoader(train_dataset,
                                batch_size=256,
                                sampler=train_sampler,
                                collate_fn=train_dataset.collate_fn)

    # Validation pipeline: deterministic sequential order.
    dev_dataset = RuPosDataset(dev_sentences, indexer, device)
    dev_sampler = SequentialSampler(dev_dataset)
    dev_iterator = DataLoader(dev_dataset,
                              batch_size=256,
                              sampler=dev_sampler,
                              collate_fn=dev_dataset.collate_fn)

    # Initialize the tagger with pre-trained fastText Russian embeddings;
    # the output layer is sized to the POS-tag vocabulary.
    embeddings = load_embeddings(indexer.token_vocab, 'data/cc.ru.300.vec')
    model = SimpleTagger(output_dim=len(indexer.pos_vocab),
                         embedding_matrix=embeddings)
    model.to(device)

    trainer = Trainer(model, train_iterator, dev_iterator)

    # Train for 20 epochs, validating after each one.
    for i in range(20):
        print('Epoch: %d' % (i + 1))
        trainer.train_epoch()
        trainer.test_epoch()
# Example 12 (score: 0)
from src import Trainer

def main():
    """Fine-tune the project's Trainer and persist the resulting model."""
    trainer = Trainer()
    trainer.fine_tune()
    trainer.save_model()


if __name__ == "__main__":
    main()