import argparse

import pandas as pd
import torch

# Triarchy, Trainer, and random_seed are assumed to be defined elsewhere in the project.


def train(pretext_model="Pretext_1593000476"):
    # Fix all random seeds for reproducibility
    random_seed()

    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--epochs", type=int, default=110)
    arg_parser.add_argument("--lr", type=float, default=0.001)
    arg_parser.add_argument("--batch_size", type=int, default=8)
    arg_parser.add_argument("--num_instances", type=int, default=16)
    arg_parser.add_argument("--pretrained_weight", type=str,
                            default=f"results/train/weights/{pretext_model}.pt")
    args = arg_parser.parse_args()

    train_meta = pd.read_csv("/datasets/objstrgzip/03_face_verification_angle/train/train_meta.csv")
    num_classes = len(set(train_meta["face_id"].values))

    # Load the model
    model = Triarchy(args, num_classes, train=True)

    # Load the weights of the pretext model trained to predict facial expression
    # and camera angle on the train dataset
    pretrained_weight = torch.load(args.pretrained_weight)
    # Drop the pretext classification head so only the backbone weights are reused
    pretrained_weight.pop("fc.weight")
    pretrained_weight.pop("fc.bias")
    model.load_state_dict(pretrained_weight, strict=False)

    # Create a Trainer instance and train
    trainer = Trainer(model, args)
    trainer.train()

    return trainer.model_name
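# The random_seed() helper called above is not shown in this snippet. Below is a
# minimal sketch of what such a seed-fixing utility typically looks like; the exact
# seed value and cuDNN flags used by the original project are assumptions.
import os
import random

import numpy as np
import torch


def random_seed(seed=42):
    # Fix Python, NumPy, and PyTorch RNGs so runs are reproducible
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    os.environ["PYTHONHASHSEED"] = str(seed)
    # Make cuDNN deterministic (at some cost in speed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False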
best_ckpt_model = os.path.join(model_exp_dir, 'checkpoint_best.pt')

# Load pre-trained weights: an explicitly given checkpoint takes precedence over the
# best checkpoint saved by a previous run of this experiment
if args.use_pretrained_weights and (args.pretrained_weights != 'none') and os.path.exists(args.pretrained_weights):
    model.load_state_dict(
        torch.load(args.pretrained_weights, map_location=lambda storage, loc: storage))
    print('Loaded pre-trained weights: {}'.format(args.pretrained_weights))
elif args.use_pretrained_weights and os.path.exists(best_ckpt_model):
    model.load_state_dict(
        torch.load(best_ckpt_model, map_location=lambda storage, loc: storage))
    print('Loaded pre-trained weights: {}'.format(best_ckpt_model))
elif args.use_pretrained_weights:
    print('Pre-trained weights not found.')

# define trainer
trainer = Trainer(dataroot=args.dataroot, model=model, dataset=KITTIObjectDataset,
                  dense_depth=args.dense_depth,
                  n_xgrids=args.n_xgrids, n_ygrids=args.n_ygrids, exp_str=exp_str,
                  epochs=args.epochs, batch_size=args.batch_size, learning_rate=args.learning_rate,
                  xmin=args.xmin, xmax=args.xmax, ymin=args.ymin, ymax=args.ymax,
                  zmin=args.zmin, zmax=args.zmax,
                  max_depth=args.max_depth,
                  vol_size_x=args.vol_size_x, vol_size_y=args.vol_size_y, vol_size_z=args.vol_size_z,
                  img_size_x=args.img_size_x, img_size_y=args.img_size_y,
                  loss_weights=loss_weights,
                  modeldir=args.modeldir, logdir=args.logdir, plotdir=args.plotdir,
                  model_save_steps=args.model_save_steps, early_stop_steps=args.early_stop_steps,
                  train_depth_only=args.train_depth_only, train_obj_only=args.train_obj_only)

# train the model
trainer.train()
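# The snippet above assumes an argparse namespace carrying (among many other options)
# the pre-trained-weight flags checked in the if/elif chain. A hypothetical definition
# consistent with those checks is sketched below; the argument names match the snippet,
# but the flag types and defaults are assumptions, not the project's actual parser.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--use_pretrained_weights', action='store_true',
                    help='load weights before training if a checkpoint is found')
parser.add_argument('--pretrained_weights', type=str, default='none',
                    help="explicit checkpoint path; 'none' falls back to checkpoint_best.pt")
args = parser.parse_args()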