def __init__(self, config):
    self.config = config
    self.flag_gan = False
    self.train_count = 0
    self._min = 9999.

    self.torchvision_transform = transforms.Compose([
        transforms.RandomRotation((-120, 120), fill='black'),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
    ])

    self.pretraining_step_size = self.config.pretraining_step_size
    self.batch_size = self.config.batch_size

    self.logger = set_logger('train_epoch.log')

    # define dataloader
    self.dataset = DiscriminatorDataset(self.config, self.torchvision_transform)
    self.dataloader = DataLoader(self.dataset,
                                 batch_size=self.batch_size,
                                 shuffle=False,
                                 num_workers=2,
                                 pin_memory=self.config.pin_memory,
                                 collate_fn=self.collate_function)
    self.dataset_test = DiscriminatorDataset(self.config, self.torchvision_transform, True)
    self.testloader = DataLoader(self.dataset_test,
                                 batch_size=self.batch_size,
                                 shuffle=False,
                                 num_workers=1,
                                 pin_memory=self.config.pin_memory,
                                 collate_fn=self.collate_function)

    # define models
    self.model = Model().cuda()

    # define loss
    self.loss = Loss().cuda()

    # define lr
    self.lr = self.config.learning_rate

    # define optimizer
    self.opt = torch.optim.Adam(self.model.parameters(), lr=self.lr)

    # define lr scheduler
    self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        self.opt, mode='min', factor=0.8, cooldown=20)

    # initialize train counter
    self.epoch = 0
    self.total_iter = (len(self.dataset) + self.config.batch_size - 1) // self.config.batch_size

    self.manual_seed = random.randint(10000, 99999)
    torch.manual_seed(self.manual_seed)
    torch.cuda.manual_seed_all(self.manual_seed)
    random.seed(self.manual_seed)

    # parallel setting
    gpu_list = list(range(self.config.gpu_cnt))
    self.model = nn.DataParallel(self.model, device_ids=gpu_list)

    # Load model from the latest checkpoint; if none is found, start from scratch.
    self.load_checkpoint(self.config.checkpoint_file)

    # Summary Writer
    self.summary_writer = SummaryWriter(log_dir=os.path.join(self.config.root_path, self.config.summary_dir),
                                        comment='Discriminator')

    self.print_train_info()
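# Hedged sketch, not part of the original trainer: ReduceLROnPlateau with
# mode='min' only lowers the learning rate when the value passed to step()
# stops improving, so a trainer like the one above is expected to call
# scheduler.step(<validation loss>) once per epoch and to update its best-loss
# tracker (self._min) at the same time. The standalone example below only
# illustrates that call pattern; the dummy model, the fake loss curve, and all
# names in it are illustrative, not repository code.
import torch
import torch.nn as nn

model = nn.Linear(4, 1)
opt = torch.optim.Adam(model.parameters(), lr=1e-3)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(opt, mode='min', factor=0.8, cooldown=20)

best_loss = 9999.
for val_loss in [1.0, 0.9, 0.9, 0.9, 0.9]:   # pretend per-epoch validation losses
    scheduler.step(val_loss)                 # lr is reduced once the metric plateaus
    if val_loss < best_loss:                 # keep the best loss, as self._min does above
        best_loss = val_loss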
def __init__(self, config):
    self.config = config
    self.flag_gan = False
    self.train_count = 0
    self.best_val_loss = 9999.

    self.pretraining_step_size = self.config.pretraining_step_size
    self.batch_size = self.config.batch_size

    self.logger = set_logger('train_epoch.log')

    # define dataloader
    self.dataset = Dataset(self.config, 'train')
    self.dataloader = DataLoader(self.dataset,
                                 batch_size=self.batch_size,
                                 shuffle=False,
                                 num_workers=2,
                                 pin_memory=self.config.pin_memory,
                                 collate_fn=self.collate_function)
    self.val_set = Dataset(self.config, 'val')
    self.val_loader = DataLoader(self.val_set,
                                 batch_size=self.batch_size,
                                 shuffle=False,
                                 num_workers=1,
                                 pin_memory=self.config.pin_memory,
                                 collate_fn=self.collate_function)

    # define models
    self.encoder = Encoder().cuda()
    self.edge = Edge().cuda()
    self.corner = Corner().cuda()
    self.reg = Regressor().cuda()

    # define loss
    self.bce = BCELoss().cuda()
    self.mse = MSELoss().cuda()

    # define lr
    self.lr = self.config.learning_rate

    # define optimizer
    self.opt = torch.optim.Adam([{'params': self.encoder.parameters()},
                                 {'params': self.edge.parameters()},
                                 {'params': self.corner.parameters()},
                                 {'params': self.reg.parameters()}],
                                lr=self.lr, eps=1e-6)

    # define lr scheduler
    self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        self.opt, mode='min', factor=0.8, cooldown=16, min_lr=8e-5)

    # initialize train counter
    self.epoch = 0
    self.total_iter = (len(self.dataset) + self.config.batch_size - 1) // self.config.batch_size
    self.val_iter = (len(self.val_set) + self.config.batch_size - 1) // self.config.batch_size

    self.manual_seed = random.randint(10000, 99999)
    torch.manual_seed(self.manual_seed)
    torch.cuda.manual_seed_all(self.manual_seed)
    random.seed(self.manual_seed)

    # parallel setting
    gpu_list = list(range(self.config.gpu_cnt))
    self.encoder = nn.DataParallel(self.encoder, device_ids=gpu_list)
    self.edge = nn.DataParallel(self.edge, device_ids=gpu_list)
    self.corner = nn.DataParallel(self.corner, device_ids=gpu_list)
    self.reg = nn.DataParallel(self.reg, device_ids=gpu_list)

    # Load model from the latest checkpoint; if none is found, start from scratch.
    self.load_checkpoint(self.config.checkpoint_file)

    # Summary Writer
    self.summary_writer = SummaryWriter(log_dir=os.path.join(self.config.root_path, self.config.summary_dir),
                                        comment='LayoutNet')

    self.print_train_info()
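# Hedged sketch, not the repository's load_checkpoint: because the four
# networks above are wrapped in nn.DataParallel before load_checkpoint() is
# called, their state_dict keys carry a 'module.' prefix. A loader along the
# following lines restores weights whether or not the checkpoint was saved
# from a wrapped model; the checkpoint keys ('encoder', 'optimizer', 'epoch')
# and the function name are assumptions made only for illustration.
import os
import torch

def load_checkpoint_sketch(path, encoder, optimizer):
    if not os.path.isfile(path):
        return 0                                   # no checkpoint found: start from scratch
    ckpt = torch.load(path, map_location='cpu')
    state = ckpt['encoder']
    # drop a leading 'module.' so the weights also fit a plain (unwrapped) module
    state = {(k[len('module.'):] if k.startswith('module.') else k): v
             for k, v in state.items()}
    target = encoder.module if hasattr(encoder, 'module') else encoder
    target.load_state_dict(state)
    optimizer.load_state_dict(ckpt['optimizer'])
    return ckpt.get('epoch', 0)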
def __init__(self, config):
    self.config = config
    self.flag_gan = False
    self.train_count = 0

    self.torchvision_transform = transforms.Compose([
        transforms.Resize((512, 1024)),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.RandomErasing(p=0.5, scale=(0.02, 0.04), ratio=(0.5, 1.5)),
    ])

    self.pretraining_step_size = self.config.pretraining_step_size
    self.batch_size = self.config.batch_size

    self.logger = set_logger('train_epoch.log')

    # define dataloader
    self.dataset = INGAN_Dataset(self.config, self.torchvision_transform, 'train_list.txt')
    self.dataloader = DataLoader(self.dataset,
                                 batch_size=self.batch_size,
                                 shuffle=False,
                                 num_workers=2,
                                 pin_memory=self.config.pin_memory,
                                 collate_fn=self.collate_function)
    self.dataset_test = INGAN_Dataset(self.config, self.torchvision_transform, 'test_list.txt')
    self.testloader = DataLoader(self.dataset_test,
                                 batch_size=self.batch_size,
                                 shuffle=False,
                                 num_workers=1,
                                 pin_memory=self.config.pin_memory,
                                 collate_fn=self.collate_function)

    self.assistant_dataset = INGAN_Dataset(self.config, self.torchvision_transform, 'corner_train_list.txt', True)
    self.assistant_dataloader = DataLoader(self.assistant_dataset,
                                           batch_size=self.batch_size,
                                           shuffle=False,
                                           num_workers=2,
                                           pin_memory=self.config.pin_memory,
                                           collate_fn=self.assistant_collate_function)
    self.assistant_dataset_test = INGAN_Dataset(self.config, self.torchvision_transform, 'test_list.txt', True)
    self.assistant_testloader = DataLoader(self.assistant_dataset_test,
                                           batch_size=self.batch_size,
                                           shuffle=False,
                                           num_workers=1,
                                           pin_memory=self.config.pin_memory,
                                           collate_fn=self.assistant_collate_function)

    # define models
    self.feature = HorizonBase().cuda()
    self.floor = FloorMap().cuda()
    self.corner = Corner().cuda()

    # define loss
    self.loss = Loss().cuda()

    # define lr
    self.lr = self.config.learning_rate

    # define optimizers: the main optimizer trains the shared feature trunk with
    # the floor-map head, the assistant optimizer trains it with the corner head
    self.opt = torch.optim.Adam([
        {'params': self.feature.parameters()},
        {'params': self.floor.parameters()},
    ], lr=self.lr)
    self.assistant_opt = torch.optim.Adam([
        {'params': self.feature.parameters()},
        {'params': self.corner.parameters()},
    ], lr=self.lr)

    # define lr schedulers (one per optimizer)
    self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        self.opt, mode='min', factor=0.8, cooldown=20)
    self.assistant_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        self.assistant_opt, mode='min', factor=0.8, cooldown=20)

    # initialize train counter
    self.epoch = 0

    self.manual_seed = random.randint(10000, 99999)
    torch.manual_seed(self.manual_seed)
    torch.cuda.manual_seed_all(self.manual_seed)
    random.seed(self.manual_seed)

    # parallel setting
    gpu_list = list(range(self.config.gpu_cnt))
    self.feature = nn.DataParallel(self.feature, device_ids=gpu_list)
    self.floor = nn.DataParallel(self.floor, device_ids=gpu_list)
    self.corner = nn.DataParallel(self.corner, device_ids=gpu_list)

    # Load model from the latest checkpoint; if none is found, start from scratch.
    self.load_checkpoint(self.config.checkpoint_file)

    # Summary Writer
    self.summary_writer = SummaryWriter(log_dir=os.path.join(self.config.root_path, self.config.summary_dir),
                                        comment='Discriminator')

    self.print_train_info()
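# Hedged sketch, not this trainer's actual loop: the main optimizer above
# updates the shared feature trunk together with the floor-map head, while
# assistant_opt updates the same trunk together with the corner head, so
# training presumably alternates the two objectives. The toy modules and
# squared-output losses below only illustrate that alternation; none of them
# correspond to the real HorizonBase / FloorMap / Corner models.
import torch
import torch.nn as nn

feature = nn.Linear(16, 8)                          # stand-in for the shared trunk
floor_head, corner_head = nn.Linear(8, 4), nn.Linear(8, 2)
opt = torch.optim.Adam(list(feature.parameters()) + list(floor_head.parameters()), lr=1e-4)
assistant_opt = torch.optim.Adam(list(feature.parameters()) + list(corner_head.parameters()), lr=1e-4)

x = torch.randn(2, 16)

opt.zero_grad()
floor_head(feature(x)).pow(2).mean().backward()     # placeholder for the floor-map loss
opt.step()                                          # updates trunk + floor head only

assistant_opt.zero_grad()
corner_head(feature(x)).pow(2).mean().backward()    # placeholder for the corner loss
assistant_opt.step()                                # updates trunk + corner head only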
                                       clipping_to_shortest_stream=True, delete_audio_file=False)

            # save pkl
            out_dir_vec = out_dir_vec + mean_dir_vec
            out_poses = convert_dir_vec_to_pose(out_dir_vec)
            save_dict = {
                'sentence': sentence,
                'audio': clip_audio.astype(np.float32),
                'out_dir_vec': out_dir_vec,
                'out_poses': out_poses,
                'aux_info': '{}_{}_{}'.format(vid, vid_idx, clip_idx),
                'human_dir_vec': target_dir_vec + mean_dir_vec,
            }
            with open(os.path.join(save_path, '{}.pkl'.format(filename_prefix)), 'wb') as f:
                pickle.dump(save_dict, f)

            n_saved += 1
    else:
        assert False, 'wrong mode'


if __name__ == '__main__':
    mode = sys.argv[1]  # {eval, from_db_clip, from_text}
    ckpt_path = sys.argv[2]

    option = None
    if len(sys.argv) > 3:
        option = sys.argv[3]

    set_logger()
    main(mode, ckpt_path, option)
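# Hedged sketch, reading one of the saved result files: the block above
# serialises each synthesised clip as a plain pickle dict, so a result can be
# inspected as shown here. The path below is a placeholder; the real
# save_path / filename_prefix come from the surrounding script.
import pickle

with open('results/example_clip.pkl', 'rb') as f:   # placeholder path
    clip = pickle.load(f)

print(sorted(clip.keys()))   # sentence, audio, out_dir_vec, out_poses, aux_info, human_dir_vec
print(clip['aux_info'])      # '<vid>_<vid_idx>_<clip_idx>'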
def __init__(self, config):
    self.config = config
    self.flag_gan = False
    self.train_count = 0

    self.pretraining_step_size = self.config.pretraining_step_size
    self.batch_size = self.config.batch_size

    self.logger = set_logger('train_epoch.log')

    # define dataloader
    self.dataset = SampleDataset(self.config)
    self.dataloader = DataLoader(self.dataset,
                                 batch_size=self.batch_size,
                                 shuffle=False,
                                 num_workers=1,
                                 pin_memory=self.config.pin_memory,
                                 collate_fn=self.collate_function)

    # define models
    self.model = Model().cuda()

    # define loss
    self.loss = Loss().cuda()

    # define lr
    self.lr = self.config.learning_rate

    # define optimizer
    self.opt = torch.optim.Adam(self.model.parameters(), lr=self.lr)

    # define lr scheduler
    self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        self.opt, mode='min', factor=0.8, cooldown=6)

    # initialize train counter
    self.epoch = 0
    self.accumulate_iter = 0
    self.total_iter = (len(self.dataset) + self.config.batch_size - 1) // self.config.batch_size

    self.manual_seed = random.randint(10000, 99999)
    torch.manual_seed(self.manual_seed)
    torch.cuda.manual_seed_all(self.manual_seed)
    random.seed(self.manual_seed)

    # parallel setting
    gpu_list = list(range(self.config.gpu_cnt))
    self.model = nn.DataParallel(self.model, device_ids=gpu_list)

    # Load model from the latest checkpoint; if none is found, start from scratch.
    self.load_checkpoint(self.config.checkpoint_file)

    # Summary Writer
    self.summary_writer = SummaryWriter(log_dir=os.path.join(self.config.root_path, self.config.summary_dir),
                                        comment='BarGen')

    self.print_train_info()
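# Hedged sketch, not this trainer's actual loop: the accumulate_iter counter
# above suggests gradient accumulation, where backward() is called on several
# mini-batches before a single optimizer step, emulating a larger effective
# batch size. The standalone example below shows that pattern;
# accumulation_steps, the dummy model, and the random data are illustrative
# assumptions, not repository code.
import torch
import torch.nn as nn

model = nn.Linear(8, 1)
opt = torch.optim.Adam(model.parameters(), lr=1e-3)
loss_fn = nn.MSELoss()
accumulation_steps = 4

opt.zero_grad()
for i in range(8):                                       # pretend mini-batches
    x, y = torch.randn(2, 8), torch.randn(2, 1)
    loss = loss_fn(model(x), y) / accumulation_steps     # scale so summed grads match one big batch
    loss.backward()
    if (i + 1) % accumulation_steps == 0:
        opt.step()                                       # update once every accumulation_steps batches
        opt.zero_grad()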
import os

from global_vars import DATA_DIR
from models.ere import ERE
from utils.config import config_from_args
from utils.evaluation import re_score
from utils.train_utils import set_random_seed, set_logger, load_checkpoint

if __name__ == "__main__":
    # Load and check config from args
    config = config_from_args()
    if os.path.exists(config.run_dir + f"{config.criterion}_test_scores.json"):
        assert False, "Run already launched"

    # Set logger
    print("Logging in {}".format(os.path.join(config.run_dir, "train.log")))
    set_logger(os.path.join(config.run_dir, "train.log"))

    # Set random seed
    set_random_seed(config.seed)

    # Load data
    assert os.path.exists(DATA_DIR + f"{config.dataset}.json")
    data, vocab = load_data(DATA_DIR + f"{config.dataset}.json", verbose=False)

    # Standard mode = training on train set and validation on dev set
    train_key, dev_key, test_key = "train", "dev", "test"
    # Training on train + dev => no validation
    if config.train_mode == "train+dev":
        train_key, dev_key, test_key = "train+dev", None, "test"
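# Hedged sketch, an assumption about downstream use: with
# train_mode == "train+dev" the dev key is set to None, so whatever consumes
# these keys has to skip validation entirely. A helper along these lines
# (purely illustrative; the real data layout is not shown in this excerpt)
# would make that explicit.
def select_splits(data, train_key, dev_key, test_key):
    train_split = data[train_key]
    dev_split = data[dev_key] if dev_key is not None else None   # no validation in train+dev mode
    test_split = data[test_key]
    return train_split, dev_split, test_split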