def train(cfg, args):
    """Train a scene graph generation model.

    Args:
        cfg: configuration object forwarded to ``build_model``.
        args: command-line namespace; ``local_rank`` and ``distributed``
            are forwarded to ``build_model``.

    Returns:
        The built model, switched into training mode.
    """
    # Mutable training state shared with the builder; use a dict literal
    # (consistent with the rest of the file) instead of building it in
    # two steps. Training resumes from iteration 0.
    arguments = {"iteration": 0}
    model = build_model(cfg, arguments, args.local_rank, args.distributed)
    model.train()
    return model
def test(cfg, args, model=None):
    """Test a scene graph generation model.

    Args:
        cfg: configuration object forwarded to ``build_model``.
        args: command-line namespace; ``local_rank``, ``distributed`` and
            ``visualize`` are read.
        model: optional pre-built model; when ``None`` a fresh model is
            built from ``cfg``.
    """
    if model is None:
        # Dict literal for consistency with the rest of the file.
        arguments = {"iteration": 0}
        model = build_model(cfg, arguments, args.local_rank, args.distributed)
    model.test(visualize=args.visualize)
# NOTE(review): collapsed fragment of a larger script whose enclosing
# definition is not visible from this view. It appears to load a batch of
# images via ``loader``, save the originals under ``_sample/``, build a
# model (batch-norm frozen via ``set_batch_norm_eval``), then start
# rendering per-image target maps with ``coord2_img(...)`` — but the final
# call is truncated mid-argument-list, so the chunk cannot be safely
# reformatted or rewritten here. Left byte-identical pending the rest of
# the file.
list_input = list() batch_targets = list() for target_name in target_names: img_input, found_coord, original = loader(target_name) list_input.append(img_input) batch_targets.append(found_coord) Image.fromarray(original).save('_sample/{0:s}/img_original.png'.format( target_name[:-4])) batch_input = torch.stack(list_input) # build model model = build_model() model.train() set_batch_norm_eval(model) # load target_coord # batch_targets = list() for targets, target_name in zip(batch_targets, target_names): img_target = None for _, target in targets.iterrows(): img_target = coord2_img(target.x, target.y, target.height, target.width,
# Fragment: configure the model, then dispatch prediction.
# NOTE(review): the enclosing function header is outside this view; names
# such as ``args``, ``cfg``, ``get_rank``, ``setup_logger``, ``save_config``,
# ``build_model`` and the ``predict_*`` helpers come from the surrounding
# file.
cfg.merge_from_file(args.config_file)
cfg.resume = args.resume
cfg.inference = args.inference
cfg.MODEL.USE_FREQ_PRIOR = args.use_freq_prior
cfg.MODEL.ALGORITHM = args.algorithm
# Only rank 0 creates the log directory, so distributed workers do not race
# on mkdir.
if not os.path.exists("logs") and get_rank() == 0:
    os.mkdir("logs")
logger = setup_logger("scene_graph_generation", "logs", get_rank())
logger.info(args)
logger.info("Loaded configuration file {}".format(args.config_file))
output_config_path = os.path.join("logs", 'config.yml')
logger.info("Saving config into: {}".format(output_config_path))
save_config(cfg, output_config_path)
arguments = {"iteration": 0}
model = build_model(cfg, arguments, args.local_rank, args.distributed)
model.scene_parser.eval()
# Fix: close the dictionary file deterministically; the original
# ``json.load(open(...))`` leaked the file handle.
with open(os.path.join(cfg.DATASET.PATH, "VG-SGG-dicts.json"), 'r') as f:
    info = json.load(f)
# Index-to-name lookup tables from the VG dicts file (labels / predicates,
# judging by the keys).
itola = info['idx_to_label']
itopred = info['idx_to_predicate']
# Dispatch: single image, directory of images, or the default data loader.
if args.image:
    predict_img(args.image, model)
elif args.dir:
    predict_imgs(args.dir, model)
else:
    predict_with_loader(model)
def main():
    """Train a detector on the configured dataset and save checkpoints.

    Reads the module-level ``params`` namespace; writes logs under
    ``_log/`` and model checkpoints under ``_models/``.
    """
    # Debug runs get a separate, more verbose (level 10 = DEBUG) log file.
    if params.debug:
        logger = init_logger('_log/01_train_debug.log', level=10)
    else:
        logger = init_logger('_log/01_train.log', level=20)
    tic = time.time()
    logger.info('parameters')
    logger.info(vars(params))

    # Shrink the workload drastically in debug mode.
    num_iter = 100 if params.debug else params.num_iter
    num_epoch = 2 if params.debug else params.num_epoch

    list_train_img_all = os.listdir(f'../../input/{params.dataset}/train')
    random.shuffle(list_train_img_all)
    coords = pd.read_csv(f'../../input/{params.dataset}/coordinates.csv')
    target_classes = params.target_classes.split(',')
    logger.info(f'target_classes: {target_classes}')
    coords = coords.query('name in @target_classes')
    # Keep only images that have at least one target-class annotation.
    # NOTE(review): the set intersection discards the shuffle order; the
    # ordering after this point is arbitrary.
    target_imgs = [f'{s}.png' for s in coords['token']]
    list_train_img_all = list(set(list_train_img_all) & set(target_imgs))

    # Hold out the last 10% as a validation split.
    rate_valid = 0.1
    n_valid = int(len(list_train_img_all) * rate_valid)
    # Fix: the original ``lst[:-n_valid]`` slicing produced an EMPTY
    # training list whenever n_valid == 0 (fewer than 10 images), because
    # ``lst[:-0]`` is ``lst[:0]``. Slicing at an explicit split index is
    # correct for every dataset size.
    split = len(list_train_img_all) - n_valid
    list_train_img = list_train_img_all[:split]
    list_valid_img = list_train_img_all[split:]
    if params.debug:
        # Tiny, deliberately overlapping train/valid sets for a smoke test.
        list_train_img = list(list_train_img[:16])
        list_valid_img = list(list_train_img)
    else:
        assert len(set(list_train_img) & set(list_valid_img)) == 0
    logger.info('number of train images: {}'.format(len(list_train_img)))
    logger.info('number of valid images: {}'.format(len(list_valid_img)))

    # build model
    model = build_model()

    # optimizer over trainable parameters only
    optimizer = optim.Adam(filter(lambda p: p.requires_grad,
                                  model.parameters()), lr=1e-4)

    dir_model = Path('_models')
    dir_model.mkdir(exist_ok=True)

    # train for each epoch
    for ep in range(num_epoch):
        logger.info('')
        logger.info('==> start epoch {}'.format(ep))
        # train
        model = train_main(model, params.dataset, optimizer, list_train_img,
                           target_classes, num_iter, ep,
                           params.distance_upper_bound)
        # validate
        validate(model, params.dataset, list_valid_img, target_classes,
                 epoch=ep)
        # exponential learning-rate decay: x0.95 per epoch
        for param_group in optimizer.param_groups:
            param_group['lr'] *= 0.95
            logger.info('change learning rate into: {:.6f}'.format(
                param_group['lr']))
        # per-epoch checkpoint
        torch.save(model, dir_model / 'model_ep{}.pt'.format(ep))

    # final model
    torch.save(model, dir_model / 'model.pt')

    # show elapsed time
    toc = time.time() - tic
    logger.info('Elapsed time: {:.1f} [min]'.format(toc / 60.0))
def main():
    """Train a ship-detection model on the Airbus dataset, save checkpoints.

    Expects a module-level ``logger`` plus ``build_model`` / ``train_main``
    / ``validate`` helpers from the surrounding file. Writes checkpoints
    under ``_models/``.
    """
    tic = time.time()

    # parse arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_iter', '-n', type=int, default=1000,
                        help='number of iteration')
    parser.add_argument('--num_epoch', '-e', type=int, default=30,
                        help='number of epoch')
    params = parser.parse_args()
    num_iter = params.num_iter

    list_train_img_all = os.listdir('../../dataset/train_v2')
    random.shuffle(list_train_img_all)

    # Hold out the last 10% as a validation split.
    rate_valid = 0.1
    n_valid = int(len(list_train_img_all) * rate_valid)
    # Fix: the original negative-index slices gave an EMPTY training list
    # when n_valid == 0 (``lst[:-0]`` is ``lst[:0]``); an explicit split
    # index is correct for any dataset size.
    split = len(list_train_img_all) - n_valid
    list_train_img = list_train_img_all[:split]
    list_valid_img = list_train_img_all[split:]
    assert len(set(list_train_img) & set(list_valid_img)) == 0

    segmentations = pd.read_csv(
        '../../dataset/train_ship_segmentations_v2.csv')
    segmentations = segmentations.fillna('')
    # An empty EncodedPixels mask marks an image with no ships.
    no_ship_imgs = segmentations.ImageId[segmentations.EncodedPixels ==
                                         ''].tolist()
    list_train_img_with_ship = list(set(list_train_img) - set(no_ship_imgs))
    list_train_img_no_ship = list(set(list_train_img) & set(no_ship_imgs))
    # Fix: log messages said "no shop" instead of "no ship".
    logger.info('num train img: {} (with ship: {} no ship: {})'.format(
        len(list_train_img), len(list_train_img_with_ship),
        len(list_train_img_no_ship)))
    list_valid_img_with_ship = list(set(list_valid_img) - set(no_ship_imgs))
    list_valid_img_no_ship = list(set(list_valid_img) & set(no_ship_imgs))
    logger.info('num valid img: {} (with ship: {} no ship: {})'.format(
        len(list_valid_img), len(list_valid_img_with_ship),
        len(list_valid_img_no_ship)))

    # build model
    model = build_model()

    # optimizer over trainable parameters only
    optimizer = optim.Adam(filter(lambda p: p.requires_grad,
                                  model.parameters()), lr=1e-4)

    os.makedirs('./_models', exist_ok=True)

    # train for each epoch
    for ep in range(params.num_epoch):
        logger.info('')
        logger.info('==> start epoch {}'.format(ep))
        # train
        model = train_main(model, optimizer, list_train_img_with_ship,
                           list_train_img_no_ship, num_iter, epoch=ep)
        # validate
        validate(model, list_valid_img_with_ship, epoch=ep)
        # step learning-rate decay (x0.1).
        # NOTE(review): with the default of 30 epochs only ep == 20 can
        # fire; the 30 / 31 milestones are unreachable unless --num_epoch
        # is raised.
        if ep in (20, 30, 31):
            for param_group in optimizer.param_groups:
                param_group['lr'] *= 0.1
                # Fix: use the module logger instead of a stray print() so
                # the lr change lands in the log like every other message.
                logger.info('change learning rate into: {:.6f}'.format(
                    param_group['lr']))
        # per-epoch checkpoint
        torch.save(model, '_models/model_ep{}.pt'.format(ep))

    # final model
    torch.save(model, '_models/model.pt')

    # show elapsed time
    toc = time.time() - tic
    logger.info('Elapsed time: {:.1f} [min]'.format(toc / 60.0))