Example #1
def main(_):
    args = flags.FLAGS

    # Dump every defined flag before handing off to the real entry point.
    # Note: __flags is a private TF1-era attribute; with absl.flags the
    # public equivalent is flags.FLAGS.flag_values_dict().
    print("\nParameters:")
    for attr, value in sorted(args.__flags.items()):
        print("{}={}".format(attr.upper(), value))
    print("")
    m(args)
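
For this snippet to run, `flags` and `m` must already exist at module level. A minimal harness sketch, assuming TensorFlow 1.x-style flags and a `main.main` entry point; the flag names below are placeholders, not taken from the original project:

# Harness sketch (assumed, not from the original source): TF1-style flags.
import tensorflow as tf
from main import main as m  # entry point, as in Example #4

flags = tf.flags
flags.DEFINE_string("model_name", "baseline", "Model variant to run.")
flags.DEFINE_integer("batch_size", 32, "Training batch size.")

# def main(_): ...  (the function shown above)

if __name__ == "__main__":
    tf.app.run()  # parses the flags and calls main(argv)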
Example #2
def main(_):
  # Debug mode shrinks batch counts and periods so a full cycle runs quickly.
  if config.debug:
    #config.mode = "check"
    config.num_batches = 100
    config.log_period = 1
    config.save_period = 1
    config.eval_period = 1
    config.batch_size = 2
    config.val_num_batches = 3
    config.out_dir = "debug"
  #print(config.test_batch_size)
  if config.model_name.endswith("flat"):  
    if config.data_from=="reuters": config.n_classes = 18
    if config.data_from=="20newsgroup": config.n_classes = 20
    config.multilabel_threshold = 0.053
  else:
    if config.data_from=="reuters": config.n_classes = 21
    if config.data_from=="20newsgroup": config.n_classes = 29
    
  if config.data_from == "reuters":
    config.max_docs_length = 818
    config.tree1 = np.array([2,3,4,5,6,7,8])
    config.tree2 = np.array([9,10,11,12,13,14,15])
    config.tree3 = np.array([16,17,18,19])
    config.layer1 = np.array([2, 9, 16])
    config.layer2 = np.array([3, 4, 10, 11, 17, 19])
    config.layer3 = np.array([5, 6, 7, 8, 12, 13, 14, 15, 18])
  
  if config.data_from == "20newsgroup":
    config.test_batch_size = 26
    config.max_docs_length = 1000
    config.max_seq_length = 3
    config.tree1 = np.array([22,3,4,5,6,7])
    config.tree2 = np.array([23,9,10,11,12])
    config.tree3 = np.array([24,13,14,15,16])
    config.tree4 = np.array([25,8])
    config.tree5 = np.array([26,10,18,19])
    config.tree6 = np.array([27,21,2,17])
    config.layer1 = np.array([22,23,24,25,26,27])
    config.layer2 = np.array([3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21])

  # Create the output/save/log directories and pin the visible GPUs.
  config.save_dir = os.path.join(config.out_dir, "save")
  config.log_dir = os.path.join(config.out_dir, "log")
  if not os.path.exists(config.out_dir):  # or os.path.isfile(config.out_dir):
    os.makedirs(config.out_dir)
  if not os.path.exists(config.save_dir):
    os.mkdir(config.save_dir)
  if not os.path.exists(config.log_dir):
    os.mkdir(config.log_dir)
  os.environ["CUDA_VISIBLE_DEVICES"] = config.gpu_ids
  m(config)
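
The three existence checks above can be collapsed; a shorter equivalent sketch using os.makedirs(..., exist_ok=True), where the helper name prepare_dirs is illustrative:

# Equivalent directory setup (sketch): exist_ok avoids the explicit checks
# and also creates any missing parent directories.
import os

def prepare_dirs(config):
    config.save_dir = os.path.join(config.out_dir, "save")
    config.log_dir = os.path.join(config.out_dir, "log")
    for path in (config.out_dir, config.save_dir, config.log_dir):
        os.makedirs(path, exist_ok=True)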
Example #3
def main():
    key_args = key_parser.parse_known_args()[0]
    print(key_args)
    args = all_parser.parse_args()
    args.name = args.name.replace("-", "_")

    if args.mode == "train_eval":
        assert args.data_name is not None
        assert args.hparams_path is not None

    run_id, save_dir = prepare_env(args)
    # set distribution env
    if torch.cuda.is_available():
        if args.is_dist:
            hvd.init()
            args.local_rank = hvd.local_rank()
            args.world_size = hvd.size()
            print('local rank:', args.local_rank, 'world size',
                  args.world_size)
            if args.local_rank != 0:
                args.main_process = False
            torch.cuda.set_device(args.local_rank)
            seed = args.local_rank  # give each Horovod rank a distinct seed
        else:
            torch.cuda.set_device('cuda:%d' % int(args.gpu_id))
            seed = args.seed
            # seed = args.gpu_id
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)
        np.random.seed(seed)  # numpy pseudo-random generator
        random.seed(seed)  # `python` built-in pseudo-random generator
    else:
        raise RuntimeError('gpu is not available')

    # os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
    if args.run_id is None:
        # hparams = edict(json.loads(open(args.hparams_path, "r").read()))
        with open(args.hparams_path, "r") as fin:
            hparams = json.load(fin)
        if args.main_process:
            dump_src(save_dir)
            dump_env(vars(args), save_dir, "args.json")
            dump_env(hparams, save_dir, "hparams.json")
    else:
        args, hparams = load_saved_env(
            args, save_dir)  # TODO some process may not have $savedir
    m(args, str(save_dir), hparams=hparams)
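
The four seeding calls above are a common reproducibility idiom; a small helper that bundles them, assuming the same torch/numpy/random imports (the name set_seed is illustrative):

# Reproducibility helper (sketch): seed every RNG the training code touches.
import random
import numpy as np
import torch

def set_seed(seed: int) -> None:
    torch.manual_seed(seed)       # PyTorch CPU RNG
    torch.cuda.manual_seed(seed)  # RNG of the current CUDA device
    np.random.seed(seed)          # numpy pseudo-random generator
    random.seed(seed)             # Python built-in pseudo-random generator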
Example #4
def main(job_id, params):
    # Merge the experiment name and dataset from config.json into the
    # hyper-parameter dict before delegating to the real main().
    import json
    from main import main as m
    with open("config.json") as fin:  # `file()` is Python 2 only; use open()
        params_ = json.load(fin)

    params['experiment_name'] = params_['experiment-name']
    params['dataset'] = params_['dataset']
    return m(job_id, params)
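
The wrapper only reads two keys from config.json; a minimal file that satisfies it can be produced like this (the values are placeholders):

# Write a minimal config.json with the two keys the wrapper reads (placeholders).
import json

with open("config.json", "w") as fout:
    json.dump({"experiment-name": "demo_run", "dataset": "reuters"}, fout, indent=2)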
Example #5
def main(_):
    config = parser.parse_args()
    os.environ["CUDA_VISIBLE_DEVICES"] = config.gpu

    m(config)
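
Here `parser` and `m` are module-level objects. A minimal sketch of what they might look like, where the --gpu argument is inferred from the snippet and everything else is an assumption:

# Module-level pieces assumed by the snippet above (names are illustrative).
import argparse
import os
from main import main as m  # entry point, as in Example #4

parser = argparse.ArgumentParser()
parser.add_argument("--gpu", type=str, default="0",
                    help="Value for CUDA_VISIBLE_DEVICES.")

# def main(_): ...  (the function shown above)

if __name__ == "__main__":
    main(None)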
Example #6
def main(_):
    config = flags.FLAGS

    # e.g. out_base_dir/model_name/03 (run_id is zero-padded to two digits).
    config.out_dir = os.path.join(config.out_base_dir, config.model_name,
                                  str(config.run_id).zfill(2))
    m(config)
Example #7
def main(_):
    config = flags.FLAGS
    os.environ["CUDA_VISIBLE_DEVICES"] = str(config.gpu)

    m(config)
Example #8
def main(_):
    # assert (config.clftype=="") & (not config.model_name.endswith("flat"))
    if config.debug:
        #config.mode = "check"
        config.num_batches = 100
        config.log_period = 1
        config.save_period = 1
        config.eval_period = 1
        config.batch_size = 2
        config.val_num_batches = 3
    #print(config.test_batch_size)
    if config.model_name.endswith("flat"):
        if config.data_from == "reuters": config.clftype = "flat"
        config.eval_layers = False
        config.eval_trees = False
        if config.data_from == "reuters": config.fn_classes = 18
        if config.data_from == "20newsgroup": config.fn_classes = 20
        if config.data_from == "ice":
            config.fn_classes = 645
            config.learning_rate = 0.001
        config.thred = 0.053
    else:
        if config.data_from == "reuters": config.hn_classes = 21
        if config.data_from == "ice": config.hn_classes = 648
        if config.data_from == "20newsgroup": config.hn_classes = 29

    if config.data_from == "reuters":
        config.max_docs_length = 818
        config.tree1 = np.array([2, 3, 4, 5, 6, 7, 8])
        config.tree2 = np.array([9, 10, 11, 12, 13, 14, 15])
        config.tree3 = np.array([16, 17, 18, 19])
        config.layer1 = np.array([2, 9, 16])
        config.layer2 = np.array([3, 4, 10, 11, 17, 19])
        config.layer3 = np.array([5, 6, 7, 8, 12, 13, 14, 15, 18])

    if config.data_from == "20newsgroup":
        if config.mode == "train": config.val_num_batches = 3
        config.EOS = 28
        config.test_batch_size = 100
        config.max_docs_length = 1000
        config.max_seq_length = 2
        config.tree1 = np.array([22, 3, 4, 5, 6, 7])
        config.tree2 = np.array([23, 9, 10, 11, 12])
        config.tree3 = np.array([24, 13, 14, 15, 16])
        config.tree4 = np.array([25, 8])
        config.tree5 = np.array([26, 10, 18, 19])
        config.tree6 = np.array([27, 21, 2, 17])
        config.layer1 = np.array([22, 23, 24, 25, 26, 27])
        config.layer2 = np.array([
            3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21
        ])

    if config.data_from == "ice":
        if config.mode == "train": config.val_num_batches = 3
        config.EOS = 647
        config.batch_size = 600
        config.test_batch_size = 600
        config.max_docs_length = 48
        config.max_seq_length = 8
        config.eval_trees = False
        config.eval_layers = False
        config.num_batches = 80000
        # TODO: define tree1/tree2/layer1/layer2 for the ice hierarchy.

    config.out_dir = os.path.join("../data/out", config.out_dir)
    config.save_dir = os.path.join(config.out_dir, "save")
    config.log_dir = os.path.join(config.out_dir, "log")
    if not os.path.exists(
            config.out_dir):  # or os.path.isfile(config.out_dir):
        os.makedirs(config.out_dir)
    if not os.path.exists(config.save_dir):
        os.mkdir(config.save_dir)
    if not os.path.exists(config.log_dir):
        os.mkdir(config.log_dir)
    os.environ["CUDA_VISIBLE_DEVICES"] = config.gpu_ids
    m(config)
Example #9
def main(_):
    config = flags.FLAGS
    # Values defined above can be accessed from anywhere via flags.FLAGS
    os.environ["CUDA_VISIBLE_DEVICES"] = str(config.gpu)

    m(config)
Example #10
def main(argv):
    print(argv)  # argv is passed through by app.run / tf.app.run
    config = flags.FLAGS
    m(config)