Example #1
def main():
    args = parser.parse_args()
    # add arguments that will be used for meta training.
    # train_args = [
    #   'meta_optim', 'lr', 'no_mask', 'k_obsrv',
    #   'mask_mode', 'data_mode',
    #   ]

    # test_args = [
    #   'no_mask', 'k_obsrv',
    #   'mask_mode',
    #   ]
    # train_args = {k: v for k, v in vars(args).items() if k in train_args}

    # test_args = {k: v for k, v in vars(args).items() if k in test_args}
    # set CUDA
    args.cuda = not args.cpu and torch.cuda.is_available()
    C.set_cuda(args.cuda)
    # set load dir
    load_dir = os.path.join(args.result_dir, args.load_dir)
    if args.load_dir and not os.path.exists(load_dir):
        raise Exception(f'{load_dir} does NOT exist!')
    # set save dir: saving functions will be suppressed if save_dir is None
    args.result_dir = None if args.volatile else args.result_dir
    args.save_dir = utils.prepare_dir(args.problem, args.result_dir,
                                      args.save_dir)
    if not args.test_optim:
        args.test_optim = args.train_optim
        print('Test optimizers are not specified.\n'
              'All optimizers set to be trained will automatically '
              f'be added to the test list: {args.train_optim}\n')
    # set problem & config
    print(f'Problem: {args.problem}')
    cfg = Config(getConfig(args))
    cfg.update_from_parsed_args(args)
    cfg.save(args.save_dir)

    problem = cfg.problem.dict
    neural_optimizers = cfg.neural_optimizers.dict
    normal_optimizers = cfg.normal_optimizers.dict
    test_optimizers = cfg.test_optimizers
    # TODO: enforce this call by wrapping it in a class
    if args.retest_all:
        args.retest = test_optimizers

    params = {}
    ##############################################################################
    print('\nMeta-training..')
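    # reuse saved optimizer parameters from load_dir when available,
    # unless retraining was explicitly requested via args.retrain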
    for name in [opt for opt in test_optimizers if opt in neural_optimizers]:
        if name not in args.retrain and OptimizerParams.is_loadable(
                name, load_dir):
            params[name] = OptimizerParams.load(name, load_dir).save(
                name, args.save_dir)
        else:
            print(f"\nTraining neural optimizer: {name}")

            # per-optimizer train_args (train_neural currently reads cfg directly)
            kwargs = neural_optimizers[name]['train_args']
            # kwargs.update(cfg.args.get_by_names(train_args))
            # print(f"Module name: {kwargs['optim_module']}")
            params[name] = train_neural(name, cfg)
    ##############################################################################
    print('\n\n\nMeta-testing..')
    results = {}
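    # likewise, reuse cached test results from load_dir unless a retest
    # was requested via args.retest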
    for name in test_optimizers:
        # np.random.seed(0)
        # if not utils.is_loadable_result(load_dir, name, args.force_retest):
        if name not in args.retest and ResultDict.is_loadable(name, load_dir):
            results[name] = ResultDict.load(name, load_dir).save(
                name, args.save_dir)
        else:
            if name in normal_optimizers:
                print(f'\nOptimizing with static optimizer: {name}')
                # kwargs = normal_optimizers[name]
                result = test_normal(name, cfg)
                # candidate learning rates for the (currently disabled) search:
                # lr_list = [1.0, 0.3, 0.1, 0.03, 0.01, 0.003,
                #            0.001, 0.0003, 0.0001]
                # best_loss, best_lr = find_best_lr(
                #     name, save_dir, lr_list, **problem, **kwargs)
                results[name] = result
            elif name in neural_optimizers:
                print(f'\n\nOptimizing with learned optimizer: {name}')

                # kwargs = neural_optimizers[name]['test_args']
                # kwargs.update(cfg.args.get_by_names(test_args))
                # print(f"Module name: {kwargs['optim_module']}")
                result = test_neural(name, cfg, params[name])
                results[name] = result

    ##############################################################################
    print('End of program.')
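Both loops rely on the same caching interface: is_loadable(name, dir) checks for a saved artifact, load(name, dir) restores it, and save(name, dir) returns the object so a load can be chained straight into a re-save under the current save_dir. A minimal sketch of that interface for ResultDict, assuming pickle files named after the optimizer (the real ResultDict and OptimizerParams classes live elsewhere in the project, so names and file layout here are illustrative):

import os
import pickle


class ResultDict(dict):
    """Illustrative sketch of the caching interface used in main()."""

    @staticmethod
    def _path(name, dir_):
        return os.path.join(dir_, name + '.pickle')

    @classmethod
    def is_loadable(cls, name, dir_):
        # a result is loadable if its pickle exists under the given directory
        return dir_ is not None and os.path.exists(cls._path(name, dir_))

    @classmethod
    def load(cls, name, dir_):
        with open(cls._path(name, dir_), 'rb') as f:
            return cls(pickle.load(f))

    def save(self, name, dir_):
        # saving is suppressed when dir_ is None (e.g. volatile runs);
        # returning self allows the load(...).save(...) chaining seen above
        if dir_ is not None:
            with open(self._path(name, dir_), 'wb') as f:
                pickle.dump(dict(self), f)
        return self

OptimizerParams would follow the same pattern, with the payload holding the learned optimizer's weights instead of test results.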
Example #2
def main():
  args = parser.parse_args()
  results = {}
  for name in plot_names:
    if ResultDict.is_loadable(name, args.load_dir):
      results[name] = ResultDict.load(name, args.load_dir)
    else:
      raise Exception(f"Unalbe to find result name: {name}")

  # DataFrame.append was removed in pandas 2.0; build the frame with pd.concat
  df_loss = pd.concat(
    [result.data_frame(name, ['test_num', 'step_num'])
     for name, result in results.items()],
    sort=True)

  df_grad = pd.concat(
    [result.data_frame(name, ['test_num', 'step_num', 'track_num'])
     for name, result in results.items()],
    sort=True)
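  # keep only entries whose gradient magnitude falls in (1e-5, 1e-3);
  # values outside that band are treated as noise or outliers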
  df_grad = df_grad[df_grad['grad'].abs() > 0.00001]
  df_grad = df_grad[df_grad['grad'].abs() < 0.001]
  df_grad['grad'] = df_grad['grad'].abs()
  df_grad['update'] = df_grad['update'].abs()

  sns.set(color_codes=True)
  sns.set_style('white')
  # import pdb; pdb.set_trace()

  grouped = df_loss.groupby(['optimizer', 'step_num'])
  df_loss = grouped.mean().reset_index()

  grouped = df_grad.groupby(['optimizer', 'step_num'])
  df_grad = grouped.mean().reset_index()

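  # replay logged walltime: after time_delay seconds of real time, the
  # reference time advances at time_ratio (5%) of real-time speed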
  time_init = df_loss['walltime'].min()
  time_delay = 1
  time_ratio = 0.05

  plt.ion()
  num_loop = 0
  fig = plt.figure(figsize=(20, 13))
  axes = [fig.add_subplot(2, 2, i) for i in range(1, 5)]

  def animate(i):
    for ax in axes:
      ax.clear()

    time = watch.touch('cumulative')
    time_ref = time_init + (time - time_delay) * time_ratio
    df = df_loss[df_loss['walltime'] <= time_ref]
    #df = data_frame[data_frame['step_num'] <= i]
    for j, name in enumerate(plot_names):
      y = df[df['optimizer'] == name]['loss']
      x = df[df['optimizer'] == name]['step_num']
      if name == 'LSTM-base':
        name = 'Proposed'
      axes[0].plot(x, y, label=name, color=f'C{j}')
      axes[0].set_xlim([0, 200])
      axes[0].set_ylim([0.2, 3])
      axes[0].legend()
      axes[0].set_yscale('log')
      axes[0].set_xlabel('Step_num')
      axes[0].set_ylabel('Loss')
      axes[0].set_title('Loss w.r.t. num_step')

    for j, name in enumerate(plot_names):
      y = df[df['optimizer'] == name]['loss']
      x = df[df['optimizer'] == name]['walltime']
      if name == 'LSTM-base':
        name = 'Proposed'
      axes[1].plot(x, y, label=name, color=f'C{j}')
      axes[1].set_ylim([0.2, 3])
      axes[1].legend()
      axes[1].set_yscale('log')
      axes[1].set_xlabel('Walltime')
      axes[1].set_ylabel('Loss')
      axes[1].set_title('Loss w.r.t. walltime')

    df = df_grad[df_grad['walltime'] <= time_ref]

    for j, name in enumerate(plot_names):
      y = df[df['optimizer'] == name]['grad']
      x = df[df['optimizer'] == name]['step_num']
      if name == 'LSTM-base':
        name = 'Proposed'
      axes[2].plot(x, y, label=name, color=f'C{j}')
      axes[2].set_xlim([0, 200])
      # axes[2].set_ylim([0.2, 3])
      axes[2].legend()
      axes[2].set_xlabel('Step_num')
      axes[2].set_ylabel('Gradient')
      axes[2].set_title('Gradient w.r.t. step_num')

    for j, name in enumerate(plot_names):
      y = df[df['optimizer'] == name]['update']
      x = df[df['optimizer'] == name]['step_num']
      if name == 'LSTM-base':
        name = 'Proposed'
        y = y * 0.1  # scale the Proposed update magnitudes by 0.1
      axes[3].plot(x, y, label=name, color=f'C{j}')
      axes[3].set_xlim([0, 200])
      # axes[3].set_ylim([0.2, 3])
      axes[3].legend()
      axes[3].set_xlabel('Step_num')
      axes[3].set_ylabel('Update')
      axes[3].set_title('Update w.r.t. step_num')

  watch = StopWatch('Realtime')
  ani = animation.FuncAnimation(fig, animate, interval=1)
  plt.show()

  # pause in the debugger so the interactive animation stays alive
  import pdb; pdb.set_trace()
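animate() reads elapsed wall-clock time via watch.touch('cumulative'). A minimal sketch of a StopWatch with that behavior, assuming touch simply reports seconds since construction (the project's actual StopWatch may track more modes):

import time


class StopWatch:
    """Illustrative sketch: reports elapsed wall-clock time."""

    def __init__(self, name):
        self.name = name
        self._start = time.time()

    def touch(self, mode='cumulative'):
        # 'cumulative': seconds elapsed since the watch was created
        return time.time() - self._start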
Example #3
def main():
    global optimizer_names
    args = parser.parse_args()
    # set load dir
    load_dir = os.path.join(args.result_dir, args.load_dir)
    if args.load_dir and not os.path.exists(load_dir):
        raise Exception(f'{load_dir} does NOT exist!')
    cfg = Config(getConfig(args.problem))
    cfg.update_from_parsed_args(args)
    problem = cfg.problem.dict
    neural_optimizers = cfg.neural_optimizers.dict
    normal_optimizers = cfg.normal_optimizers.dict
    test_optimizers = cfg.test_optimizers
    if args.retest_all:
        args.retest = test_optimizers

    ##############################################################################
    print('\n\n\nLoading saved optimizers..')
    params = {}
    results = {}

    if args.subdirs:
        # dirs = os.listdir(load_dir)
        dirs = [
            # 'sparsity_0.1',
            # 'sparsity_0.05',
            # 'sparsity_0.01',
            'drop_rate_0.0_no_mask',
            'drop_rate_0.5_no_relative_params_10_obsrv_grad_clip',
            # 'sparsity_0.001',
            # 'sparsity_0.0005',
            'dynamic_scaling_g_only_no_dropout_no_mask_time_encoding',
            # 'sparsity_0.00005',
            # 'sparsity_0.00001',class
            # 'sparsity_0.000005',
            'sparsity_0.000001',
            # 'baseline'
        ]
        # pair every experiment subdirectory with every optimizer name
        optimizer_names = [
            os.path.join(d, name)
            for d, name in product(dirs, optimizer_names)
        ]

    # import pdb; pdb.set_trace()

    for name in optimizer_names:
        # np.random.seed(0)
        # if not utils.is_loadable_result(load_dir, name, args.force_retest):
        if OptimizerParams.is_loadable(name, load_dir):
            params[name] = OptimizerParams.load(name, load_dir)

        # strip any subdirectory prefix to recover the bare optimizer name
        subname = os.path.basename(name)
        if ResultDict.is_loadable(name, load_dir):
            results[name] = ResultDict.load(name, load_dir)
        else:
            if subname not in args.retest:
                continue
            if subname in normal_optimizers:
                print(f'\n\nOptimizing with static optimizer: {name}')
                kwargs = normal_optimizers[subname]
                result = test_normal(name, load_dir, **problem, **kwargs)
                results[name] = result
            elif subname in neural_optimizers:
                print(f'\n\nOptimizing with learned optimizer: {name}')
                kwargs = neural_optimizers[subname]['test_args']
                result = test_neural(name, load_dir, params[name], **problem,
                                     **kwargs)
                results[name] = result

    ##############################################################################
    print('\nPlotting..')
    plotter = utils.Plotter(title=args.title, results=results, out_dir=None)
    # loss w.r.t. step_num
    # plotter.plot('grad_value', 'step_num', ['test_num', 'step_num', 'track_num'])
    plotter.plot('step_num',
                 'loss', ['test_num', 'step_num'],
                 hue='optimizer',
                 logscale=True)
    # mean_group=['optimizer', 'step_num'])
    # import pdb; pdb.set_trace()
    plotter.plot('step_num', 'grad', ['test_num', 'step_num', 'track_num'])

    # loss w.r.t. walltime
    # plotter.plot('walltime', 'loss', ['test_num', 'step_num'],
    #              hue='optimizer',
    #              mean_group=['optimizer', 'step_num'])

    # grad_loss w.r.t. step_num (for specified neural opt name)
    # plotter.plot('step_num', 'grad_loss', ['test_num', 'step_num'],
    #              mean_group=['optimizer', 'step_num'],
    #              visible_names=['LSTM-ours'])

    # update w.r.t. step_num (for specified neural opt name)
    plotter.plot('step_num',
                 'update', ['test_num', 'step_num', 'track_num'],
                 visible_names=['LSTM-base', 'LSTM-ours'])

    # grad w.r.t. step_num
    plotter.plot('step_num',
                 'grad', ['test_num', 'step_num', 'track_num'],
                 visible_names=['LSTM-base', 'LSTM-ours'])

    # mu w.r.t. step_num
    plotter.plot('step_num',
                 'mu', ['test_num', 'step_num', 'track_num'],
                 visible_names=['LSTM-ours'])

    # sigma w.r.t. step_num
    plotter.plot('step_num',
                 'sigma', ['test_num', 'step_num', 'track_num'],
                 visible_names=['LSTM-ours'])

    # grad w.r.t. step_num (for specified neural opt name)
    # plotter.plot('step_num', 'grad_pred', ['test_num', 'step_num', 'track_num'],
    #              visible_names=['LSTM-ours'])
    # plotter.plot('step_num', 'grad_value', ['test_num', 'step_num', 'track_num'],
    #              hue='grad_type', visible_names=['LSTM-ours'])
    print('End of program.')
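The plot calls above suggest a Plotter that merges every result into one DataFrame, optionally restricts the curves to visible_names, and draws one line per optimizer. A minimal sketch under those assumptions, reusing the data_frame(name, index_cols) accessor seen in Example #2 (utils.Plotter in the project may differ):

import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns


class Plotter:
    """Illustrative sketch of the plotting interface used above."""

    def __init__(self, title, results, out_dir=None):
        self.title = title
        self.results = results  # {optimizer_name: ResultDict}
        self.out_dir = out_dir

    def plot(self, x, y, index_cols, hue='optimizer',
             logscale=False, visible_names=None):
        # merge the selected results into a single long-form DataFrame
        names = visible_names or list(self.results)
        df = pd.concat([self.results[n].data_frame(n, index_cols)
                        for n in names if n in self.results],
                       sort=True)
        ax = sns.lineplot(data=df, x=x, y=y, hue=hue)
        if logscale:
            ax.set_yscale('log')
        ax.set_title(self.title)
        plt.show()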