Example #1
def main():
    args = parse_args()
    if not args.list and not args.experiments:
        print "Nothing to do: no experiments passed."
        print ("Did you forget the '-e' flag? Run 'python main.py --help' for "
               "more info.")
        return

    if args.list:
        print "Available experiments:"
        for exp in list_experiments(args.exp_home):
            print "\t", exp
        return

    for exp in args.experiments:
        run_experiment(
            exp,
            exp_home=args.exp_home,
            out_dir=args.outdir,
            db_name=args.dbname,
            do_run=not args.plot_only,
            do_plot=not args.run_only,
            overwrite=not args.no_overwrite,
            partition=args.partition
        )
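
# A minimal sketch of the parse_args() helper assumed above; the flag names
# are inferred from the attribute accesses and the '-e' hint in the help
# text, and the defaults are placeholders, not the original module's values.
import argparse

def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('-e', '--experiments', nargs='*', default=[],
                        help='names of experiments to run')
    parser.add_argument('--list', action='store_true',
                        help='list available experiments and exit')
    parser.add_argument('--exp-home', dest='exp_home', default='experiments')
    parser.add_argument('--outdir', default='out')
    parser.add_argument('--dbname', default='results.db')
    parser.add_argument('--plot-only', action='store_true')
    parser.add_argument('--run-only', action='store_true')
    parser.add_argument('--no-overwrite', action='store_true')
    parser.add_argument('--partition', default=None)
    return parser.parse_args()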
Example #2
def _main_(args):
    # ======================
    # BASE PARAMETERS
    # ======================
    print(args)
    # os.nice(19)
    fit_params = {
        'model': args.model,
        'lr': args.lr,
        'epochs': args.epoch,
        'reduce_lr_on_plateau': True
    }

    data_params = {
        'use_sampling': args.sa,
        'batch_size': args.batch_size,
        'buffer_size': 3000,
        'augmentations': args.augmentations
    }

    model_params = {
        'channels': ['B4', 'B3', 'B2', 'AVE'],
        'target_size': [257, 257],
        'num_classes': 2,
        'weights': 'imagenet'
    }

    config = {
        'fit_params': fit_params,
        'data_params': data_params,
        'model_params': model_params
    }

    run_experiment(config)
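
# Hypothetical invocation of _main_(); the Namespace fields mirror the
# attributes read above, and every value shown is a placeholder.
import argparse

args = argparse.Namespace(model='resnet50', lr=1e-3, epoch=50, sa=False,
                          batch_size=8, augmentations=[])
_main_(args)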
Example #3
def primary_checks(args):

    # TODO Improve the way of checking stuff - maybe more parameters will be added
    if os.stat("config.ini").st_size == 0 and 'classifier' not in args and 'scaler' not in args and 'split' not in args:
        print('config file is empty and no parameters were supplied')
        return

    if os.stat("config.ini").st_size != 0 and 'classifier' not in args and 'scaler' not in args and 'split' not in args:
        print("no parameters were supplied - using saved parameters")

    if 'save_options' in args and not args['save_options']:
        scaler, classifier, split, data, labels, plots = options_compose(
            args)

        run_experiment(scaler, classifier, split, data, labels, plots)
        return 0

    if 'save_options' in args and args['save_options']:
        options = save_options(args)
        print('saved and moved on')
        # print(options)

    config_values = read_config()

    if config_values == {}:
        print('No supported parameters in config file')
        return
    if config_values == 1:
        print('Wrong value(s) type in config file')
        return 1

    run_experiment(
        config_values['scaler'], config_values['classifier'], config_values['split'], config_values['data'],
        config_values['labels'], config_values['plots'])
    return 0
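
# A minimal sketch of the read_config() helper assumed above, using
# configparser; the section name is an assumption. The original also signals
# bad value types by returning 1, which this sketch omits.
import configparser

def read_config():
    parser = configparser.ConfigParser()
    parser.read("config.ini")
    if "experiment" not in parser:
        return {}
    section = parser["experiment"]
    expected = ('scaler', 'classifier', 'split', 'data', 'labels', 'plots')
    if not all(key in section for key in expected):
        return {}  # no supported parameters in config file
    return {key: section[key] for key in expected}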
Example #4
def route_session_run():
    try:
        experiment.run_experiment()
        return flask.Response("ok")
    except Exception as e:
        log.exception("Exception while running session:")
        flask.abort(500, e)
Example #5
def mp_worker(data):
    weight_decay = data
    dim = 2
    train_seed = 1683
    loss_fn = [{'loss_fn': lossfn.get_mse_loss(), 'weight': 1, 'label': 'mse'}]
    train_range = range(10, 21, 1)
    for n_train in train_range:
        checkpoint_dir = 'checkpoints/'
        checkpoint_dir += 'checkpoints_weight_decay_mse/'
        checkpoint_dir += 'checkpoint_dim-{}_ntrain-{}_weightdecay-{}_seed-{}'.format(
            dim, n_train, weight_decay, train_seed)
        run_experiment(dim, n_train, train_seed, loss_fn, weight_decay,
                       checkpoint_dir)
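
# Hypothetical driver for the worker above (the mp_ prefix suggests use with
# a multiprocessing pool; the sweep values are assumptions):
from multiprocessing import Pool

if __name__ == '__main__':
    weight_decays = [0.0, 1e-5, 1e-4, 1e-3]
    with Pool(processes=4) as pool:
        pool.map(mp_worker, weight_decays)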
Example #6
def main():

    usage = "%prog project logfile "
    parser = OptionParser(usage=usage)
    parser.add_option('-n', dest='new_name', default=None,
                      help='New name for experiment: default= old name + _rerun')
    #parser.add_option('--boolarg', action="store_true", dest="boolarg", default=False,
    #                  help='Keyword argument: default=%default')


    (options, args) = parser.parse_args()
    project = args[0]
    log_filename = args[1]
    new_name = options.new_name

    log = fh.read_json(log_filename)
    if new_name is None:
        new_name = log['name'] + '_rerun'

    log['name'] = new_name

    float_vars = ['best_alpha', 'alpha_exp_base', 'max_alpha_exp', 'min_alpha_exp', 'orig_T', 'tau']
    for v in float_vars:
        if v in log and log[v] is not None:
            log[v] = float(log[v])
    #if log['reuse'] == 'False':
    #    log['reuse'] = False
    #else:
    #    log['reuse'] = True
    # convert list string to list
    #list_vars = ['feature_list', 'additional_label_files', 'additional_label_weights']
    #for v in list_vars:
    #    if v in log:
    #        print v
    #        print log[v]
    #        quoted_strings = [p.strip() for p in log[v][1:-1].split(',')]
    #        print quoted_strings
    #        log[v] = [p[1:-1] for p in quoted_strings]
    #        print log[v]
    #        print '\n'
    #print log
    #if 'additional_label_weights' in log:
    #    log['additional_label_weights'] = [float(w) for w in log['additional_label_weights']]

    dirs.make_base_dir(project)

    print(log)
    result = experiment.run_experiment(**log)
    print(result)
Example #7
def run(tracker_params):
    rigid_mode = isinstance(tracker_params, rigid_tracker.RigidTrackerParams)

    if args.fake:
        if rigid_mode:
            print(tracker_params.__dict__)
        else:
            print(tracker_params.flow_rigidity_coeff)
        return

    mod_class_list = args.experiment_class.split('.')
    assert len(mod_class_list) == 2
    module_name, class_name = mod_class_list
    print('Running experiment', class_name, 'in module', module_name)
    import importlib
    mod = importlib.import_module(module_name)
    ex_class = getattr(mod, class_name)
    ex = ex_class.Create(args.argstr)

    # import matplotlib
    # if not args.show_plot: matplotlib.use('Agg')
    # import matplotlib.pyplot as plt

    def iter_callback(i, data):
        return
        # timb.plot_problem_data(
        #   plt,
        #   tracker_params.tsdf_trunc_dist,
        #   ex.get_grid_params(),
        #   ex.get_state(i),
        #   data['obs_tsdf'], data['obs_weight'],
        #   data['curr_phi'], data['curr_weight'],
        #   data['problem_data']['result'], data['problem_data']['opt_result'],
        #   data['new_phi'], data['new_weight'], data['output']
        # )
        # if args.output_dir is None:
        #   plt.show()
        # else:
        #   plt.savefig('%s/plots_%d.png' % (args.output_dir, i), bbox_inches='tight')

    t_start = time.time()
    if rigid_mode:
        ex_log = experiment.run_experiment_rigid(ex, tracker_params,
                                                 iter_callback, args.iter_cap)
    else:
        ex_log = experiment.run_experiment(ex, tracker_params, iter_callback,
                                           args.iter_cap)
    t_elapsed = time.time() - t_start

    import os
    import pickle
    import uuid
    from datetime import datetime
    output_filename = os.path.join(args.output_dir,
                                   str(uuid.uuid4()) + '.log.pkl')
    print('Writing to', output_filename)
    out = {
        'rigid': rigid_mode,
        'tracker_params': tracker_params,
        'grid_params': ex.get_grid_params(),
        'log': ex_log,
        'datetime': datetime.now(),
        'time_elapsed': t_elapsed,
    }
    with open(output_filename, 'wb') as f:
        pickle.dump(out, f, pickle.HIGHEST_PROTOCOL)
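
# Hypothetical read-back of a saved run log (the path is a placeholder):
#   import pickle
#   with open('out/<uuid>.log.pkl', 'rb') as f:
#       saved = pickle.load(f)
#   print(saved['time_elapsed'], saved['rigid'])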
Example #8
# Setting p and number of repetitions per p
repetitions = 10
p_min, p_max = 1, 3

# Optimizer (minimizer_kwargs); scipy's Nelder-Mead now spells these
# tolerances 'fatol'/'xatol' (the old 'ftol'/'xtol' names were removed)
optimizer = {'method': 'Nelder-Mead', 'options': {'fatol': 1.0e-2, 'xatol': 1.0e-2, 'disp': False}}


# Producing the data
for key, value in graphs.items():
    graph_name = key
    G = value
    qubits = [int(n) for n in list(G.nodes)]
    for p in range(p_min,p_max + 1):
        output = pd.DataFrame()
    
        # specifications
        specs = {'graph': graph_name, 'p': p, 'optimizer': optimizer['method'], 'backend': backend_name, 'noise': False}
    
        for i in range(repetitions):
            # Running an experiment
            print("\nExperiment "+str(i+1)+"/"+str(repetitions)+", p = "+str(p))
    
            result = run_experiment(G, p, optimizer, qvm, print_result = True)
            result.update(specs)  # adding specifications to the dictionary

            # DataFrame.append was removed in pandas 2.0; use pd.concat
            output = pd.concat([output, pd.DataFrame([result])],
                               ignore_index=True)
    
        title = 'pyquil-' + specs['graph'] + '-' + specs['backend'] + '-noiseless-p' + str(p)
        output.to_csv('data/'+title+'.csv')
Example #9
        return (res[0].legacyId, res[0].firstName, res[0].lastName)
    else:
        return (None, None, None)


def remove_initials(s: str) -> str:
    return ' '.join([segment for segment in s.split(' ') if len(segment) > 1])


# From: https://stackoverflow.com/a/266162
def remove_punctuation(x: str) -> str:
    # some punctuation can be important to names
    # so we keep `-\'`
    return str_without(x, str_without(string.punctuation, '-\''))


def str_without(s: str, without: str) -> str:
    regex = re.compile('[%s]' % re.escape(without))
    return regex.sub('', s)


def remove_suffixes(s: str) -> str:
    return ' '.join(
        [segment for segment in s.split(' ') if segment not in SUFFIXES])


SUFFIXES = ['PhD', 'Jr', 'II', 'III', 'IV']
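
# A quick demonstration of the helpers above (assumes `import re, string` at
# the top of the original module; the sample name is made up):
#   remove_punctuation("John A. Smith Jr., PhD")  -> 'John A Smith Jr PhD'
#   remove_initials('John A Smith Jr PhD')        -> 'John Smith Jr PhD'
#   remove_suffixes('John Smith Jr PhD')          -> 'John Smith'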

if __name__ == '__main__':
    run_experiment('../experiment_2', mapping, delay=0)
Example #10
                    num_epoch=num_epoch,
                    lr_type='decay',
                    initial_lr=0.1,
                    tau=d,
                    regularizer=1 / n,
                    quantization='full',
                    # coordinates_to_keep=20,
                    n_cores=n_cores,
                    method='ad-psgd',
                    split_data_random_seed=random_seed,
                    distribute_data=True,
                    split_data_strategy=split_name,
                    topology='ring',
                    estimate='final')
            ]
        run_experiment("dump/epsilon-final-decentralized-ring-" + split_way+ "-" + str(n_cores)\
                       + "/", dataset_path, params, nproc=10)

    # AD-PSGD on ring topology
    if args.experiment in ['final']:
        params = []
        for random_seed in np.arange(1, n_repeat + 1):
            params += [
                Parameters(name="AD-PSGD-random",
                           num_epoch=num_epoch,
                           lr_type='decay',
                           initial_lr=0.1,
                           tau=d,
                           regularizer=1 / n,
                           quantization='random-unbiased',
                           coordinates_to_keep=20,
                           n_cores=n_cores,
Example #11
# List for results
results = []

# Run optimizations
for lam in lam_list:

    # Get model
    model = ParallelLSTMDecoding(p_in, p_out, hidden_size, hidden_layers,
                                 fc_units, lr, opt_type, lam)

    # Run experiment
    train_loss, val_loss, weights_list, forecasts_train, forecasts_val = run_experiment(
        model,
        X_train,
        Y_train,
        X_val,
        Y_val,
        nepoch,
        mbsize=mbsize,
        predictions=True,
        loss_check=1)

    # Create GC estimate grid
    GC_est = np.zeros((p_out, p_in))
    for target in range(p_out):
        W = weights_list[target]
        for candidate in range(p_in):
            start = candidate * lag
            end = (candidate + 1) * lag
            GC_est[target, candidate] = np.linalg.norm(W[:,
                                                         range(start, end)],
                                                       ord=2)
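
    # Hypothetical follow-up (not in the original snippet): binarize the
    # estimates; the 0.01 threshold is an arbitrary assumption.
    GC_binary = GC_est > 0.01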
Example #12
            logFile.write("********* " + timestamp + "\n")
            logFile.write("Running experiment: " + dataset + " - " +
                          leakage_model.name + " - " + dim_rdc_name)
            logFile.write("\n")
            logFile.write("Dataset size: train - " + str(len(trainSize[1])) +
                          ", test - " + str(len(testSize[1])))
            logFile.write("\n")

            if PREVIEW:
                continue

            # try to run the experiment
            ge = sr = gridSearch_res = fail_msg = None

            try:
                ge, sr, gridSearch_res = run_experiment(
                    data, aes_output_fn, dim_rdc, param_dict, GE_N_EXPERIMENTS)
            except Exception as e:
                print("Experiment failed:", e)
                fail_msg = str(e)

            # report to file
            # create file
            fileName = dataset + "_" + leakage_model.name + "_" + dim_rdc_name + ".txt"
            resFile = open("results/" + folderName + "/" + fileName, "w+")

            # write to file
            if fail_msg:
                resFile.write("!!!Experiment failed: " + fail_msg)
                logFile.write("!!!Experiment failed: " + fail_msg)
                logFile.write("\n")
            else:
Example #13
#!/usr/bin/env python3
from experiment import run_experiment
from experiment_3 import mapping

run_experiment('../instructors', mapping, delay=0)
Example #15
    optimum = brute_force(G)
    print("\nGraph " + str(i) + ": n = " + str(n), ", m = " + str(m),
          ", d = " + str(d), "the optimum cut is " + str(optimum) + "\n")

    if m > 0:  # Method does not work if there are no edges
        for p in range(p_min, p_max + 1):
            # specifications
            specs = {
                'graph_atlas index': i,
                'p': p,
                'optimizer': 'SPSA',
                'backend': backend.name(),
                'noise': False,
                'topology': False,
                'optimum': optimum
            }

            # Running an experiment
            print("\nGraph " + str(i), "p = " + str(p))
            result = run_experiment(G,
                                    p,
                                    optimizer,
                                    backend,
                                    print_result=True,
                                    n_shots=1000)
            result.update(specs)  # adding specifications to the dictionary

            # Writing output to file
            output = pd.concat([output, pd.DataFrame([result])],
                               ignore_index=True)
            output.to_csv('data/qiskit-graph_atlas-nshots_1000.csv')
Example #16
parser = argparse.ArgumentParser()
parser.add_argument('--dataset',
                    type=str,
                    default="compas",
                    help="dataset to run (compas, framingham)")
parser.add_argument('--eval_metric',
                    type=str,
                    default="xauc",
                    help="metric of ranking fairness, xauc or prf")
parser.add_argument(
    '--classifier',
    type=str,
    default="lr",
    help="classification model. lr for logistic regression, rb for rankboost")
parser.add_argument('--num_train',
                    type=int,
                    default=999999,
                    help="number of training examples")

if __name__ == "__main__":
    args = parser.parse_args()
    dataset, eval_metric, classifier, num_train = args.dataset, args.eval_metric, args.classifier, args.num_train
    # tee = subprocess.Popen(['tee', "results/{}_{}_{}_{}_sys.log".format(args.dataset,args.eval_metric,args.classifier,args.num_train)], stdin=subprocess.PIPE)
    # os.dup2(tee.stdin.fileno(), sys.stdout.fileno())
    # os.dup2(tee.stdin.fileno(), sys.stderr.fileno())

    print("Run experiment for classifier {}, metric {} on {} dataset".format(
        classifier, eval_metric, dataset))
    run_experiment(args)
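
# Hypothetical command line (the values shown are the declared defaults):
#   python main.py --dataset compas --eval_metric xauc --classifier lr --num_train 999999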
                Parameters(name="decentralized-exact",
                           num_epoch=num_epoch,
                           lr_type='decay',
                           initial_lr=0.1,
                           tau=d,
                           regularizer=1 / n,
                           quantization='full',
                           n_cores=n_cores,
                           method='plain',
                           split_data_random_seed=random_seed,
                           distribute_data=True,
                           split_data_strategy=split_name,
                           topology='ring',
                           estimate='final'),
            ]
        run_experiment("dump/epsilon-final-decentralized-" + split_way + "-" +\
                       str(n_cores) + "/", dataset_path, params, nproc=10)

    if args.experiment in ['final']:
        params = []
        for random_seed in np.arange(1, n_repeat + 1):
            params += [
                Parameters(name="centralized",
                           num_epoch=num_epoch,
                           lr_type='decay',
                           initial_lr=0.1,
                           tau=d,
                           regularizer=1 / n,
                           quantization='full',
                           n_cores=n_cores,
                           method='plain',
                           split_data_random_seed=random_seed,
Example #18
        specs = {
            'graph': graph_name,
            'p': p,
            'optimizer': 'NELDER_MEAD',
            'backend': 'qasm_simulator',
            'simulated backend': backend.name(),
            'noise': True,
            'topology': True
        }

        for i in range(repetitions):
            # Running an experiment
            print("\nExperiment " + str(i + 1) + "/" + str(repetitions) +
                  ", p = " + str(p))

            result = run_experiment(G,
                                    p,
                                    optimizer,
                                    Aer.get_backend('qasm_simulator'),
                                    print_result=True,
                                    noise_model=noise_model,
                                    coupling_map=coupling_map,
                                    basis_gates=basis_gates)
            result.update(specs)  # adding specifications to the dictionary

            output = pd.concat([output, pd.DataFrame([result])],
                               ignore_index=True)

        file_title = 'qiskit-' + specs['graph'] + '-' + specs[
            'simulated backend'] + '-on-' + specs['backend'] + '-p' + str(p)
        output.to_csv('data/' + file_title + '.csv')
Example #19
def experiment_wrapper(parameter_file):
    run_experiment(parameter_file)
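
# A thin wrapper like this typically exists so the call can be pickled and
# handed to a process pool or job scheduler; a hypothetical driver (the
# parameter-file paths are made up):
#   from multiprocessing import Pool
#   with Pool() as pool:
#       pool.map(experiment_wrapper, ['params_a.yml', 'params_b.yml'])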
Example #20
def main():

    parser = argparse.ArgumentParser(
        'a program to train or run a deep q-learning agent')
    parser.add_argument("game", type=str, help="name of game to play")
    parser.add_argument("agent_type",
                        type=str,
                        help="name of learning/acting technique used")
    parser.add_argument("agent_name",
                        type=str,
                        help="unique name of this agent instance")
    parser.add_argument("--rom_path",
                        type=str,
                        help="path to directory containing atari game roms",
                        default='../roms')
    parser.add_argument(
        "--watch",
        help=
        "if true, a pretrained model with the specified name is loaded and tested with the game screen displayed",
        action='store_true')

    parser.add_argument("--epochs",
                        type=int,
                        help="number of epochs",
                        default=200)
    parser.add_argument("--epoch_length",
                        type=int,
                        help="number of steps in an epoch",
                        default=250000)
    parser.add_argument("--test_steps",
                        type=int,
                        help="max number of steps per test",
                        default=125000)
    parser.add_argument("--test_steps_hardcap",
                        type=int,
                        help="absolute max number of steps per test",
                        default=135000)
    parser.add_argument("--test_episodes",
                        type=int,
                        help="max number of episodes per test",
                        default=30)
    parser.add_argument("--history_length",
                        type=int,
                        help="number of frames in a state",
                        default=4)
    parser.add_argument("--training_frequency",
                        type=int,
                        help="number of steps run before training",
                        default=4)
    parser.add_argument(
        "--random_exploration_length",
        type=int,
        help=
        "number of randomly-generated experiences to initially fill experience memory",
        default=50000)
    parser.add_argument("--initial_exploration_rate",
                        type=float,
                        help="initial exploration rate",
                        default=1.0)
    parser.add_argument("--final_exploration_rate",
                        type=float,
                        help="final exploration rate from linear annealing",
                        default=0.1)
    parser.add_argument(
        "--final_exploration_frame",
        type=int,
        help="frame at which the final exploration rate is reached",
        default=1000000)
    parser.add_argument("--test_exploration_rate",
                        type=float,
                        help="exploration rate while testing",
                        default=0.05)
    parser.add_argument("--frame_skip",
                        type=int,
                        help="number of frames to repeat chosen action",
                        default=4)
    parser.add_argument("--screen_dims",
                        type=tuple,
                        help="dimensions to resize frames",
                        default=(84, 84))
    # used for stochasticity and to help prevent overfitting.
    # Must be greater than frame_skip * (observation_length -1) + buffer_length - 1
    parser.add_argument("--max_start_wait",
                        type=int,
                        help="max number of frames to wait for initial state",
                        default=60)
    # buffer_length = 1 prevents blending
    parser.add_argument("--buffer_length",
                        type=int,
                        help="length of buffer to blend frames",
                        default=2)
    parser.add_argument("--blend_method",
                        type=str,
                        help="method used to blend frames",
                        choices=('max'),
                        default='max')
    parser.add_argument("--reward_processing",
                        type=str,
                        help="method to process rewards",
                        choices=('clip', 'none'),
                        default='clip')
    # must set network_architecture to custom in order use custom architecture
    parser.add_argument(
        "--conv_kernel_shapes",
        type=tuple,
        help=
        "shapes of convnet kernels: ((height, width, in_channels, out_channels), (next layer))"
    )
    # must have same length as conv_kernel_shapes
    parser.add_argument(
        "--conv_strides",
        type=tuple,
        help="convnet strides: ((1, height, width, 1), (next layer))")
    # currently,  you must have at least one dense layer
    parser.add_argument(
        "--dense_layer_shapes",
        type=tuple,
        help="shapes of dense layers: ((in_size, out_size), (next layer))")
    parser.add_argument("--discount_factor",
                        type=float,
                        help="constant to discount future rewards",
                        default=0.99)
    parser.add_argument("--learning_rate",
                        type=float,
                        help="constant to scale parameter updates",
                        default=0.00025)
    parser.add_argument("--optimizer",
                        type=str,
                        help="optimization method for network",
                        choices=('rmsprop', 'graves_rmsprop'),
                        default='rmsprop')
    parser.add_argument("--rmsprop_decay",
                        type=float,
                        help="decay constant for moving average in rmsprop",
                        default=0.95)
    parser.add_argument("--rmsprop_epsilon",
                        type=int,
                        help="constant to stabilize rmsprop",
                        default=0.01)
    # set error_clipping to less than 0 to disable
    parser.add_argument(
        "--error_clipping",
        type=float,
        help="constant at which td-error becomes linear instead of quadratic",
        default=1.0)
    # set gradient clipping to 0 or less to disable.  Currently only works with graves_rmsprop.
    parser.add_argument("--gradient_clip",
                        type=str,
                        help="clip gradients to have the provided L2-norm",
                        default=0)
    parser.add_argument("--target_update_frequency",
                        type=int,
                        help="number of steps between target network updates",
                        default=10000)
    parser.add_argument(
        "--memory_capacity",
        type=int,
        help="max number of experiences to store in experience memory",
        default=1000000)
    parser.add_argument(
        "--batch_size",
        type=int,
        help="number of transitions sampled from memory during learning",
        default=32)
    # must set to custom in order to specify custom architecture
    parser.add_argument("--network_architecture",
                        type=str,
                        help="name of prespecified network architecture",
                        choices=("deepmind_nips", "deepmind_nature, custom"),
                        default="deepmind_nature")
    parser.add_argument("--recording_frequency",
                        type=int,
                        help="number of steps before tensorboard recording",
                        default=50000)

    parser.add_argument("--saving_threshold",
                        type=int,
                        help="min score threshold for saving model.",
                        default=0)

    parser.add_argument("--parallel",
                        help="parallelize acting and learning",
                        action='store_true')
    parser.add_argument(
        "--double_dqn",
        help="use double q-learning algorithm in error target calculation",
        action='store_true')
    args = parser.parse_args()

    if args.network_architecture == 'deepmind_nature':
        args.conv_kernel_shapes = [[8, 8, 4, 32], [4, 4, 32, 64],
                                   [3, 3, 64, 64]]
        args.conv_strides = [[1, 4, 4, 1], [1, 2, 2, 1], [1, 1, 1, 1]]
        args.dense_layer_shapes = [[3136, 512]]
    elif args.network_architecture == 'deepmind_nips':
        args.conv_kernel_shapes = [[8, 8, 4, 16], [4, 4, 16, 32]]
        args.conv_strides = [[1, 4, 4, 1], [1, 2, 2, 1]]
        args.dense_layer_shapes = [[2592, 256]]

    if not args.watch:
        train_stats = RecordStats(args, False)
        test_stats = RecordStats(args, True)
        training_emulator = AtariEmulator(args)
        testing_emulator = AtariEmulator(args)
        num_actions = len(training_emulator.get_possible_actions())
        experience_memory = ExperienceMemory(args, num_actions)

        q_network = None
        agent = None
        if args.parallel:
            q_network = ParallelQNetwork(args, num_actions)
            agent = ParallelDQNAgent(args, q_network, training_emulator,
                                     experience_memory, num_actions,
                                     train_stats)
        else:
            q_network = QNetwork(args, num_actions)
            agent = DQNAgent(args, q_network, training_emulator,
                             experience_memory, num_actions, train_stats)

        experiment.run_experiment(args, agent, testing_emulator, test_stats)

    else:
        testing_emulator = AtariEmulator(args)
        num_actions = len(testing_emulator.get_possible_actions())
        q_network = QNetwork(args, num_actions)
        agent = DQNAgent(args, q_network, None, None, num_actions, None)
        experiment.evaluate_agent(args, agent, testing_emulator, None)
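
# Hypothetical invocations (game and agent names are placeholders; flags as
# declared above):
#   python main.py breakout dqn agent_01 --epochs 100 --parallel
#   python main.py breakout dqn agent_01 --watch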
Example #21
def call_experiment(args):
    kwargs = {}

    model = args['model']['model']

    if model == 'LR':
        kwargs['regularization'] = args['model']['regularization']
    elif model == 'SVMNB':
        kwargs['beta'] = args['model']['beta']
    elif model == 'SVM':
        ktype = args['model']['kernel']['ktype']
        kwargs['kernel'] = ktype
        if ktype == 'poly':
            kwargs['degree'] = args['model']['kernel']['degree']
    feature_list = []
    feature_list.append(make_feature(args, 'unigrams', 'ngrams', n=True))
    feature_list.append(make_feature(args, 'bigrams', 'ngrams', n=True))
    feature_list.append(make_feature(args, 'pos_tags', 'list', lower=False, min_df=False))
    feature_list.append(make_feature(args, 'ner', 'list', lower=False, min_df=False))
    feature_list.append(make_feature(args, 'dependencies', 'list'))
    feature_list.append(make_feature(args, 'dependency_links', 'list', min_df=False, lower=False))
    feature_list.append(make_feature(args, 'jkgrams', 'list'))
    feature_list.append(make_feature(args, 'sentiments', 'list', min_df=False, lower=False))
    feature_list.append(make_feature(args, 'semantics', 'list', min_df=False))
    feature_list.append(make_feature(args, 'brown_clusters', 'list', min_df=False, lower=False, shorten=True))
    feature_list.append(make_feature(args, 'amalgram', 'list', min_df=False))
    #feature_list.append(make_feature(args, 'wikilinks', 'list'))
    feature_list.append(make_feature(args, 'lda', 'pkl', min_df=False, lower=False))
    feature_list.append(make_feature(args, 'personas', 'pkl', min_df=False, lower=False))
    feature_list.append(make_feature(args, 'storytypes', 'pkl', min_df=False, lower=False))
    feature_list = [f for f in feature_list if f is not None]

    additional_label_files = []
    additional_weights = []
    if add_pseudo:
        for label_type in args['psuedo-documents']:
            if args['psuedo-documents'][label_type]['use']:
                additional_label_files.append(fh.get_basename_wo_ext(label_file) + '_' + label_type)
                additional_weights.append(args['psuedo-documents'][label_type]['weight'])
    if len(additional_label_files) == 0:
        additional_label_files = None
    if len(additional_weights) == 0:
        additional_weights = None


    kwargs['reuse'] = bool(reuse)

    alpha = None
    if search_alpha:
        kwargs['best_alpha'] = float(args['alpha'])

    base_dir = os.path.split(output_dirname)[0]
    basename = fh.get_basename_wo_ext(output_dirname)
    existing_dirs = glob.glob(os.path.join(base_dir, basename + '*'))
    max_num = 0
    for d in existing_dirs:
        match = re.search(basename + r'_(\d+)', d)
        if match is not None:
            num = int(match.group(1))
            if num > max_num:
                max_num = num

    name = fh.get_basename_wo_ext(output_filename) + '_' + str(max_num + 1)

    print(feature_list)
    print(additional_label_files)
    print(additional_weights)
    if len(feature_list) == 0:
        # TODO: add in support for default (with no features)
        result = {'loss': 0, 'test_f1': 0, 'status': STATUS_OK}
    else:
        result = experiment.run_experiment(name, label_file, target, test_fold, feature_list, model_type=model,
                                           n_dev_folds=n_dev_folds, verbose=verbose, weight_col=weight_col,
                                           additional_label_files=additional_label_files,
                                           additional_label_weights=additional_weights,
                                           metric='mse', **kwargs)
    print(result)

    with codecs.open(output_filename, 'a') as output_file:
        output_file.write(str(datetime.datetime.now()) + '\t' + name + '\t' +
                          str(-result['loss']) + '\t' + str(result['test_f1']) + '\n')

    return result
Example #22
# -*- coding: utf-8 -*-
# @Author: Ananth Ravi Kumar
# @Date:   2020-06-08 15:59:07
# @Last Modified by:   Ananth
# @Last Modified time: 2020-10-21 23:37:16

from experiment import run_experiment

all_results = run_experiment("pairwise",
                             "vae",
                             "cifar10", [0.75], [3],
                             num_iters=10,
                             batch_size=32,
                             normalize=True,
                             feature_extractor='hog')
Example #23
import json

from experiment import run_experiment
from preprocessing import preprocess

if __name__ == '__main__':
    partitions = preprocess()
    for df in partitions:
        print(df.head())

    experiments = []
    for model in [
            'naive_bayes', 'knn', 'logistic_regression', 'svm',
            'decision_tree', 'neural_network'
    ]:
        if model in ['neural_network']:
            continue
        for document in ['bag_of_words', 'tf_idf', 'word_embedding']:
            payload = run_experiment(partitions, document, model, None)
            experiments.append(payload['results'])

            print('f1\t\tacc\t\troc_auc')
            print(payload['results']['f1_score'],
                  payload['results']['accuracy_score'],
                  payload['results']['roc_auc_score'])
            print('-' * 80)

    with open('output.json', 'w', encoding='utf-8') as fp:
        json.dump(experiments, fp, ensure_ascii=False, indent=4)
Example #25
def run(log):
    log.info("Running experiment")
    exp.run_experiment()
Example #26
    qubits = [int(n) for n in list(G.nodes)]
    for p in range(p_min, p_max + 1):
        output = pd.DataFrame()

        # specifications
        specs = {
            'graph': graph_name,
            'p': p,
            'optimizer': optimizer['method'],
            'backend': backend_name,
            'noise': False
        }

        for i in range(repetitions):
            # Running an experiment
            print("\nExperiment " + str(i + 1) + "/" + str(repetitions) +
                  ", p = " + str(p))

            result = run_experiment(G,
                                    p,
                                    optimizer,
                                    backend,
                                    print_result=True)
            result.update(specs)  # adding specifications to the dictionary

            output = pd.concat([output, pd.DataFrame([result])],
                               ignore_index=True)

        title = 'pyquil-' + specs['graph'] + '-' + specs[
            'backend'] + '-noiseless-p' + str(p)
        output.to_csv('data/' + title + '.csv')
Example #27
p_in = train.shape[1] - 1
penalty_groups = list(np.arange(0, p_in + 1))

X_train = train[:, :-1] 
Y_train = train[:, -1]
X_val = val[:, :-1]
Y_val = val[:, -1]

# Determine architecture
hidden_units = [20]

# Prepare model
model = IIDEncoding(p_in, p_out, hidden_units, lr, opt, lam, penalty_groups,
                    nonlinearity='sigmoid', task='classification')

# Run experiment
train_loss, val_loss, train_accuracy, val_accuracy, weights = run_experiment(
    model, X_train, Y_train, X_val, Y_val, nepoch, mbsize=mbsize, loss_check=10)

# Save results
results = {
	'nepoch': nepoch,
	'lr': lr,
	'opt': opt,
	'lam': lam,
	'batchsize': batchsize,
	'seed': seed,
	'train_loss': train_loss,
	'train_accuracy': train_accuracy,
	'val_loss': val_loss,
	'val_accuracy': val_accuracy,
	'model': model
}
Example #28
# List for results
results = []

# Run optimizations
for lam in lam_list:

    # Get model
    model = ParallelMLPEncoding(p_in, p_out, lag, hidden_units, lr, opt_type,
                                lam, penalty_type)

    # Run experiment
    train_loss, val_loss, weights_list = run_experiment(model,
                                                        X_train,
                                                        Y_train,
                                                        X_val,
                                                        Y_val,
                                                        nepoch,
                                                        mbsize=mbsize)

    # Create GC estimate grid
    GC_est = np.zeros((p_out, p_in))
    for target in range(p_out):
        W = weights_list[target]
        for candidate in range(p_in):
            start = candidate * lag
            end = (candidate + 1) * lag
            GC_est[target, candidate] = np.linalg.norm(W[:,
                                                         range(start, end)],
                                                       ord=2)
Example #29
    t.start()

    all_relevant_config = {
        'experiment': experiment_config,
        'agents': relevant_agent_configuration
    }
    with open('{}/experiment_parameters.yml'.format(experiment_directory),
              'w') as outfile:
        yaml.dump(all_relevant_config, outfile, default_flow_style=False)

    experiment_durations = []
    for run_id in range(number_of_runs):
        logger.info(f'Starting run: {run_id}')
        start_time = time.time()
        experiment.run_experiment(experiment_id, experiment_directory, run_id,
                                  experiment_config,
                                  relevant_agent_configuration, seeds[run_id])
        experiment_duration = time.time() - start_time
        experiment_durations.append(experiment_duration)
        logger.info('Finished run: {}. Duration: {} (seconds)\n'.format(
            run_id, experiment_duration))

    total_experiment_duration = sum(experiment_durations)
    average_experiment_duration = np.mean(experiment_durations)
    standard_deviation_experiment_duration = np.std(experiment_durations)

    logger.info(
        'Total experiment duration: {}'.format(total_experiment_duration))
    logger.info(
        'Experiment mean run duration: {}'.format(average_experiment_duration))
    logger.info('Experiment std dev duration:  {}'.format(