Code example #1
File: main.py Project: ftakelait/diseases-ner
def main(argv):
    opts = []  # initialized so the except-branch prints below cannot hit an unbound name
    try:
        opts, _ = getopt.getopt(argv, "st:p:")
        if len(opts) != 1:
            raise getopt.GetoptError('Bad argument')

        opt, _ = opts[0]
        if opt == '-s':
            scrapper_script.run(logging)
        elif opt == '-t':
            if len(argv) != 2:
                raise getopt.GetoptError('Bad argument')

            train_script.run(logging, argv[1])
        elif opt == '-p':
            if len(argv) != 3:
                raise getopt.GetoptError('Bad argument')

            entities = predict_script.run(logging, argv[1], argv[2])
            for e in entities:
                print(e)
        else:
            raise getopt.GetoptError('Bad argument')
    except getopt.GetoptError:
        print(
            'Usage: main.py [-s] [-t "model name"] [-p "model name" "Text here"]'
        )
        print(opts)
        print(argv)
        exit(2)
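A note on invocation: main() expects the argument vector without the program name, since getopt parses options from it directly. A minimal entry-point sketch, assuming the surrounding main.py imports getopt and the project scripts (not shown in the excerpt):

import sys

if __name__ == '__main__':
    main(sys.argv[1:])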
Code example #2
def run_model(args, node_embeddings, action_embeddings, rnn_hidden_dim_1s, rnn_hidden_dim_2s, learning_rates):
    for node_embedding in node_embeddings:
        for action_embedding in action_embeddings:
            for rnn_hidden_dim_1 in rnn_hidden_dim_1s:
                for rnn_hidden_dim_2 in rnn_hidden_dim_2s:
                    for learning_rate in learning_rates:
                        args_copy = args.copy()
                        args_copy['--run-name'] = f'rnn_best_model__{args["--model"]}__ne__{node_embedding}__ae{action_embedding}__rnn1{rnn_hidden_dim_1}__rnn2{rnn_hidden_dim_2}__lr{learning_rate}'
                        args_copy['--hypers-override'] = json.dumps({
                            'action_embedding_size': action_embedding,
                            'rnn_hidden_dim_1': rnn_hidden_dim_1,
                            'learning_rate': learning_rate,
                        })
                        train.run(args_copy)

                        run_name = f"{args_copy['--run-name']}_best_model.bin"
                        accs = evaluate({
                            '--model': args['--model'],
                            '--saved-data-dir': args['--saved-data-dir'],
                            '--trained-model': os.path.join(args['--save-dir'], run_name),
                            '--validation-only': True,
                            '--qualitative': False
                        })

                        log_file_hyper_params.write("%15s  |  %15s  |  %15s  |  %15s  |  %15s  |  %15s  | %15s\n" %
                                                    (node_embedding, action_embedding, rnn_hidden_dim_1,
                                                     rnn_hidden_dim_2, learning_rate, accs[0].numpy(),
                                                     run_name))
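run_model() above performs an exhaustive grid search: it trains one model per combination, evaluates on the validation set, and appends a row to the global log_file_hyper_params (note that node_embedding and rnn_hidden_dim_2 vary the run name but are not part of --hypers-override in this excerpt). A hedged driver sketch; the value grids and log path are illustrative, not from the original project:

# Hypothetical usage; run_model() writes to the module-level log file.
log_file_hyper_params = open('hyperparam_search.log', 'w')
run_model(args,
          node_embeddings=[64, 128],
          action_embeddings=[32, 64],
          rnn_hidden_dim_1s=[128],
          rnn_hidden_dim_2s=[64],
          learning_rates=[1e-3, 1e-4])
log_file_hyper_params.close()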
Code example #3
def create_repeats(sim_name, save_subfolder, settings, num_repeats,
                   food_summer, food_winter, only_fittest):
    settings = copy.deepcopy(settings)

    if only_fittest:
        pop_size = only_fittest_individuals(sim_name, save_subfolder)
        settings['pop_size'] = pop_size

    complete_sim_folder = '{}/{}'.format(save_subfolder, sim_name)
    settings['loadfile'] = complete_sim_folder

    settings['iter'] = detect_all_isings(complete_sim_folder)[-1]
    settings['LoadIsings'] = True
    settings['switch_off_evolution'] = True
    settings['save_data'] = False
    settings['switch_seasons_repeat_pipeline'] = True
    # Animations:
    settings['plot_generations'] = [1]

    #  Number of repeats
    # Iterations = 200
    Iterations = num_repeats

    settings['repeat_pipeline_switched_boo'] = False
    train.run(settings, Iterations)

    #  switch seasons
    if settings['food_num'] == food_summer:
        settings['food_num'] = food_winter
    elif settings['food_num'] == food_winter:
        settings['food_num'] = food_summer

    settings['repeat_pipeline_switched_boo'] = True
    train.run(settings, Iterations)
Code example #4
def main(
    config: Optional[dict],
    cpus_per_trial: int,
    data: Path,
    gpus_per_trial: int,
    local_mode: bool,
    n_samples: int,
    name: str,
    seeds: List[int],
    **kwargs,
):
    for k, v in kwargs.items():
        if v is not None or k not in config:
            config[k] = v
    seed = config.get("seed")
    if not seed:
        if not seeds:
            raise RuntimeError(
                "Either seed must be set or seeds must be non-empty")
        elif len(seeds) == 1:
            seed = seeds[0]
        else:
            seed = tune.grid_search(seeds)
    config.update(seed=seed)

    config.update(data=data.absolute())
    if n_samples or local_mode:
        config.update(report=tune.report)
        ray.init(dashboard_host="127.0.0.1", local_mode=local_mode)
        kwargs = dict()
        if any(isinstance(v, Apply) for v in config.values()):
            kwargs = dict(
                search_alg=HyperOptSearch(config, metric="test_loss"),
                num_samples=n_samples,
            )

        def _run(c):
            run(**c)

        tune.run(
            _run,
            name=name,
            config=config,
            resources_per_trial=dict(gpu=gpus_per_trial, cpu=cpus_per_trial),
            **kwargs,
        )
    else:

        def report(**kwargs):
            print(
                tabulate(
                    {k: [v]
                     for k, v in kwargs.items()},
                    headers="keys",
                    tablefmt="pretty",
                ))

        config.update(report=report)
        run(**config)
Code example #5
def main(_):
    # Set up some parameters.
    config = FLAGS
    model_name = config.name + \
                 (config.n_q_agents > 0 and ('qa' + str(config.n_q_agents) + '_') or '') + \
                 (config.n_q2_agents > 0 and ('q2a' + str(config.n_q2_agents) + '_') or '') + \
                 (config.n_e_agents > 0 and ('ea' + str(config.n_e_agents) + '_') or '') + \
                 (config.n_titdat_agents > 0 and ('ta' + str(config.n_titdat_agents) + '_') or '') + \
                 (config.n_c_agents > 0 and ('ca' + str(config.n_c_agents) + '_') or '') + \
                 (config.n_d_agents > 0 and ('da' + str(config.n_d_agents) + '_') or '') + \
                 'state' + str(config.state_size) + '_' + \
                 'lr' + str(config.learning_rate) + '_' + \
                 'lr_decay' + str(config.lr_decay) + '_' + \
                 'n_episodes' + str(config.n_episodes) + '_' + \
                 'n_batches' + str(config.n_batches) + '_' + \
                 'discount' + str(config.discount) + '_' + \
                 'e' + str(config.e) + '_' + \
                 'adapt' + str(config.adapt) + '_' + \
                 'r' + str(config.reward) + '_' + \
                 't' + str(config.temptation) + '_' + \
                 's' + str(config.sucker) + '_' + \
                 'p' + str(config.punishment)

    config.model_output_path = FLAGS.model_output or os.path.join(
        'train/', model_name + '/')
    config.model_output = FLAGS.model_output or os.path.join(
        FLAGS.model_output_path, 'model.ckpt')
    config.log_output = FLAGS.log_output or os.path.join('log/', model_name)
    config.debug = True  # debug mode
    config.n_agents = config.n_q_agents + config.n_q2_agents + config.n_e_agents + config.n_titdat_agents + config.n_c_agents + config.n_d_agents

    # set up logger
    dirpath = os.path.dirname(config.log_output)
    if not os.path.exists(dirpath):
        os.makedirs(dirpath)
    if not os.path.exists(config.model_output_path):
        os.makedirs(config.model_output_path)
    handler = logging.FileHandler(config.log_output)
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(
        logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
    logging.getLogger().addHandler(handler)

    logger = logging.getLogger("209_project")
    logger.setLevel(logging.DEBUG)
    logging.basicConfig(format='%(levelname)s:%(message)s',
                        level=logging.DEBUG)

    logger.info('Config:')
    pp = pprint.PrettyPrinter(indent=1)
    logger.info(pp.pformat(config.__flags))
    logger.info('\n\n\n')

    if config.n_agents <= 0:
        logger.info('number of agents must be greater than 0')
        exit(1)

    # run model
    train.run(config)
Code example #6
def run_repeat(num_foods, settings, pipeline_settings):

    settings['food_num'] = num_foods
    settings[
        'dynamic_range_pipeline_save_name'] = 'dynamic_range_run_foods_{}'.format(
            num_foods)
    Iterations = pipeline_settings['num_repeats']
    train.run(settings, Iterations)
Code example #7
File: main.py Project: yuanluw/streetCharDetection
def main():
    arg = get_augments()
    if arg.action == "train":
        from train import run
        run(arg)
    elif arg.action == "test":
        from test import run
        run(arg)
Code example #8
File: train_multiprocess.py Project: zxr8192/singa
def run(args, local_rank, world_size, nccl_id):
    sgd = opt.SGD(lr=args.lr, momentum=0.9, weight_decay=1e-5)
    sgd = opt.DistOpt(sgd,
                      nccl_id=nccl_id,
                      local_rank=local_rank,
                      world_size=world_size)
    train.run(sgd.global_rank, sgd.world_size, sgd.local_rank, args.max_epoch,
              args.batch_size, args.model, args.data, sgd, args.graph,
              args.dist_option, args.spars)
Code example #9
def main(_):
    FLAGS = tf.app.flags.FLAGS
    pp = pprint.PrettyPrinter()
    FLAGS._parse_flags()
    pp.pprint(FLAGS.__flags)

    # Load embedding
    emb_matrix, char2id, id2char = Helper.get_embedding(FLAGS.embedding_path)
    print "Load embedding"

    # Directly load data into list
    train_data_list = Helper.read_json_file(FLAGS.train_json_path)
    print 'Train data num:', len(train_data_list)
    test_data_list = Helper.read_json_file(FLAGS.test_json_path)
    # test_data_list =None

    # Create model storage directories
    if not os.path.exists(FLAGS.ckpt_dir):
        os.makedirs(FLAGS.ckpt_dir)

    timestamp = datetime.now().strftime('%c')
    FLAGS.log_dir = os.path.join(FLAGS.log_dir, timestamp)
    if not os.path.exists(FLAGS.log_dir):
        os.makedirs(FLAGS.log_dir)

    # Gpu number
    os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.gpu
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    # Train Model
    with tf.Session(config=config) as sess:
        model = SeaReader(FLAGS.doc_max_len, FLAGS.top_n,
                          FLAGS.statement_max_len, FLAGS.hidden_dim,
                          FLAGS.answer_num, FLAGS.embedding_dim, emb_matrix,
                          FLAGS.learning_rate, sess)
        saver = tf.train.Saver(max_to_keep=50)

        # Run evaluation
        if FLAGS.evaluate:
            print '[?] Test run'
            if not FLAGS.restore_file:
                print('Need to specify a restore_file checkpoint to evaluate')
            else:
                print('[?] Loading variables from checkpoint %s' %
                      FLAGS.restore_file)
                saver.restore(sess, FLAGS.restore_file)
                test.run(FLAGS, sess, model, test_data_list, char2id)
        elif FLAGS.debug_run:
            print '[?] Debug run'
            train.debug_run(FLAGS, sess, model, train_data_list,
                            test_data_list, char2id, saver)
        else:
            print '[?] Run'
            train.run(FLAGS, sess, model, train_data_list, test_data_list,
                      char2id, saver)
Code example #10
File: launch_batch.py Project: a3lab/crocodile
def run_with_logger(args):
    from logger import ExpvizLogger
    log_dir = os.path.join(
        args.output_path,
        "exp_%i_%i/" % (int(time.time()), np.random.randint(9999)))
    logger = ExpvizLogger(projectname=PROJECT_NAME,
                          hyperparams=args,
                          log_dir=log_dir,
                          expname=NAME)
    run(args, logger)
Code example #11
File: veres.py Project: gifford-lab/prescient
def main():

    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--seed', type=int, default=0)
    parser.add_argument('--no-cuda', action='store_true')
    parser.add_argument('--device', default=7, type=int)
    parser.add_argument('--out_dir', default='./experiments')
    # -- data options
    parser.add_argument('--data_path')
    parser.add_argument('--data_dir')
    parser.add_argument('--weight_path', default=None)
    # -- model options
    parser.add_argument('--loss', default='euclidean')
    parser.add_argument('--k_dim', default=500, type=int)
    parser.add_argument('--activation', default='softplus')
    parser.add_argument('--layers', default=1, type=int)
    # -- pretrain options
    parser.add_argument('--pretrain_lr', default=1e-9, type=float)
    parser.add_argument('--pretrain_epochs', default=500, type=int)
    # -- train options
    parser.add_argument('--train_epochs', default=5000, type=int)
    parser.add_argument('--train_lr', default=0.01, type=float)
    parser.add_argument('--train_dt', default=0.1, type=float)
    parser.add_argument('--train_sd', default=0.5, type=float)
    parser.add_argument('--train_tau', default=0, type=float)
    parser.add_argument('--train_batch', default=0.1, type=float)
    parser.add_argument('--train_clip', default=0.25, type=float)
    parser.add_argument('--save', default=100, type=int)
    # -- test options
    parser.add_argument('--evaluate_n', default=10000, type=int)
    parser.add_argument('--evaluate_data')
    parser.add_argument('--evaluate-baseline', action='store_true')
    # -- run options
    parser.add_argument('--task', default='fate')
    parser.add_argument('--train', action='store_true')
    parser.add_argument('--evaluate')
    parser.add_argument('--config')
    args = parser.parse_args()

    if args.task == 'fate':

        if args.train:

            args.pretrain = True
            args.train = True

            train.run(args, train_fate)

        if args.evaluate == 'fit':

            if args.config:
                config = SimpleNamespace(**torch.load(args.config))
                evaluate_fit(args, config)
            else:
                print('Please provide a config file')
Code example #12
def main(_):
    FLAGS = tf.app.flags.FLAGS
    pp = pprint.PrettyPrinter()
    FLAGS._parse_flags()
    pp.pprint(FLAGS.__flags)

    # Load Data
    X_train, Q_train, Y_train = data_helper.load_data('train')
    X_test, Q_test, Y_test = data_helper.load_data('valid')

    vocab_size = np.max(X_train) + 1
    print('[?] Vocabulary Size:', vocab_size)

    # Create directories
    if not os.path.exists(FLAGS.ckpt_dir):
        os.makedirs(FLAGS.ckpt_dir)

    timestamp = datetime.now().strftime('%c')
    FLAGS.log_dir = os.path.join(FLAGS.log_dir, timestamp)
    if not os.path.exists(FLAGS.log_dir):
        os.makedirs(FLAGS.log_dir)

    # Train Model
    with tf.Session(config=tf.ConfigProto(
            log_device_placement=False,
            allow_soft_placement=True)) as sess, tf.device('/gpu:0'):
        model = AlternatingAttention(FLAGS.batch_size,
                                     vocab_size,
                                     FLAGS.encoding_dim,
                                     FLAGS.embedding_dim,
                                     FLAGS.num_glimpses,
                                     session=sess)

        if FLAGS.trace:  # Trace model for debugging
            train.trace(FLAGS, sess, model, (X_train, Q_train, Y_train))
            return

        saver = tf.train.Saver()

        if FLAGS.restore_file is not None:
            print('[?] Loading variables from checkpoint %s' %
                  FLAGS.restore_file)
            saver.restore(sess, FLAGS.restore_file)

        # Run evaluation
        if FLAGS.evaluate:
            if not FLAGS.restore_file:
                print('Need to specify a restore_file checkpoint to evaluate')
            else:
                test_data = data_helper.load_data('test')
                word2idx, _, _ = data_helper.build_vocab()
                test.run(FLAGS, sess, model, test_data, word2idx)
        else:
            train.run(FLAGS, sess, model, (X_train, Q_train, Y_train),
                      (X_test, Q_test, Y_test), saver)
Code example #13
def main():
    args = get_augments()
    conf = __import__(args.conf, globals(), locals(), ["Config"]).Config()
    if args.action == "train":
        import train
        train.run(conf,
                  args.net,
                  args.lr,
                  args.decay,
                  args.epochs,
                  pre_train=args.pre_train)
Code example #14
def run_repeat(gene_perturb, isings_orig, settings, pipeline_settings):

    settings['save_energies_velocities_last_gen'] = pipeline_settings['save_energies_velocities']
    print('Genetic perturbation with factor {}'.format(gene_perturb))

    settings['dynamic_range_pipeline_save_name'] = '{}genotype_phenotype_mapping_{}'.format(pipeline_settings['add_save_file_name'], gene_perturb)

    perturbed_isings = mutate_genotype_main(isings_orig, gene_perturb, pipeline_settings['genetic_perturbation_constant']
                                            , pipeline_settings['number_of_edges_to_perturb'], settings)

    settings['set_isings'] = perturbed_isings
    Iterations = pipeline_settings['num_repeats']
    train.run(settings, Iterations)
Code example #16
def main():
    model = ""
    opts, args = getopt.getopt(sys.argv[1:], "ht:p:m:")
    for op, value in opts:
        if op == "-h":
            usage()
            sys.exit()
        elif op == "-t":
            input_file = value
        elif op == "-p":
            input_file = value
        elif op == "-m":
            model = value

    input_wav = ffmpeg.convert(input_file)
    if model == "":
        input_wav = "20160203c.wav"
        songs_20160203c = [[0, 265], [1028, 1245], [1440, 1696], [2177, 2693]]
        song_dump(songs_20160203c)
        model = train.run(input_wav, songs_20160203c)
    Y, delimit_points = predict.run(input_wav, model)
    for i in delimit_points:
        print str(i / 3600) + ":" + str(i % 3600 / 60) + ":" + str(i % 60)

    ffmpeg.cut(input_file, delimit_points)

    plt.figure()
    plt.plot(-0.2)
    plt.plot(1.2)
    if model == "":
        plt.plot(songs_20160203c, 'b')
    plt.plot(Y, 'r')
    plt.show()
Code example #17
def partial_train_and_decode_and_eval():
    file_length = file_len('heb-pos.train')
    print str(file_length)
    tenth = file_length / 10
    print tenth
    for n in range(1, 11, 1):
        print n
        with open("heb-pos.train") as train_file:
            head = [next(train_file) for x in xrange(tenth * n)]
        with open("exps/partial.train", "w+") as partial:
            for line in head:
                partial.write(line)
        train.run('2', "exps/partial.train", 'y')
        decode.run('2', 'heb-pos.test', 'exps/hmm-part-smooth-y.lex',
                   'exps/hmm-part-smooth-y.gram')
        evaluate.run('results/hmm.tagged', 'heb-pos.gold', '2', 'y')
Code example #18
def launch_closerlook(params):
    import sys
    sys.path.insert(0, '/private/home/sbaio/aa/dataset_design_few_shot/cl_fsl')
    from train import run
    from save_features import run_save
    from test import run_test
    print('Launching Closer Look training with params', params)

    # train
    run(params)

    # save features
    run_save(params)

    # test
    run_test(params)
Code example #19
def generate_images(run_name, output_dir, n_samples=20480, verbose=False):
    import train
    import tensorflow as tf

    path = pathlib.Path(run_name)
    if str(path.name).startswith('model-'):  # assume we're given a checkpoint
        ckpt = str(path.name)
        path = path.parent
        ckpt = path / ckpt
    else:
        with open(path / 'checkpoint') as f:
            ckpt = f.readline().strip()[24:-1]

    with open(path / 'argv') as f:
        argv = f.read()

    argv += ' --resume_checkpoint %s' % ckpt
    argv += ' --sample_images %s' % output_dir
    argv += ' --sample_images_size %s' % n_samples
    argv += ' --verbosity 0'
    if verbose:
        print(argv)
    argv = argv.split()[1:]
    parser = train.setup_argumentparser()
    args = parser.parse_args(argv)
    tf.reset_default_graph()
    res = train.run(args.dataset, args.generator, args.discriminator, args.latentsize,
            args.dimension, args.epsilon, args.learningrate, args.batch_size, args, '/tmp')
    tf.reset_default_graph()
    return res
Code example #20
def objective(dataset, model, lr, space):
    optimizer = Adam(model.parameters(), lr=lr, weight_decay=space['weight_decay'])
    evals = run(dataset, model, optimizer, early_stopping=False)
    return {
        'loss': -evals['val_acc'],
        'status': STATUS_OK
        }
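This objective follows the hyperopt convention of returning a dict with 'loss' and 'status' (STATUS_OK). Since hyperopt's fmin passes only the sampled space dict to the objective, the remaining arguments would need to be bound first; a sketch under that assumption, with an illustrative search space:

from functools import partial
from hyperopt import fmin, hp, tpe

# Hypothetical driver: dataset, model and lr are bound via partial,
# so fmin's sampled dict arrives as the `space` parameter.
space = {'weight_decay': hp.loguniform('weight_decay', -10, -2)}
best = fmin(fn=partial(objective, dataset, model, lr),
            space=space, algo=tpe.suggest, max_evals=50)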
Code example #21
def main():
    arg = get_augments()
    if arg.action == "dataset":
        from dataset import get_dataset
        train_data = get_dataset(arg, train=True)
        for im, mask in train_data:
            print(mask.size())
        val_data = get_dataset(arg, train=False)
        for im, mask in val_data:
            print(im.size())
    elif arg.action == "train":
        from train import run
        run(arg)
    elif arg.action == "test":
        from test import run
        run(arg)
Code example #22
def run_sim_and_create_repeats(save_subfolder, settings, Iterations,
                               num_repeats, food_summer, food_winter,
                               only_fittest):
    settings['save_subfolder'] = save_subfolder

    sim_name = train.run(settings, Iterations)
    create_repeats(sim_name, save_subfolder, settings, num_repeats,
                   food_summer, food_winter, only_fittest)
Code example #23
File: main.py Project: karasai1991412/CheckSentence
    def run(self):
        if self.args.mode == 'train':
            # Train a model on sentences and labels read from
            # data_sent_path and data_answer_path.
            if not (path.exists(self.args.data_sent_path)
                    and path.exists(self.args.data_answer_path)):
                print("pls insert data_sent_path and data_answer_path")
                return
            train.run(OUTPUT_DIR=self.args.ModelPath,
                      data_sent_path=self.args.data_sent_path,
                      data_answer_path=self.args.data_answer_path,
                      NUM_TRAIN_EPOCHS=self.args.epochs,
                      n_train=self.args.linesToTrain)
        elif self.args.mode == 'estimate':
            # Write result_answers_all.csv for comparison with the original.
            estimator.config(self.args.ModelPath)
            estimator.run(self.args.data_sent_path,
                          n_lines=self.args.n_lines,
                          outputpath=self.args.data_output)
        elif self.args.mode == 'getAcc':
            # Report accuracy of the generated answers.
            print(self.get_acc())
        elif self.args.mode == "crossValidation":
            train.k_fold_cross_validation(
                k=10,
                data_sent_path=self.args.data_sent_path,
                data_answer_path=self.args.data_answer_path,
                ModelPath=self.args.ModelPath)
        else:
            # Predict a single sentence.
            estimator.config(self.args.ModelPath)
            sents = ["", self.args.sentence]
            predictions = estimator.getPrediction(sents)
            print(predictions[1][2])
            return predictions[1][2]
Code example #24
def main():
    parser = argparse.ArgumentParser(description='Fast weights Training')

    parser.add_argument('--exp',
                        default='all',
                        help='run a single experiment or all experiments')
    parser.add_argument('--seed', default=1234, type=int, help='random seed')
    opts = parser.parse_args()
    exp_files = sorted(glob.glob('exps/*/config.yml'))
    exp_already_run = [
        str(Path(f).parent / 'config.yml')
        for f in glob.glob('exps/*/*_testinglog.npy')
    ]
    exp_to_run = list(set(exp_files) - set(exp_already_run))
    if opts.exp == 'all':
        for exp in exp_to_run:
            with open(exp, 'r') as f:
                args = yaml.safe_load(f)
            if not os.path.exists(os.path.join(args['log_dir'], args['name'])):
                os.mkdir(os.path.join(args['log_dir'], args['name']))

            if not os.path.exists(
                    os.path.join(args['dir'], args['config']['output_dir'])):
                os.mkdir(
                    os.path.join(args['dir'], args['config']['output_dir']))
            print(f'Working in experiment {args["name"]}')
            run(args, random_seed=opts.seed)

    elif opts.exp in exp_files:
        if opts.exp in exp_to_run:
            with open(opts.exp, 'r') as f:
                args = yaml.safe_load(f)
            if not os.path.exists(os.path.join(args['log_dir'], args['name'])):
                os.mkdir(os.path.join(args['log_dir'], args['name']))

            if not os.path.exists(
                    os.path.join(args['dir'], args['config']['output_dir'])):
                os.mkdir(
                    os.path.join(args['dir'], args['config']['output_dir']))
            print(f'Working in experiment {args["name"]}')
            run(args, random_seed=opts.seed)
        else:
            raise Exception('This is a done experiment')
    else:
        raise Exception('Unknown experiment')
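Each experiment directory is driven by an exps/*/config.yml whose keys the runner reads above (name, log_dir, dir, and config.output_dir). A sketch of writing one such file; the values are hypothetical, only the key names come from the snippet:

import yaml

example = {
    'name': 'example',
    'log_dir': 'logs',
    'dir': 'exps/example',
    'config': {'output_dir': 'outputs'},
}
with open('exps/example/config.yml', 'w') as f:
    yaml.safe_dump(example, f)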
Code example #25
def main():
    args = parse_args()
    if args.config_file is None:
        raise Exception('no configuration file!')

    config = utils.config_parser.load(args.config_file)
    
    config.publish = args.publish
    config.dev = args.dev

    config.vote = args.vote
    
    # pp.pprint(config)

    if config.mode == "TRA":
        train.run(config)
    elif config.mode == "PRD":
        predict.run(config)
Code example #26
def run_repeat(num_foods, settings, pipeline_settings, food_num_arr, original_mean_food_num):

    if pipeline_settings['varying_parameter'] == 'time_steps':
        settings['TimeSteps'] = num_foods
        # Activate saving of energies and velocities during life time for simulation with similar varying param as
        # original simulation and for largest varying param
        if num_foods == original_mean_food_num or num_foods == np.max(food_num_arr):
            settings['save_energies_velocities_last_gen'] = True
        print(num_foods)
    elif pipeline_settings['varying_parameter'] == 'food':
        settings['food_num'] = num_foods

    if pipeline_settings['varying_parameter'] == 'food':
        settings['dynamic_range_pipeline_save_name'] = '{}dynamic_range_run_foods_{}'.format(pipeline_settings['add_save_file_name'], num_foods)
    elif pipeline_settings['varying_parameter'] == 'time_steps':
        settings['dynamic_range_pipeline_save_name'] = '{}dynamic_range_run_time_step_{}'.format(pipeline_settings['add_save_file_name'], num_foods)
    Iterations = pipeline_settings['num_repeats']
    train.run(settings, Iterations)
Code example #27
def main():
    argument_parser = argparse.ArgumentParser()
    argument_parser.add_argument('--model')
    argument_parser.add_argument('--train-data', default='data/train')
    argument_parser.add_argument('--test-data', default='data/test')
    argument_parser.add_argument('--height', default=139, type=int)
    argument_parser.add_argument('--width', default=139, type=int)
    argument_parser.add_argument('--batch-size', default=32, type=int)
    argument_parser.add_argument('--image_shape', default='', type=str)
    argument_parser.add_argument('--epochs', type=int, default=50)
    argument_parser.add_argument('--lr', type=float, default=.001)

    args = argument_parser.parse_args()

    if args.image_shape == '':
        image_shape = (args.width, args.height, 3)
    else:
        image_shape = [int(x) for x in args.image_shape.split(',')]

    default_params = {
        'image_shape': image_shape,
        'train_data': args.train_data,
        'test_data': args.test_data,
        'lr': args.lr,
        'batch_size': args.batch_size,
        'epochs': args.epochs
    }

    if args.model is None:
        models = []

        for model_name in get_available_models():
            params = default_params.copy()

            if model_name == 'pvgg16' or model_name == 'irsv2':
                params['lr'] = .0001
                params['epochs'] = 25

            models.append((model_name, params))
    else:
        models = [(args.model, default_params)]

    for model, params in models:
        run(model, **params)
Code example #28
File: main.py Project: fumin/tdttt
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--algo",
        help=
        "the training algorithm, sarsa for SARSA and qlearning for Q-learning")
    args = parser.parse_args()
    if args.algo == "sarsa":
        algo = train.SARSA
        print "Training using SARSA algorithm"
    else:
        algo = train.QLearning
        print "Training using Q-learning algorithm"

    circle = tictactoe.ActionValueFunc(tictactoe.PlayerCircle)
    cross = tictactoe.ActionValueFunc(tictactoe.PlayerCross)
    train.run(algo, circle, cross)

    print "Training completed, game starting..."
    while True:
        user = raw_input("Please choose a player, O or X: ")
        if user == "X" or user == "x":
            user = tictactoe.PlayerCross
            opponent = circle
            print "You are player X"
        else:
            user = tictactoe.PlayerCircle
            opponent = cross
            print "You are player O"

        opponentFirst = True
        yn = raw_input(
            "Do you want to go first? Y for yes, N for no, others for either way: "
        )
        if yn == "Y" or yn == "y":
            opponentFirst = False
        elif yn == "N" or yn == "n":
            opponentFirst = True
        else:
            if random.random() < 0.5:
                opponentFirst = False

        game.run(user, opponent, opponentFirst)
Code example #29
File: gui.py Project: jatinvyas1/SmartCam
def TakeImage():
    rollNo = txt.get()
    name = txt2.get()
    if rollNo == "" or name == "":
        var.set("Please enter your details first ")
    elif not rollNo.isdigit():
        var.set("Please enter valid faculty id")
    elif not all(x.isalpha() or x.isspace() for x in name):
        var.set("Please enter valid name")
    else:
        msg = t.run(name, rollNo)
        var.set(msg)
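The free names txt, txt2, and var are Tkinter widgets defined elsewhere in gui.py, and t is presumably the imported train module. A minimal sketch of the assumed widget setup (Python 3 tkinter names, hypothetical layout):

import tkinter as tk

root = tk.Tk()
txt = tk.Entry(root)   # faculty id field
txt2 = tk.Entry(root)  # name field
var = tk.StringVar()   # status message shown in the UI
tk.Label(root, textvariable=var).pack()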
Code example #30
def main(config):
    print("Experiment")
    size_list = [200, 400, 600]
    lr_list = [0.0001]
    hidden_list = [128, 256, 512]
    """
    dict_list = []
    ## Generating Embedding
    for size in size_list:
        print("Generating Embedding -- size:", str(size))
        config.size = size
        generator = EmbeddingGenerator(config.train_path, config.dict_path, config.tokenizer_name, config)
        dict_path = generator.generate()
        dict_list.append(dict_path)
    """

    dict_list = ["word2vec/1", "word2vec/2", "word2vec/3"]

    history = {
        "train_loss": [],
        "valid_loss": [],
        "valid_correct": [],
        "epoch": [],
        "lr": [],
        "word2vec": [],
        "hidden_size": []
    }

    ## Testing multiple options
    for dict_path in dict_list:
        config.dict_path = dict_path
        for lr in lr_list:
            config.lr = lr
            for hidden_size in hidden_list:
                config.save_path = None
                config.hidden_size = hidden_size
                temp_history = run(config)
                epoch_size = len(temp_history["train_loss"])
                temp_epoch = [epoch for epoch in range(epoch_size)]
                temp_lr = [lr for epoch in range(epoch_size)]
                temp_word2vec = [dict_path for epoch in range(epoch_size)]
                temp_hidden_size = [hidden_size for epoch in range(epoch_size)]

                history["train_loss"].extend(temp_history["train_loss"])
                history["valid_loss"].extend(temp_history["valid_loss"])
                history["valid_correct"].extend(temp_history["valid_correct"])
                history["epoch"].extend(temp_epoch)
                history["lr"].extend(temp_lr)
                history["word2vec"].extend(temp_word2vec)
                history["hidden_size"].extend(temp_hidden_size)

    df = pd.DataFrame(history)
    df.to_csv("result.csv", encoding="UTF8", index=False)
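One way to inspect the result.csv this sweep writes (a sketch, not part of the original script):

import pandas as pd

df = pd.read_csv("result.csv")
# Best validation loss for each embedding/hidden-size combination.
print(df.groupby(["word2vec", "hidden_size"])["valid_loss"].min())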
Code example #31
def run_train(state, rank, tstate, tcomm, tzcomm):
    """Run training and write a new model to the model_dir.

    Args:
        state: the RL loop State instance.
    """

    # sample records from selfplay.
    # distribute sampled records across training nodes if multinode training.
    num_examples, record_paths = get_training_input(state, rank, tcomm, tzcomm)

    # train
    with logged_timer('[rank {}] Training: {}'.format(rank, state.iter_num)):
        minigo_train.run(state, rank, tstate, num_examples, record_paths)

    if rank == 0:
        # Append the time elapsed from when the RL was started to when this model
        # was trained.
        elapsed = time.time() - state.start_time
        append_timestamp(elapsed, state.train_model_name)
        log_event(key='save_model', value={'iteration': state.iter_num})
Code example #33
File: train_and_validate.py Project: sergeyk/csrec
import train
import test
import time

if __name__=='__main__':
    import load_config
    t_start = time.time()
    params_filename = train.run(load_config.cfg)
    test.run(load_config.cfg, params_filename)
    print 'run completed in %s sec' % (time.time() - t_start)
Code example #34
File: main.py Project: GerardMJuan/MCV-M3
__author__ = "Sergi Sancho, Adriana Fernandez, Eric Lopez y Gerard Marti"
__credits__ = ['Sergi Sancho', 'Adriana Fernandez', 'Eric Lopez', 'Gerard Marti']
__license__ = "GPL"
__version__ = "1.0"

import extract_features
import train
import test
import draw_results
from MatlabCode import detect_signals

#DETECTION

#Color segmentation
detect_signals.run()

#CLASSIFICATION
extract_features.run() # Extracts the features for all the images to train the classifier
train.run() # Train the classifier

#TESTING AND EVALUATION
test.run() # Test a whole folder
draw_results.run() # Draw the detected boxes in the images
Code example #35
#!/usr/bin/python

from pylab import plot, show, arange, legend, xlabel, ylabel
from train import run
from data_loader_mnist import load_mnist
from data_loader_cifrar import load_cifrar
from feed_forward import LINEARIZE, FULLY_CONNECTED, TANH, SOFTMAX

xlabel('Numarul de imagini de antrenare')
ylabel('Acuratete')

mnist = load_mnist()
cifrar = load_cifrar()
arhitecture = [(LINEARIZE, -1), (FULLY_CONNECTED, 300), (TANH, -1), (FULLY_CONNECTED, 100), (TANH, -1), (FULLY_CONNECTED, 10), (SOFTMAX, -1)]
(inputTrainC, outputTrainC, inputTestC, outputTestC) = run(cifrar, arhitecture, 0.002, 4000, 40000)

x = inputTrainC
y = outputTrainC
plot(x, y, color='green', label='CIFAR_TRAIN')

x = inputTestC
y = outputTestC
plot(x, y, color='blue', label='CIFAR_TEST')

legend(prop={'size':6})
show()
Code example #36
#!/usr/bin/python

from pylab import plot, show, arange, legend, xlabel, ylabel
from train import run
from data_loader_mnist import load_mnist
from data_loader_cifrar import load_cifrar
from feed_forward import LINEARIZE, FULLY_CONNECTED, TANH, SOFTMAX

xlabel('Numarul de imagini de antrenare')
ylabel('Acuratete Teste')

mnist = load_mnist()
arhitecture = [(LINEARIZE, -1), (FULLY_CONNECTED, 300), (TANH, -1), (FULLY_CONNECTED, 100), (TANH, -1), (FULLY_CONNECTED, 10), (SOFTMAX, -1)]

for i in (0.001, 0.002, 0.004):
	(inputTrainM, outputTrainM, inputTestM, outputTestM) = run(mnist, arhitecture, i, 4000, 20000)
	x = inputTestM
	y = outputTestM
	plot(x, y, label="Learning_rate = " + str(i))

legend(prop={'size':6})
show()
Code example #37
File: BDT.py Project: david0811/RISE
if cfg.source == "BDTvarBs2phiphi.csv":
    cfg.name += "Bs2phiphi"
elif cfg.source != "BDTvar.csv":
    raise NameError("Invalid Source Selection!")
    
if cfg.dataset == True:
    import datasets as ds
    if cfg.quick == True:
        ds.run(cfg.source, quick = True)
    else:
        ds.run(cfg.source)

if cfg.train == True:
    import train as tr
    if cfg.quick == True:
        tr.run(cfg.name, int(cfg.maxdepth), int(cfg.estimators), quick = True)
    else:
        tr.run(cfg.name, int(cfg.maxdepth), int(cfg.estimators))

if cfg.gridsearch == True:
    import gridsearch as g
    if cfg.quick == True:
        g.run(cfg.name, quick = True)
    else:
        g.run(cfg.name)

if cfg.roc == True:
    import roccurves as rc
    if cfg.quick == True:
        rc.run(cfg.name + str(cfg.maxdepth), quick = True)
    else:
        rc.run(cfg.name + str(cfg.maxdepth))
Code example #38
cifar = load_cifrar()
arhitecture1 = [
    (CONV, (6, 14, 14), 6, 2),
    (RELU, -1),
    (MAX_POOLING, (6, 7, 7)),
    (LINEARIZE, -1),
    (FULLY_CONNECTED, 49),
    (FULLY_CONNECTED, 10),
    (SOFTMAX, -1),
]
arhitecture2 = [
    (CONV, (9, 14, 14), 6, 2),
    (RELU, -1),
    (MAX_POOLING, (9, 7, 7)),
    (LINEARIZE, -1),
    (FULLY_CONNECTED, 49),
    (FULLY_CONNECTED, 10),
    (SOFTMAX, -1),
]

index = 0
for i in (arhitecture1, arhitecture2):
    index += 1
    (inputTrainC, outputTrainC, inputTestC, outputTestC) = run(cifar, i, 0.002, 2000, 10000)
    x = inputTestC
    y = outputTestC
    plot(x, y, label="Architecture = " + str(index))

legend(prop={"size": 6})
show()
Code example #39
### Main

if __name__ == '__main__':
    # parse arguments
    argp = argparse.ArgumentParser(description=__doc__.strip().split("\n", 1)[0])
    argp.add_argument('experiment_dir',
        help="directory for storing trained model and other resources")
    argp.add_argument('dataset_path',
        help="dataset text corpus in .zip format")
    args = argp.parse_args()

    # defaults
    vocab_size = None
    skipgram_window_size = 1

    # load datasets
    log.info("load datasets")
    x_vocab, y_skipgram, doc_ids, words_all, word2id = load(args.dataset_path, vocab_size=vocab_size, skipgram_window_size=skipgram_window_size)
    vocab_size = len(word2id)

    print "x_vocab:", x_vocab[0].shape, sum([ x.nbytes  for x in x_vocab ])
    if y_skipgram:
        print "y_skipgram:", y_skipgram[0].shape, sum([ y.nbytes  for y in y_skipgram ])
    else:
        print "y_skipgram:", (x_vocab[0].shape[0] - skipgram_window_size, skipgram_window_size), "constant"
    print "vocab_size:", vocab_size

    # run train driver
    log.info("run train driver")
    run(x_vocab, y_skipgram, vocab_size)
Code example #40
File: main.py Project: music960633/104_MLDS
def main():
  train.run()
Code example #41
#!/usr/bin/python

from pylab import plot, show, arange, legend, xlabel, ylabel
from train import run
from data_loader_mnist import load_mnist
from data_loader_cifrar import load_cifrar
from feed_forward import LINEARIZE, FULLY_CONNECTED, TANH, SOFTMAX, CONV, RELU, MAX_POOLING

xlabel('Numarul de imagini de antrenare')
ylabel('Acuratete Teste')

mnist = load_mnist()
arhitecture1 = [(CONV, (6, 14, 14), 6, 2), (RELU, -1), (MAX_POOLING, (6, 7, 7)), (LINEARIZE, -1), (FULLY_CONNECTED, 10), (SOFTMAX, -1)]
arhitecture2 = [(CONV, (6, 28, 28), 5, 1), (RELU, -1), (MAX_POOLING, (6, 14, 14)), (CONV, (16, 10, 10), 5, 1), (RELU, -1), (MAX_POOLING, (16, 5, 5)), (LINEARIZE, -1), (FULLY_CONNECTED, 10), (SOFTMAX, -1)]

index = 0
for i in (arhitecture1, arhitecture2):
	index += 1
	(inputTrainC, outputTrainC, inputTestC, outputTestC) = run(mnist, i, 0.002, 2000, 10000) 
	x = inputTestC
	y = outputTestC
	plot(x, y, label="Architecture = " + str(index))

legend(prop={'size':6})
show()