def sample(args):
    with open(os.path.join(args.save_dir, 'config.pkl'), 'rb') as f:
        saved_args = cPickle.load(f)
    with open(os.path.join(args.save_dir, 'words_vocab.pkl'), 'rb') as f:
        words, vocab = cPickle.load(f)
    model = Model(saved_args, True)
    if args.seed is not None:
        # init random state from the user-provided seed
        np.random.seed(args.seed)
        random.seed(args.seed)

    seed_list = set()
    while len(seed_list) < args.num_trace:
        seed_list.add(random.randint(0, 2**31 - 1))
    utils.init_dir(args.output_folder)
    print(args)
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        saver = tf.train.Saver(tf.global_variables())
        ckpt = tf.train.get_checkpoint_state(args.save_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            for the_seed in seed_list:
                seed_sample(
                    sess, words, vocab, model, args.prime_text_file, the_seed,
                    args.output_folder + '/seed_' + str(the_seed) + '.txt')
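
Note that seeding both np.random and random up front makes the sampled seed_list, and hence the generated traces, reproducible across runs.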
Example #2
def test(data, model, optimizer, logger, config):
    test_batches = (data.DATA_SIZE[1] + config["batch_size"] -
                    1) // config["batch_size"]
    model.eval()

    prediction = np.zeros(data.DATA_SIZE[1], dtype=np.uint8)
    with torch.no_grad():  # inference only; no autograd graph is built
        for i in range(test_batches):
            batch_end = min((i + 1) * config["batch_size"], data.DATA_SIZE[1])
            inputs = torch.from_numpy(
                data.data_test[i * config["batch_size"]:batch_end, :]
            ).view(-1, 1, 45, 45)
            if config["cuda"] and torch.cuda.is_available():
                inputs = inputs.cuda()
            outputs = model(inputs)
            prediction[i * config["batch_size"]:batch_end] = np.argmax(
                outputs.cpu().numpy(), axis=1)

    print('Accuracy: %0.2f' %
          (100 * accuracy_score(data.label_test, prediction)))
    init_dir(config['output_dir'])
    np.save(
        os.path.join(config['output_dir'], '%s_pred.npy' % config['method']),
        prediction)
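
For reference, a minimal sketch of the config dict this test() expects; the keys are the ones read in the body above, the values are only illustrative:

config = {
    "batch_size": 128,       # test mini-batch size
    "cuda": True,            # move inputs to the GPU when available
    "output_dir": "output",  # where the prediction array is written
    "method": "cnn",         # used to name the saved .npy file
}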
Example #3
def train(args):
    base_dir = args.base_dir
    dirs = init_dir(base_dir)
    init_log(dirs['log'])
    config_dir = args.config_dir
    copy_file(config_dir, dirs['data'])
    config = configparser.ConfigParser()
    config.read(config_dir)

    # init env
    env = init_env(config['ENV_CONFIG'])
    logging.info('Training: a dim %r, agent dim: %d' % (env.n_a_ls, env.n_agent))

    # init step counter
    total_step = int(config.getfloat('TRAIN_CONFIG', 'total_step'))
    test_step = int(config.getfloat('TRAIN_CONFIG', 'test_interval'))
    log_step = int(config.getfloat('TRAIN_CONFIG', 'log_interval'))
    global_counter = Counter(total_step, test_step, log_step)

    # init centralized or multi agent
    seed = config.getint('ENV_CONFIG', 'seed')
    model = init_agent(env, config['MODEL_CONFIG'], total_step, seed)

    # disable multi-threading for safe SUMO implementation
    summary_writer = tf.summary.FileWriter(dirs['log'])
    trainer = Trainer(env, model, global_counter, summary_writer, output_path=dirs['data'])
    trainer.run()

    # save model
    final_step = global_counter.cur_step
    logging.info('Training: save final model at step %d ...' % final_step)
    model.save(dirs['model'], final_step)
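
All of these snippets depend on a project-local init_dir helper. A minimal sketch of the dict-returning variant used above, assuming it does nothing more than create missing subdirectories (signatures vary between the repos shown here):

import os

def init_dir(base_dir, pathes=('data', 'log', 'model')):
    # create base_dir/<name> for each subdirectory and return a
    # mapping such as {'data': ..., 'log': ..., 'model': ...}
    dirs = {}
    for name in pathes:
        cur_dir = os.path.join(base_dir, name)
        if not os.path.exists(cur_dir):
            os.makedirs(cur_dir)
        dirs[name] = cur_dir
    return dirs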
Example #4
def main():
    utils.init_dir()
    parser = argparse.ArgumentParser(description='For different background job')
    parser.add_argument('function', type=str, help='the job')
    parser.add_argument('--config_path', type=str, help='path of config file')
    args = parser.parse_args()

    #udp_socket()
    main_config = None
    if args.config_path:
        main_config = utils.parse_config(args.config_path)
    if args.function == "upload_iperf_wireshark":
        upload_iperf_wireshark(main_config)
    if args.function == "download_iperf_wireshark":
        download_iperf_wireshark(main_config)
    if args.function == "download_socket":
        download_socket()
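
A typical invocation would look like this (the script name jobs.py is hypothetical):

    python3 jobs.py upload_iperf_wireshark --config_path config.ini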
Example #5
    def __init__(self, parent, docfiles, settings, pos, size, pages):
        utils.init_dir()
        wx.Frame.__init__(self, parent, pos=pos, size=size, title='rbook')
        self.notebook = fnb.FlatNotebook(self, agwStyle=fnb.FNB_X_ON_TAB |
                                                        fnb.FNB_NO_X_BUTTON |
                                                        fnb.FNB_NO_NAV_BUTTONS |
                                                        fnb.FNB_NO_TAB_FOCUS)
        self.statusbar = self.CreateStatusBar()
        self.statusbar.SetFieldsCount(2)
        self.currentdir = os.path.expanduser('~')
        self.settings = settings
        self.pages = pages
        self.textctrl = wx.TextCtrl(self, size=(0,0))

        for docfile in docfiles:
            docname, ext = os.path.splitext(os.path.basename(docfile))
            try:
                doc_viewer = DocViewer(self.notebook, os.path.abspath(docfile),
                                       ext.upper(), self.settings['showoutline'])
                self.notebook.AddPage(doc_viewer, docname)
            except IOError as inst:
                self.statusbar.SetStatusText('!Error: %s' % inst.args)
        
        if self.notebook.GetPageCount() > 0:
            doc_viewer = self.notebook.GetPage(0)
            doc_viewer.doc_scroll.panel.SetFocus()
            if self.settings['autochdir']:
                self.currentdir = os.path.dirname(doc_viewer.filepath)
            self.update_statusbar(self.notebook.GetPage(0))
        else:
            self.textctrl.SetFocus()

        self.Bind(wx.EVT_CLOSE, self.on_close)
        self.Bind(fnb.EVT_FLATNOTEBOOK_PAGE_CHANGED, self.on_page_changed, self.notebook)
        self.Bind(fnb.EVT_FLATNOTEBOOK_PAGE_CLOSED, self.on_page_closed, self.notebook)
        self.Bind(wx.EVT_KEY_DOWN, self.handle_keys)
        self.textctrl.Bind(wx.EVT_TEXT, self.on_text)
        self.textctrl.Bind(wx.EVT_KEY_DOWN, self.text_key_down)

        sizer = wx.BoxSizer()
        sizer.Add(self.notebook, 1, wx.EXPAND)
        self.SetSizer(sizer)
        self.SetAutoLayout(True)
Example #6
def evaluate(args):
    base_dir = args.base_dir
    dirs = init_dir(base_dir, pathes=['eva_data', 'eva_log'])
    init_log(dirs['eva_log'])
    # enforce the same evaluation seeds across agents
    seeds = args.evaluate_seeds
    logging.info('Evaluation: random seeds: %s' % seeds)
    if not seeds:
        seeds = []
    else:
        seeds = [int(s) for s in seeds.split(',')]
    evaluate_fn(base_dir, dirs['eva_data'], seeds, 1)
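
For example, passing --evaluate_seeds 0,10,20 yields seeds = [0, 10, 20], so evaluation runs are reproducible.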
Example #7
def train(args):
    base_dir = args.base_dir
    dirs = init_dir(base_dir)
    init_log(dirs['log'])
    config_dir = args.config_dir
    copy_file(config_dir, dirs['data'])
    config = configparser.ConfigParser()
    config.read(config_dir)
    in_test, post_test = init_test_flag(args.test_mode)

    # init env
    env = init_env(config['ENV_CONFIG'])
    logging.info('Training: s dim: %d, a dim %d, s dim ls: %r, a dim ls: %r' %
                 (env.n_s, env.n_a, env.n_s_ls, env.n_a_ls))

    # init step counter
    total_step = int(config.getfloat('TRAIN_CONFIG', 'total_step'))
    test_step = int(config.getfloat('TRAIN_CONFIG', 'test_interval'))
    log_step = int(config.getfloat('TRAIN_CONFIG', 'log_interval'))
    global_counter = Counter(total_step, test_step, log_step)  # tracks the global step and the test/log intervals

    # init centralized or multi agent
    seed = config.getint('ENV_CONFIG', 'seed')
    if env.agent == 'iddpg':
        model = IDDPG(env.n_s_ls, env.n_a_ls, env.n_w_ls, total_step,
                      config['MODEL_CONFIG'], seed=seed)
    elif env.agent == 'maddpg':  # TODO: add MADDPG
        model = MADDPG(env.n_s_ls, env.n_a_ls, env.n_w_ls, env.n_f_ls, total_step,
                       config['MODEL_CONFIG'], seed=seed)
    summary_writer = tf.summary.FileWriter(dirs['log'])
    trainer = Trainer(env, model, global_counter, summary_writer, in_test,
                      output_path=dirs['data'])
    trainer.run()
    # post-training test
    if post_test:
        tester = Tester(env, model, global_counter, summary_writer, dirs['data'])
        tester.run_offline(dirs['data'])

    # save model
    final_step = global_counter.cur_step
    logging.info('Training: save final model at step %d ...' % final_step)
    model.save(dirs['model'], final_step)
Example #8
def evaluate(args):
    base_dir = args.base_dir
    dirs = init_dir(base_dir, pathes=['eva_data', 'eva_log'])
    init_log(dirs['eva_log'])
    agents = args.agents.split(',')
    # enforce the same evaluation seeds across agents
    seeds = args.evaluate_seeds
    logging.info('Evaluation: random seeds: %s' % seeds)
    if not seeds:
        seeds = []
    else:
        seeds = [int(s) for s in seeds.split(',')]
    threads = []
    for i, agent in enumerate(agents):
        agent_dir = base_dir + '/' + agent
        thread = threading.Thread(target=evaluate_fn,
                                  args=(agent_dir, dirs['eva_data'], seeds, i))
        thread.start()
        threads.append(thread)
    for thread in threads:
        thread.join()
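
Running each agent's evaluation in its own thread and joining at the end lets the agents be evaluated in parallel, while the shared seed list keeps their results comparable.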
Example #9
def train(args):
    base_dir = args.base_dir
    dirs = init_dir(base_dir)
    init_log(dirs['log'])
    config_dir = args.config_dir
    copy_file(config_dir, dirs['data'])
    config = configparser.ConfigParser()
    config.read(config_dir)
    in_test, post_test = init_test_flag(args.test_mode)

    # init env
    env = init_env(config['ENV_CONFIG'])
    logging.info('Training: s dim: %d, a dim %d, s dim ls: %r, a dim ls: %r' %
                 (env.n_s, env.n_a, env.n_s_ls, env.n_a_ls))

    # init step counter
    total_step = int(config.getfloat('TRAIN_CONFIG', 'total_step'))
    test_step = int(config.getfloat('TRAIN_CONFIG', 'test_interval'))
    log_step = int(config.getfloat('TRAIN_CONFIG', 'log_interval'))
    global_counter = Counter(total_step, test_step, log_step)

    # init centralized or multi agent
    seed = config.getint('ENV_CONFIG', 'seed')
    # coord = tf.train.Coordinator()

    # if env.agent == 'a2c':
    #     model = A2C(env.n_s, env.n_a, total_step,
    #                 config['MODEL_CONFIG'], seed=seed)
    if env.agent == 'ia2c':
        model = IA2C(env.n_s_ls,
                     env.n_a_ls,
                     env.n_w_ls,
                     total_step,
                     config['MODEL_CONFIG'],
                     seed=seed)
    elif env.agent == 'ma2c':
        model = MA2C(env.n_s_ls,
                     env.n_a_ls,
                     env.n_w_ls,
                     env.n_f_ls,
                     total_step,
                     config['MODEL_CONFIG'],
                     seed=seed)
    elif env.agent == 'iqld':
        model = IQL(env.n_s_ls,
                    env.n_a_ls,
                    env.n_w_ls,
                    total_step,
                    config['MODEL_CONFIG'],
                    seed=0,
                    model_type='dqn')
    else:
        model = IQL(env.n_s_ls,
                    env.n_a_ls,
                    env.n_w_ls,
                    total_step,
                    config['MODEL_CONFIG'],
                    seed=0,
                    model_type='lr')

    # disable multi-threading for safe SUMO implementation
    # threads = []
    summary_writer = tf.summary.FileWriter(dirs['log'])
    trainer = Trainer(env,
                      model,
                      global_counter,
                      summary_writer,
                      in_test,
                      output_path=dirs['data'])
    trainer.run()
    # if in_test or post_test:
    #     # assign a different port for test env
    #     test_env = init_env(config['ENV_CONFIG'], port=1)
    #     tester = Tester(test_env, model, global_counter, summary_writer, dirs['data'])

    # def train_fn():
    #     trainer.run(coord)

    # thread = threading.Thread(target=train_fn)
    # thread.start()
    # threads.append(thread)
    # if in_test:
    #     def test_fn():
    #         tester.run_online(coord)
    #     thread = threading.Thread(target=test_fn)
    #     thread.start()
    #     threads.append(thread)
    # coord.join(threads)

    # post-training test
    if post_test:
        tester = Tester(env, model, global_counter, summary_writer,
                        dirs['data'])
        tester.run_offline(dirs['data'])

    # save model
    final_step = global_counter.cur_step
    logging.info('Training: save final model at step %d ...' % final_step)
    model.save(dirs['model'], final_step)
Example #10
if __name__ == "__main__":
    args = parser.parse_args()

    if args.traditional_methods:  # apply traditional classification methods on MNIST
        os.system('python3 traditional-methods/%s' % args.method)
    else:  # using DNNs or CNNs
        with open(os.path.join(args.config_dir, "%s.json" % args.method)) as f:
            config = json.load(f)
        for arg in vars(args):
            if arg not in config:
                config[arg] = getattr(args, arg)
        show_config(config)

        # initialization
        init_dir(args.model_dir)
        init_dir(args.log_dir)
        np.random.seed(config["seed"])
        torch.manual_seed(config["seed"])
        torch.set_default_tensor_type('torch.FloatTensor')

        # load data
        data = MnistLoader(flatten=config["flatten"], data_path=args.data_dir)

        # apply feature extraction on data
        if args.feature_extracting_method is not None:
            f_t = locate("utils.%s" % args.feature_extracting_method)
            data.data_train = f_t(data.data_train, args.dim).astype(np.float32)
            data.data_test = f_t(data.data_test, args.dim).astype(np.float32)
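
The feature extractor is resolved by name (likely via pydoc.locate), so utils must expose a function matching the f_t(data, dim) call. A hedged sketch of one such extractor; the name pca and the sklearn approach are assumptions, not this repo's actual code:

from sklearn.decomposition import PCA

def pca(data, dim):
    # hypothetical extractor: project flattened images onto `dim` components
    # (note the caller above transforms train and test independently)
    return PCA(n_components=dim).fit_transform(data)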

        # initialize model
Example #11
def train(args):
    base_dir = args.base_dir
    dirs = init_dir(base_dir)
    init_log(dirs['log'])
    config_dir = args.config_dir
    copy_file(config_dir, dirs['data'])
    config = configparser.ConfigParser()
    config.read(config_dir)
    in_test, post_test = init_test_flag(args.test_mode)

    # init env
    env = init_env(config['ENV_CONFIG'])
    logging.info('Training: s dim: %d, a dim %d, s dim ls: %r, a dim ls: %r' %
                 (env.n_s, env.n_a, env.n_s_ls, env.n_a_ls))

    # init step counter
    total_step = int(config.getfloat('TRAIN_CONFIG', 'total_step'))  #1e6
    test_step = int(config.getfloat('TRAIN_CONFIG', 'test_interval'))  #2e4
    log_step = int(config.getfloat('TRAIN_CONFIG', 'log_interval'))  #1e4
    global_counter = Counter(total_step, test_step, log_step)

    # init centralized or multi agent
    seed = config.getint('ENV_CONFIG', 'seed')  #12
    # coord = tf.train.Coordinator()

    if env.agent == 'ia2c':
        model = IA2C(env.n_s_ls,
                     env.n_a_ls,
                     env.n_w_ls,
                     total_step,
                     config['MODEL_CONFIG'],
                     seed=seed)
    elif env.agent == 'ma2c':
        model = MA2C(env.n_s_ls,
                     env.n_a_ls,
                     env.n_w_ls,
                     env.n_f_ls,
                     total_step,
                     config['MODEL_CONFIG'],
                     seed=seed)
    elif env.agent == 'codql':
        print('This is codql')
        num_agents = len(env.n_s_ls)
        print('num_agents:', num_agents)
        a_dim = env.n_a_ls[0]  # action dim, assumed identical across agents
        print('a_dim:', a_dim)
        s_dim = env.n_s_ls[0]
        print('s_dim:', s_dim)
        s_dim_wait = env.n_w_ls[0]
        print('s_dim_wait:', s_dim_wait)
        model = MFQ(nb_agent=num_agents,
                    a_dim=a_dim,
                    s_dim=s_dim,
                    s_dim_wave=s_dim - s_dim_wait,
                    s_dim_wait=s_dim_wait,
                    config=config['MODEL_CONFIG'])
    elif env.agent == 'dqn':
        model = DQN(nb_agent=len(env.n_s_ls),
                    a_dim=env.n_a_ls[0],
                    s_dim=env.n_s_ls[0],
                    s_dim_wave=env.n_s_ls[0] - env.n_w_ls[0],
                    s_dim_wait=env.n_w_ls[0],
                    config=config['MODEL_CONFIG'],
                    doubleQ=False)  # doubleQ=False selects DQN; True selects double DQN
    elif env.agent == 'ddpg':
        model = DDPGEN(nb_agent=len(env.n_s_ls),
                       share_params=True,
                       a_dim=env.n_a_ls[0],
                       s_dim=env.n_s_ls[0],
                       s_dim_wave=env.n_s_ls[0] - env.n_w_ls[0],
                       s_dim_wait=env.n_w_ls[0])
    elif env.agent == 'iqld':
        model = IQL(env.n_s_ls,
                    env.n_a_ls,
                    env.n_w_ls,
                    total_step,
                    config['MODEL_CONFIG'],
                    seed=0,
                    model_type='dqn')
    else:
        model = IQL(env.n_s_ls,
                    env.n_a_ls,
                    env.n_w_ls,
                    total_step,
                    config['MODEL_CONFIG'],
                    seed=0,
                    model_type='lr')

    summary_writer = tf.summary.FileWriter(dirs['log'])
    trainer = Trainer(env,
                      model,
                      global_counter,
                      summary_writer,
                      in_test,
                      output_path=dirs['data'])
    trainer.run()

    # save model
    final_step = global_counter.cur_step
    logging.info('Training: save final model at step %d ...' % final_step)
    model.save(dirs['model'], final_step)
Example #12
    seos_idx, teos_idx = n_src_words - 1, n_trg_words - 1
    sv = ensure_special_tokens(
        sv, bos_idx=0, eos_idx=seos_idx, unk_idx=config['unk_id'])
    tv = ensure_special_tokens(
        tv, bos_idx=0, eos_idx=teos_idx, unk_idx=config['unk_id'])

    # the tv is originally:
    #   {'UNK': 1, '<s>': 0, '</s>': 0, 'is': 5, ...}
    # after ensure_special_tokens, the tv becomes:
    #   {'<UNK>': 1, '<S>': 0, '</S>': trg_vocab_size-1, 'is': 5, ...}
    tv_i2w = {i: w for w, i in tv.iteritems()}
    sv_i2w = {i: w for w, i in sv.iteritems()}
    # after reversing, the tv_i2w become:
    #   {1: '<UNK>', 0: '<S>', trg_vocab_size-1: '</S>', 5: 'is', ...}

    init_dir(config['models_dir'])
    init_dir(config['val_out_dir'])
    init_dir(config['tst_out_dir'])

    source = T.lmatrix('source')
    target = T.lmatrix('target')
    source_mask = T.matrix('source_mask')
    target_mask = T.matrix('target_mask')
    # for each batch which is a data in tr_stream.get_epoch_iterator(),
    # we set the maximum sentence length in this batch as sent_len;
    # source, source_mask, target and target_mask are all matrix shape: (batch_size * sent_len)
    # and their type are all theano.tensor.var.TensorVariable
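    # e.g. a batch of two sentences with lengths 3 and 5 gives sent_len = 5,
    # so (with 1/0 masking) source_mask would be:
    #   [[1., 1., 1., 0., 0.],
    #    [1., 1., 1., 1., 1.]]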

    ltopk_trg_vocab_idx = []
    if config['use_mv']:
        # no need to use the whole vocabulary
Example #13
        s = np.asarray([[153, 1660, 5137, 29999]])  # sample source ids for a quick sanity check
        # s = np.asarray([[3490]])
        t = np.asarray([[0, 10782, 2102, 1735, 4, 1829, 1657, 29999, 0]])
        pv = np.asarray([0, 10782, 2102, 1735, 4, 1829, 1657, 29999])
        translator.trans_samples(s, t)
        sys.exit(0)

    # translate sentences
    viter = dev_stream.get_epoch_iterator()

    avg_merges_rate, trans = translator.single_trans_valid(viter)
    # trans = translator.multi_process(viter, n_process=nprocess)

    outdir = args.workspace

    init_dir(outdir)

    outprefix = outdir + '/trans'
    valid_out = "{}_e{}_upd{}_b{}m{}_kl{}bch{}_ln{}_cp{}".format(
        outprefix, epoch, batch, beam_size, search_mode, kl, args.use_batch, alpha, beta)
    with open(valid_out, 'w') as fVal_save:    # valids/trans
        fVal_save.writelines(trans)

    mteval_bleu, multi_bleu = valid_bleu(valid_out, config['val_tst_dir'], config['val_prefix'])
    mteval_bleu = float(mteval_bleu)

    score_file_name = '{}/mteval_bleu.pkl'.format(outdir)
    scores = []
    if os.path.exists(score_file_name):
        with open(score_file_name) as score_file:
Example #14
def train(args):
    base_dir = args.base_dir
    dirs = init_dir(base_dir)
    init_log(dirs['log'])
    config_dir = args.config_dir
    copy_file(config_dir, dirs['data'])
    config = configparser.ConfigParser()
    config.read(config_dir)
    in_test, post_test = init_test_flag(args.test_mode)

    # init env
    env = init_env(config['ENV_CONFIG'])
    logging.info('Training: a dim %d, agent dim: %d' % (env.n_a, env.n_agent))

    # init step counter
    total_step = int(config.getfloat('TRAIN_CONFIG', 'total_step'))
    test_step = int(config.getfloat('TRAIN_CONFIG', 'test_interval'))
    log_step = int(config.getfloat('TRAIN_CONFIG', 'log_interval'))
    global_counter = Counter(total_step, test_step, log_step)

    # init centralized or multi agent
    seed = config.getint('ENV_CONFIG', 'seed')

    if env.agent == 'ia2c':
        model = IA2C(env.n_s_ls,
                     env.n_a,
                     env.neighbor_mask,
                     env.distance_mask,
                     env.coop_gamma,
                     total_step,
                     config['MODEL_CONFIG'],
                     seed=seed)
    elif env.agent == 'ia2c_fp':
        model = IA2C_FP(env.n_s_ls,
                        env.n_a,
                        env.neighbor_mask,
                        env.distance_mask,
                        env.coop_gamma,
                        total_step,
                        config['MODEL_CONFIG'],
                        seed=seed)
    elif env.agent == 'ma2c_nc':
        model = MA2C_NC(env.n_s,
                        env.n_a,
                        env.neighbor_mask,
                        env.distance_mask,
                        env.coop_gamma,
                        total_step,
                        config['MODEL_CONFIG'],
                        seed=seed)
    else:
        model = None

    # disable multi-threading for safe SUMO implementation
    summary_writer = tf.summary.FileWriter(dirs['log'])
    trainer = Trainer(env,
                      model,
                      global_counter,
                      summary_writer,
                      in_test,
                      output_path=dirs['data'])
    trainer.run()

    # save model
    final_step = global_counter.cur_step
    logging.info('Training: save final model at step %d ...' % final_step)
    model.save(dirs['model'], final_step)

    # post-training test
    if post_test:
        test_dirs = init_dir(base_dir, pathes=['eva_data'])
        evaluator = Evaluator(env, model, test_dirs['eva_data'])
        evaluator.run()