Example #1
 def test_multiply_other_counter(self):
     a = Counter({'b': 5, 'c': 2})
     b = Counter({'a': 3, 'b': 5})
     c = a * b
     self.assertAlmostEqual(c['a'], 0)
     self.assertAlmostEqual(c['b'], 25)
     self.assertAlmostEqual(c['c'], 0)
Example #2
 def test_add_other_counter(self):
     a = Counter({'b': 5, 'c': 2})
     b = Counter({'a': 3, 'b': 5})
     c = a + b
     self.assertAlmostEqual(c['a'], 3)
     self.assertAlmostEqual(c['b'], 10)
     self.assertAlmostEqual(c['c'], 2)
Example #3
    def __init__(self, log_dir):
        super().__init__(name="Logger")
        self.device = Params.DEVICE

        with tf.device(self.device), self.name_scope:

            self.episode_counter = Counter("episode_counter",
                                           start=-1,
                                           dtype=tf.int32)
            self.actor_steps_counter = Counter("actor_steps_counter",
                                               start=0,
                                               dtype=tf.int32)

            self.get_time = lambda: tf.reshape(
                tf.py_function(time.time, [], Tout=tf.float64), ()) * 1000
            self.actor_time = tf.Variable(self.get_time())
            self.learner_time = tf.Variable(self.get_time())

            self.learner_log_steps = tf.cast(Params.LEARNER_LOG_STEPS,
                                             tf.float64)

            # Init TensorBoard writer
            if Params.LOG_TENSORBOARD:
                self.log_dir = log_dir
                self.writer = tf.summary.create_file_writer(self.log_dir)
            else:
                self.writer = None

            # Log parameters
            self.log_params()
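The Counter here is not collections.Counter but a TensorFlow-side counter created inside a device and name scope. A minimal sketch of the assumed interface as a tf.Variable wrapper; the increment/value method names are guesses, not confirmed by the source:

import tensorflow as tf

class Counter(tf.Module):
    """Named integer counter backed by a tf.Variable (interface assumed)."""

    def __init__(self, name, start=0, dtype=tf.int32):
        super().__init__(name=name)
        self.var = tf.Variable(start, dtype=dtype, name=name)

    def increment(self, amount=1):
        # assign_add mutates the variable in place and returns it
        return self.var.assign_add(tf.cast(amount, self.var.dtype))

    def value(self):
        return self.var.value()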
Example #4
    def test_positive(self):
        a = Counter({'a': 1})
        self.assertTrue(a.positive())

        a = Counter({'a': -1})
        self.assertFalse(a.positive())

        a = Counter({'a': 1, 'b': -1})
        self.assertFalse(a.positive())
Example #5
    def test_base_impossible_2(self):
        A = sympy.Symbol('A')
        B = sympy.Symbol('B')

        state_0 = Counter({A: 2})
        substrate = Counter({A: 3})
        products = Counter({B: 2})
        kinetic = A * (A - 1)
        new_state, kinetic_val = shift(state_0, substrate, products, kinetic)
        self.assertEqual(new_state, None)
        self.assertEqual(kinetic_val, None)
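The shift helper used here (and in Example #17) is not shown. A plausible sketch under the constraints this test imposes, assuming the kinetic law is a sympy expression: it must return (None, None) when the substrate requirement cannot be met, and otherwise the post-reaction state together with the kinetic law evaluated at the current species counts:

from collections import Counter

def shift(state, substrate, products, kinetic):
    # The reaction can only fire if every substrate species is present
    # in sufficient quantity; otherwise the move is impossible.
    if any(state[species] < needed for species, needed in substrate.items()):
        return None, None
    # Evaluate the symbolic kinetic law at the current species counts.
    kinetic_val = kinetic.subs(dict(state))
    # Apply the reaction: consume substrates, produce products.
    new_state = Counter(state)
    new_state.subtract(substrate)
    new_state.update(products)
    new_state += Counter()  # drop zero counts so states compare cleanly
    return new_state, kinetic_val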
Example #6
 def naive_distribution(self, state_0, final_time, burnout_time):
     """Time-average a Gillespie trajectory (discarding a burn-in period)
     to estimate the distribution over states."""
     from collections import Counter
     distribution = Counter()
     conteggi = Counter()  # per-state visit counts ("conteggi" = counts)
     for state, time, dt in self.gillespie(state_0, final_time):
         if time < burnout_time:
             continue
         idx = tuple(state.values())
         distribution[idx] += dt
         conteggi[idx] += 1
     return state_0.keys(), distribution, conteggi
Example #7
 def test_add_dict_reverse(self):
     a = Counter({'b': 5, 'c': 2})
     b = {'a': 3, 'b': 5}
     c = b + a
     self.assertAlmostEqual(c['a'], 3)
     self.assertAlmostEqual(c['b'], 10)
     self.assertAlmostEqual(c['c'], 2)
Example #8
def train(args):
    base_dir = args.base_dir
    dirs = init_dir(base_dir)
    init_log(dirs['log'])
    config_dir = args.config_dir
    copy_file(config_dir, dirs['data'])
    config = configparser.ConfigParser()
    config.read(config_dir)

    # init env
    env = init_env(config['ENV_CONFIG'])
    logging.info('Training: a dim %r, agent dim: %d' % (env.n_a_ls, env.n_agent))

    # init step counter
    total_step = int(config.getfloat('TRAIN_CONFIG', 'total_step'))
    test_step = int(config.getfloat('TRAIN_CONFIG', 'test_interval'))
    log_step = int(config.getfloat('TRAIN_CONFIG', 'log_interval'))
    global_counter = Counter(total_step, test_step, log_step)

    # init centralized or multi agent
    seed = config.getint('ENV_CONFIG', 'seed')
    model = init_agent(env, config['MODEL_CONFIG'], total_step, seed)

    # disable multi-threading for safe SUMO implementation
    summary_writer = tf.summary.FileWriter(dirs['log'])
    trainer = Trainer(env, model, global_counter, summary_writer, output_path=dirs['data'])
    trainer.run()

    # save model
    final_step = global_counter.cur_step
    logging.info('Training: save final model at step %d ...' % final_step)
    model.save(dirs['model'], final_step)
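The Counter(total_step, test_step, log_step) constructed here (and in Examples #24, #26, and #27) is a global step scheduler, not collections.Counter. Only the cur_step attribute is visible in the source; the rest of this sketch is an assumed Trainer-facing interface:

class Counter:
    def __init__(self, total_step, test_step, log_step):
        self.total_step = total_step
        self.test_step = test_step
        self.log_step = log_step
        self.cur_step = 0

    def next(self):
        # advance the global step and return it
        self.cur_step += 1
        return self.cur_step

    def should_stop(self):
        return self.cur_step >= self.total_step

    def should_test(self):
        return self.cur_step % self.test_step == 0

    def should_log(self):
        return self.cur_step % self.log_step == 0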
Example #9
 def __init__(self, args, k, train_data):
     self.k = k
     self.args = args
     self.device = "cuda:0" if torch.cuda.is_available() else "cpu"
     self.data = torch.tensor(train_data[0]).to(self.device)
     self.label = torch.tensor(train_data[1]).to(self.device)
     self.counter = Counter()
Example #10
def main():
    """
    Called when midi_store is run directly. Starts a series of database worker
    processes and kicks off parsing the MIDI files and storing them in the
    database(s).
    """
    parser = OptionParser()

    parser.add_option("-d", "--data-directory", dest="data_directory", default="data/")
    parser.add_option("-t", "--pool-size", dest="pool_size", default=8, type="int")
    parser.add_option("-u", "--username", dest="db_username", default="postgres")
    parser.add_option("-p", "--password", dest="db_password", default="postgres")

    (options, args) = parser.parse_args()

    # construct the mp.Queue of midi files to process
    q = Queue()
    for root, dirnames, filenames in os.walk(options.data_directory):
        for filename in fnmatch.filter(filenames, '*.mid'):
            midiPath = os.path.abspath(os.path.join(root, filename))
            q.put(midiPath)

    # construct the series of database engines
    engines = get_engines(options.pool_size, options.db_username, options.db_password)
    processes = []
    counter = Counter(0)
    for i in xrange(options.pool_size):
        p = Runner(q, engines[i], counter)
        p.start()
        processes.append(p)

    # wait for all engines to complete.
    for p in processes:
        p.join()
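The Counter(0) shared by the Runner worker processes has to be process-safe; a plain integer would not survive process boundaries. A sketch assuming the usual lock-protected multiprocessing.Value pattern (the method names are guesses):

import multiprocessing as mp

class Counter(object):
    def __init__(self, initval=0):
        self.val = mp.Value('i', initval)
        self.lock = mp.Lock()

    def increment(self):
        # serialize updates across processes
        with self.lock:
            self.val.value += 1
            return self.val.value

    def value(self):
        with self.lock:
            return self.val.value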
Example #11
 def test_normalize_1(self):
     a = Counter({'a': 3, 'b': 5, 'c': 2})
     b = a.normalize()
     self.assertAlmostEqual(b.total(), 1.0)
     self.assertAlmostEqual(b['a'], 0.3)
     self.assertAlmostEqual(b['b'], 0.5)
     self.assertAlmostEqual(b['c'], 0.2)
     self.assertIs(a.keymap, b.keymap)
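The arithmetic Counter exercised in Examples #1, #2, #4, #7, and #11 is not shown. A minimal sketch that satisfies those tests; the keymap handling and the strictness of positive() on zero counts are assumptions:

class Counter(dict):
    def __init__(self, data=None, keymap=None):
        super().__init__(data or {})
        self.keymap = keymap if keymap is not None else {}

    def __missing__(self, key):
        # missing keys count as zero
        return 0

    def __add__(self, other):
        keys = set(self) | set(other)
        return Counter({k: self[k] + other.get(k, 0) for k in keys},
                       keymap=self.keymap)

    __radd__ = __add__  # lets plain_dict + counter work (Example #7)

    def __mul__(self, other):
        keys = set(self) | set(other)
        return Counter({k: self[k] * other.get(k, 0) for k in keys},
                       keymap=self.keymap)

    def total(self):
        return sum(self.values())

    def positive(self):
        return all(v > 0 for v in self.values())

    def normalize(self):
        # rescale so that total() == 1.0; keymap is shared, not copied
        t = self.total()
        return Counter({k: v / t for k, v in self.items()}, keymap=self.keymap)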
Example #12
def ExtractDataTrack(
        track: List[mido.Message],
        exclusionThreshold: float
) -> Tuple[List[float], List[float], Counter, Counter, LayeredCounter]:
    standaloneIntervals = []
    deltaTimes = []

    times = [track[0].time]
    frequencies = [Note.FromHeight(track[0].note).Frequency]

    for idMsg in range(1, len(track)):
        dt = track[idMsg].time - track[idMsg - 1].time
        if dt < exclusionThreshold:
            deltaTimes.append(dt)

            n0 = Note.FromHeight(track[idMsg].note)
            n1 = Note.FromHeight(track[idMsg - 1].note)

            times.append(track[idMsg].time)
            frequencies.append(n0.Frequency)

            currInterval = Interval.FromNotes(n0, n1)
            standaloneIntervals.append(currInterval)

    tempSuccessionIntervals = [
        (standaloneIntervals[idInterval - 1].ShortStr(),
         standaloneIntervals[idInterval].ShortStr())
        for idInterval in range(1, len(standaloneIntervals))
    ]

    successionIntervals = LayeredCounter()
    for elem in tempSuccessionIntervals:
        successionIntervals[elem[0]][elem[1]] += 1
    #print(successionIntervals)

    standaloneIntervals.sort()

    counterStandaloneIntervals = Counter()
    counterStandaloneIntervals.AddListElements(
        [interval.ShortStr() for interval in standaloneIntervals])

    outDeltatimes = Counter()
    outDeltatimes.AddListElements(deltaTimes)

    return times, frequencies, outDeltatimes, counterStandaloneIntervals, successionIntervals
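Neither LayeredCounter nor the AddListElements helper appears in the source. A sketch consistent with the usage above, assuming a two-level counter and a bulk-tally method built on collections.Counter:

from collections import Counter as BaseCounter, defaultdict

class Counter(BaseCounter):
    def AddListElements(self, elements):
        # tally every element of the list into this counter
        self.update(elements)

class LayeredCounter(defaultdict):
    # two-level counter: outer key -> inner Counter
    def __init__(self):
        super().__init__(Counter)

This matches the usage above: successionIntervals[a][b] += 1 creates the inner Counter on demand.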
Example #13
    def __init__(self, env, hidden_layer=[64, 64]):
        self.env = env
        #self.env.env.disableViewer = False
        self.num_inputs = env.observation_space.shape[0]
        self.num_outputs = env.action_space.shape[0]
        self.hidden_layer = hidden_layer

        self.params = Params()

        self.Net = ActorCriticNet
        self.model = self.Net(self.num_inputs, self.num_outputs,
                              self.hidden_layer)
        self.model.share_memory()
        self.shared_obs_stats = Shared_obs_stats(self.num_inputs)
        self.memory = ReplayMemory(10000000)
        self.value_memory = ReplayMemory(10000000)
        self.test_mean = []
        self.test_std = []

        self.noisy_test_mean = []
        self.noisy_test_std = []
        self.fig = plt.figure()
        #self.fig2 = plt.figure()
        self.lr = self.params.lr
        plt.show(block=False)

        self.test_list = []
        self.noisy_test_list = []
        self.queue = mp.Queue()
        self.value_queue = mp.Queue()

        self.mpdone = [mp.Event(), mp.Event(), mp.Event(), mp.Event()]

        self.process = []
        self.traffic_light = TrafficLight()
        self.counter = Counter()

        self.best_trajectory = ReplayMemory(5000)
        self.best_score_queue = mp.Queue()
        self.best_score = mp.Value("f", 0)
        self.max_reward = mp.Value("f", 1)

        self.expert_trajectory = ReplayMemory(int(1e7))

        self.validation_trajectory = ReplayMemory(6000 * 9)

        self.best_validation = 1.0
        self.current_best_validation = 1.0

        self.return_obs_stats = Shared_obs_stats(1)

        self.gpu_model = self.Net(self.num_inputs, self.num_outputs,
                                  self.hidden_layer)

        self.base_controller = None
Example #14
 def __init__(self, model):
     super().__init__(model)
     normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                      std=[0.229, 0.224, 0.225])
     self.transform = transforms.Compose([
         transforms.RandomResizedCrop(224),
         transforms.RandomHorizontalFlip(),
         transforms.ToTensor(),
         normalize,
     ])
     self.stat = Counter()
Example #15
 def __init__(self, model):
     self.model = model
     self.device = next(model.parameters()).device
     self.transform = transforms.Compose([
         transforms.Pad(4),
         transforms.RandomHorizontalFlip(),
         transforms.RandomCrop(32),
         transforms.ToTensor()
     ])
     self.stat = Counter()
     self.path = "./data/models_dict/%s.ckpt" % self.model.__class__.__name__
Example #16
def main():
    train_transforms = Compose([
        Resize((640, 320)),
        RandomHorizontalFlip(),
        RandomCrop((640, 320), 20),
        ToTensor()
    ])
    train_dataset = AmapDataset(DATA_DIR, "train", transforms=train_transforms)
    train_loader = DataLoader(train_dataset,
                              BATCH_SIZE,
                              True,
                              num_workers=NUM_WORKERS,
                              collate_fn=collate_fn)

    device = torch.device(DEVICE)
    model = MyModule(num_classes=3).to(device)

    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.Adam(params, lr=0.001, weight_decay=0.005)

    if os.path.exists(MODEL_FILE):
        model.load_state_dict(torch.load(MODEL_FILE))

    for epoch in range(1, EPOCHS + 1):
        model.train()
        counter = Counter()
        for step, (idx, img, times, label) in enumerate(train_loader):
            step, total_step = step + 1, len(train_loader)
            img, times, label = img.to(device), times.to(device), label.to(
                device)

            pred = model(img, times)

            loss = nn.functional.cross_entropy(pred, label)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            pred = torch.argmax(pred, 1)
            acc = (pred == label).float().mean().cpu().detach().numpy()
            loss = loss.cpu().detach().numpy()

            counter.append(loss=loss, acc=acc)
            print(
                f"Epoch:{epoch}/{EPOCHS}, Step:{step}/{total_step}, "
                f"Loss:{loss:.04f}/{counter.loss:.04f}, "
                f"Accuracy:{acc:0.4f}/{counter.acc:.04f}",
                end='\r',
                flush=True)

        torch.save(model.state_dict(), MODEL_FILE)
        print()
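The per-epoch Counter here accumulates metrics: counter.append(loss=..., acc=...) feeds it, and counter.loss / counter.acc are read back for the progress line. A sketch assuming those attributes expose running means over the epoch:

class Counter:
    def __init__(self):
        self._sums = {}
        self._counts = {}

    def append(self, **metrics):
        for name, value in metrics.items():
            self._sums[name] = self._sums.get(name, 0.0) + float(value)
            self._counts[name] = self._counts.get(name, 0) + 1

    def __getattr__(self, name):
        # only reached for names that are not real attributes,
        # i.e. metric names such as 'loss' and 'acc'
        try:
            return self._sums[name] / self._counts[name]
        except KeyError:
            raise AttributeError(name)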
Example #17
 def escapes(self, start):
     """Given a starting state, evaluate which states are reachable and the corresponding transition rates."""
     start = Counter(start)
     end_states = []
     kinetics = []
     for substrate, products, kinetic in self.reactions:
         end_state, kinetic = shift(start, substrate, products, kinetic)
         if kinetic and end_state is not None:
             end_states.append(end_state)
             kinetics.append(float(kinetic))
     kinetics = np.array(kinetics)
     return end_states, kinetics
Example #18
    def transition_matrix(self, start):
        """create the transition matrix and the state vector from a starting point

        Will stuck in an infinite loop if the CME is not limited
        """
        start = Counter(start)
        states = [start]
        transitions = dict()
        for state in states:
            for destination, kinetic in zip(*self.escapes(state)):
                if destination not in states:
                    states.append(destination)
                transitions[tuple(state.items()), tuple(destination.items())] = kinetic
        return transitions, states
Example #19
def main():
    """
    Run harmonic analysis on all songs in all databases. This will take a LONG time.
    """
    parser = OptionParser()

    parser.add_option("-d",
                      "--durk-step",
                      dest="durk_step",
                      default=4,
                      type="int")
    parser.add_option("-t",
                      "--pool-size",
                      dest="pool_size",
                      default=8,
                      type="int")
    parser.add_option("-u",
                      "--username",
                      dest="db_username",
                      default="postgres")
    parser.add_option("-p",
                      "--password",
                      dest="db_password",
                      default="postgres")
    (options, args) = parser.parse_args()

    print "Creating", options.pool_size, "processes."
    processes = []

    # Initialize the counter to 0
    counter = Counter(0)

    # get all database engines
    engines = get_engines(options.pool_size, options.db_username,
                          options.db_password)

    # Construct a new HarmonicAnalyzer process for each database.
    for i in xrange(options.pool_size):
        p = HarmonicAnalyzer(options.durk_step, engines[i], counter)
        processes.append(p)

    # Start the processes
    print "Starting", options.pool_size, "processes."
    for p in processes:
        p.start()

    # And wait for them to finish
    for p in processes:
        p.join()
Example #20
    def calculate_gradient(self, label, feat_index, weights):
        # feat_index holds no feature values; it indexes which features are
        # active for this word, i.e. feat_index == [4, 5] means the 4th and
        # 5th features are 1 and all others are 0.
        sig_val = self.sigmoid(feat_index, weights)

        # Create a Counter object to store the gradients for this token
        grad = Counter()

        # The gradient of the logistic loss w.r.t. each active weight is
        # (sigma(wx) - y), where sigma is the logistic function and y the
        # correct label. All other gradient entries stay zero.
        for index_val in feat_index:
            grad.increment_count(index_val, (sig_val - label))

        return grad
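Because the feature vector is binary and sparse, the dot product w.x reduces to a sum of the weights at the active indices. A sketch of the sigmoid and of a gradient Counter with the increment_count method this code assumes:

import math
from collections import defaultdict

def sigmoid(feat_index, weights):
    # w.x over a sparse binary feature vector
    z = sum(weights[i] for i in feat_index)
    return 1.0 / (1.0 + math.exp(-z))

class Counter(defaultdict):
    def __init__(self):
        super().__init__(float)  # absent gradient entries default to 0.0

    def increment_count(self, key, amount):
        self[key] += amount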
Example #21
def number_folders(folders, pad):
	if len(folders) == 0:
		return

	counter = Counter(0, pad=pad)
	for n in folders:
		n.value = counter.i() + n.value
		sub_folders = []
		sub_files = []
		for child in n.children:
			if child.node_type == '<FOLDER>':
				sub_folders.append(child)
			elif child.node_type == '<FILE>':
				sub_files.append(child)
		number_folders(sub_folders, pad)
		number_files(sub_files, pad)
Example #22
 def gillespie(self, start, steps=10):
     """Run a Gillespie (SSA) simulation from the starting state, yielding
     (state, time, dt) triples until the simulated time exceeds `steps`."""
     start = Counter(start)
     time = 0.0
     while time < steps:
         end_states, kinetics = self.escapes(start)
         cumulative = np.cumsum(kinetics)
         if not len(end_states) or not len(cumulative):
             # reached a stationary state: no reaction can fire anymore
             yield start, time, np.inf
             break
         lambda_tot = cumulative[-1]
         dt = rand_exp(1. / lambda_tot)
         selected = np.searchsorted(cumulative / lambda_tot, np.random.rand())
         new_state = end_states[selected]
         yield start, time, dt
         time += dt
         start = new_state
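rand_exp is not shown; in the stochastic simulation algorithm the waiting time to the next reaction is exponentially distributed with mean 1/lambda_tot, so a one-line sketch (assuming numpy) suffices:

import numpy as np

def rand_exp(scale):
    # exponentially distributed waiting time with the given mean
    return np.random.exponential(scale)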
Example #23
def main():
    # start = time.time()
    config = load_config()
    num_agents = config['num_agents']
    state_dim = config['state_dim']
    state_length = config['state_length']
    action_dim = config['action_dim']
    exploration_param = config['exploration_param']
    lr = config['learning_rate']
    betas = config['betas']
    gamma = config['discount_factor']
    K_epochs = config['ppo_epoch']
    ppo_clip = config['ppo_clip']

    torch.manual_seed(123)

    traffic_light = TrafficLight()
    counter = Counter()

    shared_model = PPO(state_dim, state_length, action_dim, exploration_param,
                       lr, betas, gamma, K_epochs, ppo_clip)

    shared_model.policy.share_memory()

    batch_buffer = shared_batch_buffer()

    # optimizer = optim.Adam(shared_model.policy.parameters(), lr=lr)

    processes = []
    p = mp.Process(target=chief,
                   args=(config, traffic_light, counter, shared_model,
                         batch_buffer))
    p.start()
    processes.append(p)
    for rank in range(num_agents):
        p = mp.Process(target=train,
                       args=(rank, config, traffic_light, counter,
                             shared_model, batch_buffer))
        p.start()
        processes.append(p)
    for p in processes:
        p.join()
Example #24
def train(args):
    base_dir = args.base_dir
    dirs = init_dir(base_dir)
    init_log(dirs['log'])
    config_dir = args.config_dir
    copy_file(config_dir, dirs['data'])
    config = configparser.ConfigParser()
    config.read(config_dir)
    in_test, post_test = init_test_flag(args.test_mode)

    # init env
    env = init_env(config['ENV_CONFIG'])
    logging.info('Training: s dim: %d, a dim %d, s dim ls: %r, a dim ls: %r' %
                 (env.n_s, env.n_a, env.n_s_ls, env.n_a_ls))

    # init step counter
    total_step = int(config.getfloat('TRAIN_CONFIG', 'total_step'))
    test_step = int(config.getfloat('TRAIN_CONFIG', 'test_interval'))
    log_step = int(config.getfloat('TRAIN_CONFIG', 'log_interval'))
    global_counter = Counter(total_step, test_step, log_step)

    # init centralized or multi agent
    seed = config.getint('ENV_CONFIG', 'seed')
    if env.agent == 'iddpg':
        model = IDDPG(env.n_s_ls, env.n_a_ls, env.n_w_ls, total_step,
                      config['MODEL_CONFIG'], seed=seed)
    elif env.agent == 'maddpg':  # TODO: add MADDPG
        model = MADDPG(env.n_s_ls, env.n_a_ls, env.n_w_ls, env.n_f_ls,
                       total_step, config['MODEL_CONFIG'], seed=seed)
    summary_writer = tf.summary.FileWriter(dirs['log'])
    trainer = Trainer(env, model, global_counter, summary_writer, in_test,
                      output_path=dirs['data'])
    trainer.run()

    # post-training test
    # if post_test:
    #     tester = Tester(env, model, global_counter, summary_writer, dirs['data'])
    #     tester.run_offline(dirs['data'])

    # save model
    final_step = global_counter.cur_step
    logging.info('Training: save final model at step %d ...' % final_step)
    model.save(dirs['model'], final_step)
Example #25
def test_generation(test_file, lexicon_file):

    lexicon = Lexicon(lexicon_file)

    counter = Counter()

    with open(test_file) as f:
        for test in yaml.safe_load(f):
            lemma = test.pop("lemma")
            location = test.pop("location", "")
            for parse, form in test.items():
                predicted = lexicon.generate(lemma, parse, context=location)
                if predicted is None:
                    counter.fail("didn't know how to work out {} {} {}".format(lemma, parse, form))
                elif strip_length(form) == strip_length(predicted):
                    counter.success()
                    continue
                elif strip_length(form) not in [strip_length(p) for p in predicted.split("/")]:
                    counter.fail("{} {} got {} instead of {} in {}".format(lemma, parse, predicted, form, location))
                else:
                    counter.skip("{} {} {} {} {}".format(lemma, parse, form, predicted, location))

    counter.results()
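The Counter here tallies test outcomes through success/fail/skip and prints a summary via results. A sketch of that interface; the message handling and the output format are guesses:

class Counter:
    def __init__(self):
        self.successes = 0
        self.failures = 0
        self.skips = 0

    def success(self):
        self.successes += 1

    def fail(self, message):
        self.failures += 1
        print("FAIL:", message)

    def skip(self, message):
        self.skips += 1
        print("SKIP:", message)

    def results(self):
        print("{} passed, {} failed, {} skipped".format(
            self.successes, self.failures, self.skips))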
Example #26
def train(args):
    base_dir = args.base_dir
    dirs = init_dir(base_dir)
    init_log(dirs['log'])
    config_dir = args.config_dir
    copy_file(config_dir, dirs['data'])
    config = configparser.ConfigParser()
    config.read(config_dir)
    in_test, post_test = init_test_flag(args.test_mode)

    # init env
    env = init_env(config['ENV_CONFIG'])
    logging.info('Training: s dim: %d, a dim %d, s dim ls: %r, a dim ls: %r' %
                 (env.n_s, env.n_a, env.n_s_ls, env.n_a_ls))

    # init step counter
    total_step = int(config.getfloat('TRAIN_CONFIG', 'total_step'))
    test_step = int(config.getfloat('TRAIN_CONFIG', 'test_interval'))
    log_step = int(config.getfloat('TRAIN_CONFIG', 'log_interval'))
    global_counter = Counter(total_step, test_step, log_step)

    # init centralized or multi agent
    seed = config.getint('ENV_CONFIG', 'seed')
    # coord = tf.train.Coordinator()

    # if env.agent == 'a2c':
    #     model = A2C(env.n_s, env.n_a, total_step,
    #                 config['MODEL_CONFIG'], seed=seed)
    if env.agent == 'ia2c':
        model = IA2C(env.n_s_ls,
                     env.n_a_ls,
                     env.n_w_ls,
                     total_step,
                     config['MODEL_CONFIG'],
                     seed=seed)
    elif env.agent == 'ma2c':
        model = MA2C(env.n_s_ls,
                     env.n_a_ls,
                     env.n_w_ls,
                     env.n_f_ls,
                     total_step,
                     config['MODEL_CONFIG'],
                     seed=seed)
    elif env.agent == 'iqld':
        model = IQL(env.n_s_ls,
                    env.n_a_ls,
                    env.n_w_ls,
                    total_step,
                    config['MODEL_CONFIG'],
                    seed=0,
                    model_type='dqn')
    else:
        model = IQL(env.n_s_ls,
                    env.n_a_ls,
                    env.n_w_ls,
                    total_step,
                    config['MODEL_CONFIG'],
                    seed=0,
                    model_type='lr')

    # disable multi-threading for safe SUMO implementation
    # threads = []
    summary_writer = tf.summary.FileWriter(dirs['log'])
    trainer = Trainer(env,
                      model,
                      global_counter,
                      summary_writer,
                      in_test,
                      output_path=dirs['data'])
    trainer.run()
    # if in_test or post_test:
    #     # assign a different port for test env
    #     test_env = init_env(config['ENV_CONFIG'], port=1)
    #     tester = Tester(test_env, model, global_counter, summary_writer, dirs['data'])

    # def train_fn():
    #     trainer.run(coord)

    # thread = threading.Thread(target=train_fn)
    # thread.start()
    # threads.append(thread)
    # if in_test:
    #     def test_fn():
    #         tester.run_online(coord)
    #     thread = threading.Thread(target=test_fn)
    #     thread.start()
    #     threads.append(thread)
    # coord.join(threads)

    # post-training test
    if post_test:
        tester = Tester(env, model, global_counter, summary_writer,
                        dirs['data'])
        tester.run_offline(dirs['data'])

    # save model
    final_step = global_counter.cur_step
    logging.info('Training: save final model at step %d ...' % final_step)
    model.save(dirs['model'], final_step)
Example #27
def train(args):
    base_dir = args.base_dir
    dirs = init_dir(base_dir)
    init_log(dirs['log'])
    config_dir = args.config_dir
    copy_file(config_dir, dirs['data'])
    config = configparser.ConfigParser()
    config.read(config_dir)
    in_test, post_test = init_test_flag(args.test_mode)

    # init env
    env = init_env(config['ENV_CONFIG'])
    logging.info('Training: s dim: %d, a dim %d, s dim ls: %r, a dim ls: %r' %
                 (env.n_s, env.n_a, env.n_s_ls, env.n_a_ls))

    # init step counter
    total_step = int(config.getfloat('TRAIN_CONFIG', 'total_step'))  #1e6
    test_step = int(config.getfloat('TRAIN_CONFIG', 'test_interval'))  #2e4
    log_step = int(config.getfloat('TRAIN_CONFIG', 'log_interval'))  #1e4
    global_counter = Counter(total_step, test_step, log_step)

    # init centralized or multi agent
    seed = config.getint('ENV_CONFIG', 'seed')  #12
    # coord = tf.train.Coordinator()

    if env.agent == 'ia2c':
        model = IA2C(env.n_s_ls,
                     env.n_a_ls,
                     env.n_w_ls,
                     total_step,
                     config['MODEL_CONFIG'],
                     seed=seed)
    elif env.agent == 'ma2c':
        model = MA2C(env.n_s_ls,
                     env.n_a_ls,
                     env.n_w_ls,
                     env.n_f_ls,
                     total_step,
                     config['MODEL_CONFIG'],
                     seed=seed)
    elif env.agent == 'codql':
        print('This is codql')
        num_agents = len(env.n_s_ls)
        print('num_agents:', num_agents)
        a_dim = env.n_a_ls[0]  # per-agent action dim (assumes all agents share it)
        print('a_dim:', a_dim)
        s_dim = env.n_s_ls[0]
        print('s_dim:', s_dim)
        s_dim_wait = env.n_w_ls[0]
        print('s_dim_wait:', s_dim_wait)
        model = MFQ(nb_agent=num_agents,
                    a_dim=a_dim,
                    s_dim=s_dim,
                    s_dim_wave=s_dim - s_dim_wait,
                    s_dim_wait=s_dim_wait,
                    config=config['MODEL_CONFIG'])
    elif env.agent == 'dqn':
        model = DQN(nb_agent=len(env.n_s_ls),
                    a_dim=env.n_a_ls[0],
                    s_dim=env.n_s_ls[0],
                    s_dim_wave=env.n_s_ls[0] - env.n_w_ls[0],
                    s_dim_wait=env.n_w_ls[0],
                    config=config['MODEL_CONFIG'],
                    doubleQ=False)  # doubleQ=False selects DQN; True selects double DQN
    elif env.agent == 'ddpg':
        model = DDPGEN(nb_agent=len(env.n_s_ls),
                       share_params=True,
                       a_dim=env.n_a_ls[0],
                       s_dim=env.n_s_ls[0],
                       s_dim_wave=env.n_s_ls[0] - env.n_w_ls[0],
                       s_dim_wait=env.n_w_ls[0])
    elif env.agent == 'iqld':
        model = IQL(env.n_s_ls,
                    env.n_a_ls,
                    env.n_w_ls,
                    total_step,
                    config['MODEL_CONFIG'],
                    seed=0,
                    model_type='dqn')
    else:
        model = IQL(env.n_s_ls,
                    env.n_a_ls,
                    env.n_w_ls,
                    total_step,
                    config['MODEL_CONFIG'],
                    seed=0,
                    model_type='lr')

    summary_writer = tf.summary.FileWriter(dirs['log'])
    trainer = Trainer(env,
                      model,
                      global_counter,
                      summary_writer,
                      in_test,
                      output_path=dirs['data'])
    trainer.run()

    # save model
    final_step = global_counter.cur_step
    logging.info('Training: save final model at step %d ...' % final_step)
    model.save(dirs['model'], final_step)
Example #28
def messageObjectToKind(view, messageObject, messageText=None):
    """
    Convert an email.Message object into a Chandler C{MailMessage} item.

    @param messageObject: A C{email.Message} object representation of a mail message
    @type messageObject: C{email.Message}
    @return: C{MailMessage}
    """

    assert isinstance(messageObject, Message.Message), \
           "messageObject must be a Python email.Message.Message instance"

    assert len(messageObject.keys()) > 0, \
           "messageObject data is not a valid RFC2822 message"

    assert messageText is None or isinstance(messageText, str), \
           "messageText can either be a string or None"

    mailStamp = None
    icsSummary = None
    icsDesc = None

    chandlerAttachments = getChandlerAttachments(messageObject)

    if chandlerAttachments["eimml"]:
        eimml = chandlerAttachments["eimml"][0]
        peer = getPeer(view, messageObject)

        if peer is None:
            # A peer address is required for eimml
            # deserialization. If there is no peer
            # then ignore the eimml data and return
            # an error flag.
            return (-1, None)

        matchingAddresses = []

        for address in addressMatchGenerator(peer):
            matchingAddresses.append(address)

        # The matchingAddresses list will at least contain the peer
        # address, since the peer is an EmailAddress Item and will
        # therefore be in the EmailAddressCollection index.
        statusCode, mailStamp = parseEIMML(view, peer, matchingAddresses,
                                           eimml)

        if statusCode != 1:
            # There was either an error during
            # processing of the eimml or the
            # eimml was older than the current
            # Item's state so it was ignored.
            return (statusCode, None)

    elif chandlerAttachments["ics"]:
        ics = chandlerAttachments["ics"][0]

        result = parseICS(view, ics, messageObject)

        if result is not None:
            # A result of None means an error prevented converting the
            # ics text to a Chandler Item; in that case the ics is
            # ignored and the rest of the message is parsed as usual.
            mailStamp, icsDesc, icsSummary = result

    if not mailStamp:
        mailStamp = MailMessage(itsView=view)
        mailStamp.fromEIMML = False

    if not IGNORE_ATTACHMENTS:
        # Save the original message text in a text blob
        if messageText is None:
            messageText = messageObject.as_string()

        mailStamp.rfc2822Message = dataToBinary(mailStamp, "rfc2822Message",
                                                messageText, 'message/rfc822',
                                                'bz2', False)

    if getattr(mailStamp, "messageId", None):
        # The presence of a messageId indicates that this message has
        # already been sent or received, and has thus been updated.
        mailStamp.isUpdated = True

    #if verbose():
    #    if messageObject.has_key("Message-ID"):
    #        messageId = messageObject["Message-ID"]
    #    else:
    #        messageId = "<Unknown Message>"
    #
    #    buf = ["Message: %s\n-------------------------------" % messageId]

    if not mailStamp.fromEIMML:
        # Build item.body by parsing the message only when it did not
        # come from eimml; eimml data already carries the body.

        if IGNORE_ATTACHMENTS:
            counter = None
        else:
            counter = Counter()

        bodyBuffer = {'plain': [], 'html': []}
        buf = None

        # Walk the MIME parts to extract the plain and html body text.
        __parsePart(view, messageObject, mailStamp, bodyBuffer, counter, buf)

        mailStamp.body = buildBody(bodyBuffer)

    __parseHeaders(view, messageObject, mailStamp, True, True)

    if icsSummary or icsDesc:
        # If ics summary or ics description exist then
        # add them to the message body
        mailStamp.body += buildICSInfo(mailStamp, icsSummary, icsDesc)

    #if verbose():
    #    trace("\n\n%s\n\n" % '\n'.join(buf))

    return (1, mailStamp)
Example #29
 def setUp(self):
     self.globalCounter = Counter()
     self.functionCounter = Counter()
     self.classCounter = Counter()
     self.traitCounter = Counter()
     self.typeParameterCounter = Counter()
Example #30
parser.add_argument('--feature', type=int, default=153, help='features num')
parser.add_argument('--force',
                    action='store_true',
                    help='force two leg together')
parser.add_argument('--start-epoch', type=int, default=0, help='start-epoch')

if __name__ == '__main__':
    args = parser.parse_args()
    os.environ['OMP_NUM_THREADS'] = '1'
    torch.manual_seed(args.seed)

    num_inputs = args.feature
    num_actions = 18

    traffic_light = TrafficLight()
    counter = Counter()

    ac_net = ActorCritic(num_inputs, num_actions)
    opt_ac = optim.Adam(ac_net.parameters(), lr=args.lr)

    shared_grad_buffers = Shared_grad_buffers(ac_net)
    shared_obs_stats = Shared_obs_stats(num_inputs)

    if args.resume:
        print("=> loading checkpoint ")
        checkpoint = torch.load('../../7.87.t7')
        #checkpoint = torch.load('../../best.t7')
        args.start_epoch = checkpoint['epoch']
        #best_prec1 = checkpoint['best_prec1']
        ac_net.load_state_dict(checkpoint['state_dict'])
        opt_ac.load_state_dict(checkpoint['optimizer'])