Example #1
def test_rle():
    assert rle("mississippi") == [
        ("m", 1),
        ("i", 1),
        ("s", 2),
        ("i", 1),
        ("s", 2),
        ("i", 1),
        ("p", 2),
        ("i", 1),
    ]
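For reference, a minimal sketch (not taken from any of the projects listed here) of an rle that would satisfy this test, assuming the (value, count) tuple convention the assertions imply:

from itertools import groupby

def rle(s):
    # one (value, run length) tuple per maximal run of equal items
    return [(char, sum(1 for _ in group)) for char, group in groupby(s)]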
Example #2
def zero_crossing(array):
    signs = map(sign, array)
    rles = rle(signs)
    runs_only = []
    for tup in rles:
        if tup[1] != 0:
            runs_only.append(tup[1])
    count_zc = 0
    for i in range(len(runs_only)-1):
        if runs_only[i] < 0 and runs_only[i+1] > 0:
            count_zc += 1
    return count_zc
Example #3
    def sleep_contiguous(moving, fs, min_valid_time=300):
        """ fs = sampling frequency (Hz), min_valid_time = min amount immobile time that counts as sleep (i.e 5 mins) """
        min_len = fs * min_valid_time
        r_sleep = rle(np.logical_not(moving))
        valid_runs = r_sleep[2] >= min_len
        r_sleep_mod = valid_runs & r_sleep[0]
        r_small = []
        counter = 0
        for i in r_sleep_mod:
            r_small += ([i] * r_sleep[2][counter])
            counter += 1

        return r_small
Example #4
 def test_rle(self):
     self.assertEqual(
         rle("mississippi"),
         [
             ("m", 1),
             ("i", 1),
             ("s", 2),
             ("i", 1),
             ("s", 2),
             ("i", 1),
             ("p", 2),
             ("i", 1),
         ],
     )
Example #5
 def save_map_rle(self, widget, event):
     print "save map"
     fcd = gtk.FileChooserDialog(title="Save map into file",
                                 parent=None,
                                 action=gtk.FILE_CHOOSER_ACTION_SAVE,
                                 buttons=(gtk.STOCK_SAVE, gtk.RESPONSE_OK),
                                 backend=None)
     response = fcd.run()
     if response == gtk.RESPONSE_OK:
         f = fcd.get_filename()
         self.ui.m.export(f)
         f2 = "%s%s" % (f, "temp")
         shutil.move(f, f2)
         r = rle()
         r.compress("%s%s" % (f, "temp"), f)
     fcd.destroy()
     print f
Example #6
def hmm_mean_length(state_array, delta_t=60):
    """State_array =  1D numpy array produced from a HMM decoder
        Total_states = numerical array denoting the states in 'state_array'
        Finds the mean length of each hidden state per array/fly """
    from rle import rle

    delta_t_mins = delta_t / 60

    v, s, l = rle(state_array)

    df = pd.DataFrame(data=zip(v, l), columns=['state', 'length'])
    df['length_adjusted'] = df['length'].map(lambda l: l * delta_t_mins)
    gb_bout = df.groupby('state').agg(
        **{'mean_length': ('length_adjusted', 'mean')})
    gb_bout.reset_index(inplace=True)

    return gb_bout
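Several snippets here (Examples #3 and #6 through #8, and #20) use an rle that returns three arrays and is unpacked as run values, start indices, run lengths (v, s, l). A hedged numpy sketch of that convention, for readability only and not the actual rle module those projects import:

import numpy as np

def rle(x):
    # return (run values, run start indices, run lengths) as arrays
    x = np.asarray(x)
    if x.size == 0:
        return x, np.array([], dtype=int), np.array([], dtype=int)
    change = np.flatnonzero(x[1:] != x[:-1]) + 1   # index where a new run begins
    starts = np.concatenate(([0], change))
    lengths = np.diff(np.concatenate((starts, [x.size])))
    return x[starts], starts, lengths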
Example #7
    def sleep_contiguous(self, column='moving', min_valid_time=300):
        """ fs = sampling frequency (Hz), min_valid_time = min amount immobile time that counts as sleep (i.e 5 mins) """

        from rle import rle

        t_delta = self['t'].iloc[1] - self['t'].iloc[0]
        fs = 1 / t_delta

        moving = self[column]

        min_len = fs * min_valid_time
        r_sleep = rle(np.logical_not(moving))
        valid_runs = r_sleep[2] >= min_len
        r_sleep_mod = valid_runs & r_sleep[0]
        r_small = []
        counter = 0
        for i in r_sleep_mod:
            r_small += ([i] * r_sleep[2][counter])
            counter += 1

        self['asleep'] = r_small
Example #8
def hmm_pct_transition(state_array, total_states):
    """State_array =  1D numpy array produced from a HMM decoder
        Total_states = numerical array denoting the states in 'state_array'
        Finds the proportion of instances of each state run per array/fly"""
    from rle import rle

    v, s, l = rle(state_array)

    states_dict = {}

    def average(a):
        total = a.sum()
        count = len(a)
        av = total / count
        return av

    for i in total_states:
        states_dict[f'{i}'] = average(np.where(v == i, 1, 0))

    state_list = [states_dict]
    df = pd.DataFrame(state_list)

    return df
Example #9
def test_escape():
    assert rle('\\\\') == '2\\' 
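The string tests in this and the following examples expect a '<count><char>' output (e.g. rle('kkk') == '3k', rle('') == ''). A hedged sketch of such an rle, for illustration only:

from itertools import groupby

def rle(s):
    # prefix each run of equal characters with its length
    return ''.join(f'{sum(1 for _ in group)}{char}' for char, group in groupby(s))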
Example #10
def test_Randers():
    assert rle('Rr') == '1R1r'
Example #11
def test_floats1():
    assert rle('3,14') == '131,1114'
Example #12
def test_floats():
    assert rle('3.14') == '131.1114'
Example #13
def test_enkod_dekod_tal():
    x = '1111'
    assert decode(rle(x)) == x
Example #14
def rle_rank(hand):
    ranks = get_ranks(hand)
    return sorted(rle(sorted(ranks)), reverse=True)
Example #15
def main():  # noqa: D103
    parser = argparse.ArgumentParser(description='Run DQN on Atari Breakout')
    parser.add_argument('--env', default='Breakout-v0', help='Atari env name')
    parser.add_argument('-o',
                        '--output',
                        default='../log/',
                        help='Directory to save data to')
    parser.add_argument('--seed', default=0, type=int, help='Random seed')
    parser.add_argument('--gamma',
                        default=0.99,
                        type=float,
                        help='Discount factor')
    parser.add_argument('--batch_size',
                        default=32,
                        type=int,
                        help='Minibatch size')
    parser.add_argument('--learning_rate',
                        default=0.0001,
                        type=float,
                        help='Learning rate')
    parser.add_argument(
        '--initial_epsilon',
        default=1.0,
        type=float,
        help='Initial exploration probability in epsilon-greedy')
    parser.add_argument('--final_epsilon',
                        default=0.05,
                        type=float,
                        help='Final exploration probability in epsilon-greedy')
    parser.add_argument(
        '--exploration_steps',
        default=2000000,
        type=int,
        help=
        'Number of steps over which the initial value of epsilon is linearly annealed to its final value'
    )
    parser.add_argument(
        '--num_samples',
        default=10000000,
        type=int,
        help='Number of training samples from the environment in training')
    parser.add_argument('--num_frames',
                        default=4,
                        type=int,
                        help='Number of frames to feed to Q-Network')
    parser.add_argument('--num_frames_mv',
                        default=10,
                        type=int,
                        help='Number of frames used to detect movement')
    parser.add_argument('--frame_width',
                        default=84,
                        type=int,
                        help='Resized frame width')
    parser.add_argument('--frame_height',
                        default=84,
                        type=int,
                        help='Resized frame height')
    parser.add_argument(
        '--replay_memory_size',
        default=1000000,
        type=int,
        help='Number of replay memory the agent uses for training')
    parser.add_argument(
        '--target_update_freq',
        default=10000,
        type=int,
        help='The frequency with which the target network is updated')
    parser.add_argument('--train_freq',
                        default=4,
                        type=int,
                        help='The frequency of actions wrt Q-network update')
    parser.add_argument('--save_freq',
                        default=200000,
                        type=int,
                        help='The frequency with which the network is saved')
    parser.add_argument('--eval_freq',
                        default=200000,
                        type=int,
                        help='The frequency with which the policy is evaluated')
    parser.add_argument(
        '--num_burn_in',
        default=50000,
        type=int,
        help=
        'Number of steps to populate the replay memory before training starts')
    parser.add_argument('--load_network',
                        default=False,
                        action='store_true',
                        help='Load trained model')
    parser.add_argument('--load_network_path',
                        default='',
                        help='the path to the trained model file')
    parser.add_argument(
        '--net_mode',
        default='dqn',
        help='choose the type of net: linear, dqn, or duel')
    parser.add_argument('--max_episode_length',
                        default=10000,
                        type=int,
                        help='max length of each episode')
    parser.add_argument('--num_episodes_at_test',
                        default=10,
                        type=int,
                        help='Number of episodes the agent plays at test')
    parser.add_argument('--ddqn',
                        default=False,
                        dest='ddqn',
                        action='store_true',
                        help='enable ddqn')
    parser.add_argument('--train',
                        default=True,
                        dest='train',
                        action='store_true',
                        help='Train mode')
    parser.add_argument('--test',
                        dest='train',
                        action='store_false',
                        help='Test mode')
    parser.add_argument('--no_experience',
                        default=False,
                        action='store_true',
                        help='do not use experience replay')
    parser.add_argument('--no_target',
                        default=False,
                        action='store_true',
                        help='do not use target fixing')
    parser.add_argument('--no_monitor',
                        default=False,
                        action='store_true',
                        help='do not record video')
    parser.add_argument('-p',
                        '--platform',
                        default='rle',
                        help='rle or atari. rle: rle; atari: gym-atari')
    parser.add_argument('-pl',
                        '--perlife',
                        default=False,
                        action='store_true',
                        help='use per-life episodes or not')
    parser.add_argument('-mv',
                        '--mv_reward',
                        default=False,
                        action='store_true',
                        help='use movement reward or not')
    parser.add_argument('-c',
                        '--clip_reward',
                        default=False,
                        action='store_true',
                        help='clip reward or not')
    parser.add_argument('--decay_reward',
                        default=False,
                        action='store_true',
                        help='decay reward or not')
    parser.add_argument('--expert_memory',
                        default=None,
                        help='path of the expert memory')
    parser.add_argument(
        '--initial_prob_replaying_expert',
        default=1.0,
        type=float,
        help='Initial probability of using expert replaying memory')
    parser.add_argument(
        '--final_prob_replaying_expert',
        default=0.05,
        type=float,
        help='Final probability of using expert replaying memory')
    parser.add_argument(
        '--steps_replaying_expert',
        default=1000000,
        type=float,
        help=
        '# steps over which the initial prob of replaying expert memory is linearly annealed to its final value'
    )
    parser.add_argument('--trace_dir',
                        default='',
                        help='the trace dir for expert')
    parser.add_argument('--trace2mem',
                        default=False,
                        action='store_true',
                        help='convert trace to memory')
    parser.add_argument('--mem_dump',
                        default='',
                        help='the path of memory dump')
    args = parser.parse_args()
    args.output = get_output_folder(args.output, args.env)

    if args.trace2mem:
        trace2mem(args)
        exit(0)

    if args.platform == 'atari':
        env = gym.make(args.env)
    else:
        rom_path = 'roms/' + args.env
        if args.no_monitor:
            env = rle(rom_path)
        else:
            env = rle(rom_path, record=True, path=args.output)
    print("Output saved to: ", args.output)
    print("Args used:")
    print(args)

    # here is where you should start up a session,
    # create your DQN agent, create your model, etc.
    # then you can run your fit method.

    num_actions = env.action_space.n
    print("Game ", args.env, " #actions: ", num_actions)
    dqn = DQNAgent(args, num_actions)
    if args.train:
        print("Training mode.")
        if args.perlife:
            env = RLEEnvPerLifeWrapper(env)
        dqn.fit(env, args.num_samples, args.max_episode_length)
    else:
        print("Evaluation mode.")
        dqn.evaluate(env, args.num_episodes_at_test, args.max_episode_length,
                     not args.no_monitor)
Example #16
def test_numbers():
    assert rle('123') == '111213'
Example #17
def test_weird():
    assert rle('§~*') == '1§1~1*'
Example #18
def test_nordic():
    assert rle('æøå') == '1æ1ø1å'
Example #19
def test_enkod_dekod_hypo(x):
    assert decode(rle(x)) == x
Example #20
    df2 = mago_df.xmv('treatment', treat)
    df2 = df2[df2['t_rel'] == 0]
    df2['bin'] = df2['interaction_t'].map(lambda t: bin * floor(t / bin))
    df2.reset_index(inplace=True)
    df = pd.merge(s, df2, how='outer', on=['id', 'bin'])
    df['col'] = np.where((df['t_rel'] == 0) & (df['change'] == True), 'purple',
                         df['col'])
    df['col'] = np.where((df['t_rel'] == 0) & (df['change'] == False),
                         'orangered', df['col'])
    df['bin'] = df['bin'].map(lambda t: t / (60 * 60))
    df = df[['id', 'state', 'col']]

    all_df = pd.DataFrame()

    for _, fly in df.groupby('id'):
        v, s, l = rle(fly['state'])

        list_runs = np.array([], dtype=int)
        for counter, run in enumerate(l):
            x = [counter] * run
            list_runs = np.append(list_runs, x)

        fly['run'] = list_runs

        counts = np.array([])
        for i in fly.groupby('run')['col'].apply(list):
            count = 0
            for q in i:
                if q == 'orangered':
                    count += 1
            counts = np.append(counts, count)
Example #21
def test_simple():
    assert rle('kkkaaa') == '3k3a'
Example #22
def test_rle2():
    assert rle('kkkkkkoooo') == '6k4o'
Example #23
def test_empty():
    assert rle('') == ''
Example #24
def test_spaces():
    assert rle('kkk eee  lll') == '3k1 3e2 3l'
Example #25
def rle_suit(hand):
    suits = get_suits(hand)
    return sorted(rle(sorted(suits)), reverse=True)
Example #26
def test_rle():
    assert rle('kkk') == '3k'
Example #27
def test_alot():
    assert rle(' ') == '1 '
Example #28
 def test_rle_empty(self):
     self.assertEqual(list(rle("")), [])
Example #29
from rle import rle
from ioutils import read_ints

if __name__ == "__main__":
    array = read_ints()
    length_and_run = rle(array)
    for _, run in length_and_run:
        print(run)
Example #30
def test_fun():
    assert rle('äâã') == '1ä1â1ã'