Example 1
def scheduler(rid, sid):
    one_route_crowd(rid)
    served = []
    dfirst = df.loc[rid]['departure_from_origin'].split(",")[sid]
    afirst = df.loc[rid]['arrival_at_destination'].split(',')[sid]
    dsec = get_sec(dfirst + ":00")
    asec = get_sec(afirst + ":00")
    print(dsec, asec)
    dif = (asec - dsec) / len(json.loads(df.loc[rid]['map_json_content']))
    timetable = []
    timetable.append(dsec)
    for i in range(len(json.loads(df.loc[rid]['map_json_content'])) - 2):
        timetable.append(dsec + ((i + 1) * dif))
    timetable.append(asec)
    enviro = env(crowd_new)
    agent = Agent(enviro.stops, timetable)
    for i in range(20000):
        ppl_served = 0
        while True:
            action = agent.decide_action(enviro.state)
            # print(len(agent.passengers))
            (done, nextState, (noppl,
                               pass1)) = enviro.step(action, agent.capacity,
                                                     agent.passengers)
            ppl_served += noppl
            agent.descending(nextState - 1)
            # print(len(pass1))
            agent.update_reward(action, nextState - 1, noppl)
            if done:
                if i % 1000 == 0:
                    print("done" + str(i))
                break
        served.append(ppl_served)
    return {"time": agent.time, "optimise": served}
Example 2
def PCRW(task_relation='<isAffiliatedTo>', MAXLEN=5):
    rootpath = './data/yagosmall/' + task_relation + '/'

    datapath = {'type2id': rootpath + 'type2id.json', 'relation2id': rootpath + 'relation2id.json', \
                'graph': rootpath + 'graph.pkl', 'ent2type': rootpath + 'ent2type.json', \
                'entity2id': rootpath + 'entity2id.json'}
    Env_a = env(datapath)
    Env_a.init_relation_query_state(task_relation)
    maxlen = MAXLEN
    Env_a.filter_query(maxlen)
    pairs = Env_a.filter_query
    random.shuffle(pairs)

    training_pairs = pairs[int(len(pairs) * 0.9):]
    test_pairs = pairs[:int(len(pairs) * 0.5)]
    if len(test_pairs) > 5000:
        test_pairs = test_pairs[:5000]
    allpaths = []
    for i in test_pairs:
        e1, e2 = i[0], i[1]
        paths = Env_a.path_traverse_BFS(e1, e2, max_len=MAXLEN)

        allpaths.append(paths)
    with open(rootpath + 'PCRW-%d' % MAXLEN + task_relation, 'w') as fin:
        wrt_str = []
        for paths in allpaths:
            M = []
            for path in paths:
                M.append('\t'.join(path))
            wrt_str.append('\n'.join(M))
        wrt_str = '\n'.join(wrt_str)
        fin.write(wrt_str)
    with open(rootpath + 'PCRW-%d' % MAXLEN + task_relation, 'r') as fin:
        Feature_SET = {}
        for raw_line in fin.readlines():
            line = raw_line.strip().split()
            feature = []
            for k, tok in enumerate(line):
                if k % 2 == 1:
                    feature.append(tok)
            feature = tuple(feature)
            if feature in Feature_SET:
                Feature_SET[feature] += 1
            else:
                Feature_SET[feature] = 1
    print('Path feature:', Feature_SET)
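Each line written by PCRW is a tab-separated path that alternates entities and relations, so keeping the odd-indexed tokens extracts the relation sequence used as the path feature. A small standalone illustration of that counting logic (the paths below are hypothetical, and collections.Counter is used only as shorthand for the dict-based counting above):

from collections import Counter

sample_paths = [
    "e1\t<livesIn>\te2\t<isLocatedIn>\te3",
    "e4\t<livesIn>\te5\t<isLocatedIn>\te6",
]
feature_counts = Counter(
    tuple(tok for k, tok in enumerate(line.split()) if k % 2 == 1)
    for line in sample_paths
)
print(feature_counts)  # Counter({('<livesIn>', '<isLocatedIn>'): 2})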
Example 3
def write_test_pairs(task_relation="<diedIn>", rootpath=None):
    datapath = {'type2id': rootpath + 'type2id.json', 'relation2id': rootpath + 'relation2id.json', \
                'graph': rootpath + 'graph.pkl', 'ent2type': rootpath + 'ent2type.json', \
                'entity2id': rootpath + 'entity2id.json'}
    Env_a = env(datapath)
    Env_a.init_relation_query_state(task_relation)

    maxlen = 8
    Env_a.filter_query(maxlen, 5000)
    pairs = Env_a.filter_query
    random.shuffle(pairs)
    test_pairs = pairs[:int(len(pairs) * 0.5)]
    with open(rootpath + 'test_positive_pairs', 'w') as fin:
        wstr = []
        for i in test_pairs:
            wstr.append(str(i[0]) + '\t' + str(i[1]))
        wstr = '\n'.join(wstr)
        fin.write(wstr)
    return
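write_test_pairs stores each pair as a tab-separated line. The load_pair helper used by train in Example 5 is not shown; a minimal sketch, assuming it reads back that same one-pair-per-line format:

def load_pair(path):
    # Assumed format: one "e1<TAB>e2" pair per line.
    pairs = []
    with open(path) as f:
        for line in f:
            parts = line.rstrip("\n").split("\t")
            if len(parts) >= 2:
                pairs.append((parts[0], parts[1]))
    return pairs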
Example 4
 def test_empty_dict(self):
     self.assertEqual(env('EMPTY_DICT'), {})
Example 5
def train(task_relation="<diedIn>",rootpath=None,epoch=5):
    datapath = {'type2id': rootpath + 'type2id.json', 'relation2id': rootpath + 'relation2id.json', \
         'graph': rootpath + 'graph.pkl', 'ent2type': rootpath + 'ent2type.json' ,\
         'entity2id': rootpath + 'entity2id.json'}
    Env_a = env(datapath)
    Env_a.init_relation_query_state(task_relation)
    batchsize=20
    maxlen=5
    po = Policy_memory(Env_a,300, 100, Env_a.rel_num)
    # Env_a.filter_query(maxlen,5000)
    # pairs = Env_a.filter_query
    # random.shuffle(pairs)
    # training_pairs=pairs
    # test_pairs=pairs[:int(len(pairs)*0.5)]
    # valid_paris=pairs[int(len(pairs)*0.5):int(len(pairs)*0.6)]
    train_path=rootpath+'/'+task_relation+'train_pairs'
    valid_path = rootpath + '/' + task_relation + 'valid_pairs'
    training_pairs=load_pair(train_path)
    valid_pairs=load_pair(valid_path)

    print('Train pairs:',len(training_pairs))
    print('Valid pairs:',len(valid_pairs))
    #print('Test pairs:',len(test_pairs))
    agent_a = agent(po, Env_a,policymethod='GRU')
    if global_device=='cuda:0':
        po=po.cuda()

    try_count, batch_loss, ave_reward, ave_success = 0, 0, 0, 0
    opt=torch.optim.Adam(agent_a.parameters()+Env_a.parameters(),lr=0.001)
    for ep in range(epoch):
        opt.zero_grad()
        random.shuffle(training_pairs)
        for query in training_pairs:
            try:
                e1, e2 = query[0], query[1]
                e1, e2 = Env_a.entity2id[e1], Env_a.entity2id[e2]
                with torch.no_grad():
                    traj, success = agent_a.trajectory(e1, e2,max_len=maxlen)
                try_count += 1
            except KeyError:
                continue
            logger.MARK(Env_a.traj_for_showing(traj))
            traj_loss=0
            po.zero_history()
            traj_reward=0

            for i in traj:

                ave_reward+=i[4]
                traj_reward+=i[4]
                loss=agent_a.update_memory_policy(i)
                loss.backward()
                traj_loss+=loss.cpu()
            if success:
                ave_success+=1
            batch_loss+=traj_loss/len(traj)
            if try_count%batchsize==0 and try_count>0:
                opt.step()
                opt.zero_grad()
                logger.info('|%d epoch|%d episode|Batch_loss:%.4f|Ave_reward:%.3f|Ave_success:%%%.2f|'%(ep,try_count,batch_loss*100/batchsize,ave_reward/batchsize,ave_success*100/batchsize))
                batch_loss,ave_reward,ave_success=0,0,0

            if try_count%(20*batchsize)==0 and try_count>0:
                valid(valid_pairs,Env_a,agent_a,batchsize,maxlen)

        # NOTE: test_pairs is not defined in this variant (the pair filtering above is
        # commented out); load or build test pairs before this call, or it raises NameError.
        generate_paths(Env_a,agent_a,test_pairs,rootpath+task_relation+'.paths',maxlen)
Example 6
 def test_env_driver_with_undefined_browser(self):
     self.assertEqual(env_driver(env('DB')), False)
Example 7
 def test_dict_values(self):
     self.assertEqual(env('DICT')['key1'], 'value1')
     self.assertEqual(env('DICT')['key2'], 'value2')
Example 8
 def test_env_with_defined_key(self):
     self.assertEqual(env('BROWSER'), 'chrome')
Example 9
 def test_env_driver_with_undefined_browser(self):
     self.assertEqual(env_driver(env('DB')), False)
Example 10
 def test_dict_values(self):
     self.assertEqual(env('DICT')['key1'], 'value1')
     self.assertEqual(env('DICT')['key2'], 'value2')
Example 11
# Pull in the command factory for the second example.
from Helpers.Commands import CommandFactory, Kwargs
from selenium.webdriver.support.wait import WebDriverWait

# This is where the WebDriver is instantiated. Instead
# of instantiating it directly, use the `env` and
# `env_driver` functions to grab it based on the
# `.env` configuration file.
# This could be written as:
#
#   browser = env("BROWSER")
#   web_driver = env_driver(browser)
#   with quitting(web_driver()) as driver:
#       pass
#
with quitting(env_driver(env("BROWSER"))()) as driver:
    wait = WebDriverWait(driver, 30)

    # Pass the web driver to the site automation along with anything
    # else it might need to do its job. This could include an
    # instance of WebDriverWait, and even the collection of
    # Models.
    google_search = GoogleExample.GoogleSearch(driver, wait, Models)
    bing_search = BingExample.BingSearch(driver, wait, Models)

    # Do stuff with your controllers.
    google_search.do_search('google wiki')
    sleep(5)
    bing_search.do_search('bing wiki')
    sleep(5)
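The quitting context manager comes from the project's helpers and is not shown here. A minimal sketch of what it might look like, assuming it only guarantees that the driver's quit() runs when the block exits:

from contextlib import contextmanager

@contextmanager
def quitting(driver):
    # Assumed behaviour: yield the driver, always call quit() on exit.
    try:
        yield driver
    finally:
        driver.quit()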
Example 12
 def test_get_dict(self):
     self.assertIsInstance(env('DICT'), dict)
Example 13
 def test_list_values(self):
     self.assertEqual(env('LIST')[0], 'Item1')
     self.assertEqual(env('LIST')[1], 'Item2')
Example 14
 def test_get_list(self):
     self.assertIsInstance(env('LIST'), list)
Example 15
 def test_get_database_with_undefined_database_type(self):
     self.assertEqual(get_database(env('BROWSER')), False)
Example 16
 def test_get_database_with_defined_database_type(self):
     self.assertIsInstance(get_database(env('DB_TYPE')), SqliteDatabase)
Example 17
 def init_env(self, task_relation):
     self.Env_a = env(self.datapath)
     self.Env_a.init_relation_query_state(task_relation)
Example 18
 def test_empty_list(self):
     self.assertEqual(env('EMPTY_LIST'), [])
Example 19
        res = self.policy.forward(cur_e, cur_r, target_e)
        res = -torch.log(res) * y
        loss = torch.sum(res)
        return loss

    def get_state_from_env(self, ent):
        pass


if __name__ == '__main__':
    torch.device('cpu')
    rootpath = './data/yago10w_RL/'
    datapath = {'type2id': rootpath + 'type2id.json', 'relation2id': rootpath + 'relation2id.json' \
           , 'graph': rootpath + 'graph.pkl', 'ent2type': rootpath + 'ent2type.json' \
           , 'entity2id': rootpath + 'entity2id.json' }
    hu = env(datapath)
    hu.init_relation_query_state('<isCitizenOf>')

    po = Policy(300, 32, 200)
    po.init()

    ag = agent(po, hu)
    #print(hu.relation2id)
    count = 0
    for k, fact in enumerate(hu.query):
        e1, e2 = fact[0], fact[1]
        e1, e2 = hu.entity2id[e1], hu.entity2id[e2]

        res = ag.trajectory(e1, e2, 5)
        print(res)
Example 20
 def test_empty_dict(self):
     self.assertEqual(env('EMPTY_DICT'), {})
Example 21
 def test_env_with_defined_key(self):
     self.assertEqual(env('BROWSER'), 'chrome')
Example 22
 def test_get_database_with_undefined_database_type(self):
     self.assertEqual(get_database(env('BROWSER')), False)
Example 23
 def test_env_driver_with_defined_browser(self):
     self.assertEqual(env_driver(env('BROWSER')), webdriver.Chrome)
Example 24
 def test_get_dict(self):
     self.assertIsInstance(env('DICT'), dict)
Example 25
 def test_get_database_with_defined_database_type(self):
     self.assertIsInstance(get_database(env('DB_TYPE')), SqliteDatabase)
Example 26
 def test_env_driver_with_defined_browser(self):
     self.assertEqual(env_driver(env('BROWSER')), webdriver.Chrome)
Example 27
 def test_get_list(self):
     self.assertIsInstance(env('LIST'), list)
Example 28
def train(task_relation="<diedIn>", rootpath=None, epoch=5):
    datapath = {'type2id': rootpath + 'type2id.json', 'relation2id': rootpath + 'relation2id.json', \
         'graph': rootpath + 'graph.pkl', 'ent2type': rootpath + 'ent2type.json' ,\
         'entity2id': rootpath + 'entity2id.json'}
    Env_a = env(datapath)
    Env_a.init_relation_query_state(task_relation)
    batchsize = 20
    maxlen = 5
    po = Policy_memory(Env_a, 300, 100, Env_a.rel_num)
    Env_a.filter_query(maxlen, 5000)
    pairs = Env_a.filter_query
    random.shuffle(pairs)

    training_pairs = pairs
    test_pairs = pairs[:int(len(pairs) * 0.5)]
    reward_record = []
    success_record = []
    path_length = 0
    valid_pairs = pairs[int(len(pairs) * 0.5):int(len(pairs) * 0.6)]
    print('Train pairs:', len(training_pairs))
    print('Valid pairs:', len(valid_pairs))
    print('Test pairs:', len(test_pairs))
    agent_a = agent(po, Env_a, policymethod='GRU')
    if global_device == 'cuda:0':
        po = po.cuda()

    try_count, batch_loss, ave_reward, ave_success = 0, 0, 0, 0
    opt = torch.optim.Adam(agent_a.parameters() + Env_a.parameters(), lr=0.001)
    for ep in range(epoch):
        opt.zero_grad()
        random.shuffle(training_pairs)
        for query in training_pairs:
            try:
                e1, e2 = query[0], query[1]
                e1, e2 = Env_a.entity2id[e1], Env_a.entity2id[e2]
                with torch.no_grad():
                    traj, success = agent_a.trajectory(e1, e2, max_len=maxlen)
                try_count += 1
            except KeyError:
                continue

            logger.MARK(Env_a.traj_for_showing(traj))

            traj_loss = 0
            po.zero_history()
            traj_reward = 0

            for i in traj:

                ave_reward += i[4]
                traj_reward += i[4]
                loss = agent_a.update_memory_policy(i)
                loss.backward()
                traj_loss += loss.cpu()
            if success:
                ave_success += 1
                path_length += len(traj) - 1
                success_record.append(1)
            else:
                success_record.append(0)
            reward_record.append(traj_reward)
            batch_loss += traj_loss / len(traj)
            if try_count % batchsize == 0 and try_count > 0:
                opt.step()
                opt.zero_grad()
                logger.info(
                    '|%d epoch|%d episode|Batch_loss:%.4f|Ave_reward:%.3f|Ave_success:%%%.2f|ave path length:%.2f|'
                    % (ep, try_count, batch_loss * 100 / batchsize,
                       ave_reward / batchsize, ave_success * 100 / batchsize,
                       path_length / max(ave_success, 1)))  # max() guards the case of no successes in this batch
                batch_loss, ave_reward, ave_success, path_length = 0, 0, 0, 0

            if try_count % (20 * batchsize) == 0 and try_count > 0:
                valid(valid_pairs, Env_a, agent_a, batchsize, maxlen)

        generate_paths(Env_a, agent_a, test_pairs,
                       rootpath + task_relation + '.paths', maxlen)

    success = ave_smooth(success_record, 20)
    reward = ave_smooth(reward_record, 20)

    with open(rootpath + task_relation + 'sucess_record_without.txt',
              'w') as fin:
        wstr = '\n'.join([str(i) for i in success])
        fin.write(wstr)
    with open(rootpath + task_relation + 'reward_record_without.txt',
              'w') as fin:
        wstr = '\n'.join([str(i) for i in reward])
        fin.write(wstr)

    with open(rootpath + task_relation + 'test_positive_pairs', 'w') as fin:
        wstr = []
        for i in test_pairs:
            wstr.append(str(i[0]) + '\t' + str(i[1]))
        wstr = '\n'.join(wstr)
        fin.write(wstr)
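ave_smooth, applied to the success and reward records above, is not included in the snippet. A minimal sketch, assuming it is a simple trailing moving average over the given window:

def ave_smooth(values, window):
    # Assumed behaviour: trailing moving average with the given window size.
    smoothed = []
    for i in range(len(values)):
        chunk = values[max(0, i - window + 1):i + 1]
        smoothed.append(sum(chunk) / len(chunk))
    return smoothed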
Example 29
 def test_list_values(self):
     self.assertEqual(env('LIST')[0], 'Item1')
     self.assertEqual(env('LIST')[1], 'Item2')
Example 30
    options = get_options()

    if options.Test:
        vis_update_params['test_mode_on'] = True
        sumoBinary = checkBinary('sumo-gui')
    elif options.Train:
        vis_update_params['test_mode_on'] = False
        sumoBinary = checkBinary('sumo')
    else:
        print("Initializing Test mode as default")
        vis_update_params['test_mode_on'] = True
        sumoBinary = checkBinary('sumo-gui')

    ## ----- ##
    Proudhon = env(sumoBinary=sumoBinary)  # env.__init__ and template loading
    # environment reset inside episode()
    ## ----- ##

    episode_num = 0

    Algorithm_for_RL, environment_for_next_episode, episode_reward, episode_reward_list = SimTools.episode(sumoBinary=sumoBinary, Proudhon=Proudhon)
    # total_reward_per_episode.append(episode_reward)   #RLcomment
    # reward_history_per_episode.append(episode_reward_list) #RLcomment

    ## --- ##
    environment_for_next_episode.reset(sumoBinary=sumoBinary)
    ## --- ##

    while(episode_num < max_num_episodes):
Example 31
 def test_empty_list(self):
     self.assertEqual(env('EMPTY_LIST'), [])
Example 32
 def __init__(self, bunshin_name, bunshin_type, God_brain):
     self.environment = env(bunshin_name=bunshin_name,
                            bunshin_type=bunshin_type,
                            God_brain=God_brain)
     self.bunshin_type = bunshin_type
Example 33
    def episode(sumoBinary, RB_RLAlgorithm=None, Proudhon=None, episode_num=0):
        ########################
        # 1 inits
        done = False  # are we done with the episode or not
        step = 0  # step number
        if (Proudhon is None):
            Proudhon = env(sumoBinary=sumoBinary)


        # Proudhon.reset()  #TempComment

        # if(RB_RLAlgorithm is None): #RLcomment
        #     algo_params = q_learning_params  # from Config.py #RLcomment
        #     RB_RLAlgorithm = RLAlgorithm(Proudhon, algo_params= algo_params,
        #     load_q_table = load_q_table, test_mode_on = vis_update_params['test_mode_on'])  # Algorithm for RB Agent #RLcomment
        # ## ######################

        ########################
        # 2 init measurements
        traci.simulationStep()  # After stepping
        Proudhon.get_emer_start_lane()

        # (communication from vehicle):
        getters(Proudhon.list_of_vehicles)  # measure all values from environment (local variable.. since some vehicles exited)
        # (communication to environment):
        Proudhon.measure_full_state()  # measure all values into our agents
        # (communication to algorithm/agent):
        # new_observed_state_for_this_agent = Proudhon.observed_state[0]    #RLcomment

        # Chose Action from Feasible Actions:
        # feasible_actions_for_current_state = Proudhon.get_feasible_actions(Proudhon.list_of_vehicles[1]) #RLcomment
        # chosen_action = RB_RLAlgorithm.pickAction(feasible_actions_for_current_state, new_observed_state_for_this_agent) #RLcomment
        # RB_RLAlgorithm.applyAction(chosen_action, Proudhon.list_of_vehicles[1])  # Request Action on Agent #RLcomment

        # episode_reward = 0    #RLcomment
        # episode_reward_list = []  #RLcomment
        ########################

        # 3: MAIN LOOP
        if (episode_num % vis_update_params['every_n_episodes'] == 0): #TempComment
            print(f'E:{episode_num: <{6}}|S:{0: <{4}} | '
                  f'MaxPossible: {Proudhon.max_possible_cars: <{4}} | '
                  f'ActualPerLane: { [ vehicles_data[i] for i in range(num_lanes) ] } |'
                  f'NumVehicles: {fill_str(str(len(Proudhon.list_of_vehicles)), 5)}')
        while traci.simulation.getMinExpectedNumber() > 0:

            # 3.1: Store last states
            amb_last_velocity = Proudhon.emer.spd
            # last_observed_state = Proudhon.observed_state #RLComment
            # last_observed_state_for_this_agent = last_observed_state #RLcomment

            # ----------------------------------------------------------------- #
            # 3.2:   M O V E      O N E      S I M U L A T I O N       S T E P
            # ----------------------------------------------------------------- #
            traci.simulationStep()  # actual action applying
            step += 1

            # TODO: Turn this into are_we_ok function
            if (Proudhon.list_of_vehicles[0].getL() != Proudhon.emer_start_lane and enable_checks):
                raise ValueError(
                    f"Ambulance Changed lane from {Proudhon.emer_start_lane} to {Proudhon.list_of_vehicles[0].getL()} on step {step}. "
                    f"\nAmbulance Should not change lane. Quitting.")

            if (step % vis_update_params['every_n_iters'] == 0 and episode_num % vis_update_params['every_n_episodes'] == 0): # print step info   #TempComment
                print(f'E:{episode_num: <{6}}|S:{step: <{4}} |' #TempComment
                      f'EmerVel: {fill_str(str(Proudhon.emer.spd), 5)} |'
                      f'EmerGoalDist: {fill_str(str(Proudhon.amb_goal_dist-Proudhon.emer.lane_pose), 5)} |'
                      f'NumVehicles: {fill_str(str(len(Proudhon.list_of_vehicles)), 5)}')

            # ----------------------------------------------------------------- #

            # 3.3: measurements and if we are done check
            getters(Proudhon.list_of_vehicles)
            Proudhon.measure_full_state()
            # new_observed_state_for_this_agent = Proudhon.observed_state[0]  #RLcomment

            done = Proudhon.are_we_done(full_state=Proudhon.full_state, step_number=step)

            # 3.4: reward last step's chosen action
            # reward = Proudhon.calc_reward(amb_last_velocity, done, step)  #RLcomment
            # episode_reward += reward  # for history #RLcomment
            # episode_reward_list.append(reward)  # for history #RLcomment

            # 3.6: Feasibility check for current_state (for next step)
            # feasible_actions_for_current_state = Proudhon.get_feasible_actions(Proudhon.list_of_vehicles[1]) #RLcomment

            # 3.5: update q table using backward reward logic
            # RB_RLAlgorithm.update_q_table(chosen_action, reward, new_observed_state_for_this_agent,  #RLcomment
            #                               last_observed_state_for_this_agent, feasible_actions_for_current_state)  #RLcomment

            if (done):  # DO NOT REMOVE THIS (IT BREAKS IF WE ARE DONE)
                if (episode_num % vis_update_params['every_n_episodes'] == 0):
                    if (
                            done == 1):  # TODO: Remove episode_end_reason outside the print check -- we might need it elsewhere
                        episode_end_reason = "max steps"
                    elif (done == 2):
                        episode_end_reason = "ambulance goal"
                    else:
                        raise ValueError(f"Episode: {episode_num} done  is True ={done} but reason not known !")

                    print(f'E:{episode_num: <{6}}|EndStep:{step: <{4}}')
                break

                # 3.7: Actually Choose Action from feasible ones (for next step)
            # chosen_action = RB_RLAlgorithm.pickAction(feasible_actions_for_current_state, new_observed_state_for_this_agent)  #RLcomment

            # 3.8: Request environment to apply new action (Request action on Agent for next step)
            # Action is still not applied here, but on #3.2
            # RB_RLAlgorithm.applyAction(chosen_action, Proudhon.list_of_vehicles[1])   #RLcomment

        # Episode end
        sys.stdout.flush()

        # 4: Update Epsilon after episode is done
        # old_epsilon = RB_RLAlgorithm.epsilon  #RLcomment
        # RB_RLAlgorithm.epsilon = RB_RLAlgorithm.min_epsilon + (RB_RLAlgorithm.max_epsilon - RB_RLAlgorithm.min_epsilon) * \   #RLcomment
        #                         np.exp(-RB_RLAlgorithm.decay_rate * episode_num)  # DONE: Change epsilon to update every episode not every iteration  #RLcomment

        if (episode_num % vis_update_params['every_n_episodes'] == 0):
            print(f'\n\nE:{episode_num: <{6}}| END:{step: <{4}} |'          
                  f'reason: {episode_end_reason: <{15}}')
            print('-'*157)
            print('=' * 157)
            print('\n')

        if (vis_update_params['print_reward_every_episode'] and episode_num % vis_update_params['every_n_episodes'] != 0):
            print(f'E:{episode_num: <{6}}| END:{step: <{4}} | ')
            #      f'finalCumReward: ' + str(episode_reward)[:6] + ' ' * max(0, 6 - len(str(episode_reward))) +" | ")   #RLcomment

        # return RB_RLAlgorithm, Proudhon, episode_reward, episode_reward_list  #RLcomment
        return None, Proudhon, None, None
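The step and episode logging in episode() uses a fill_str helper that is not part of the snippet. A minimal sketch, assuming it pads a string to a fixed width so the log columns line up:

def fill_str(text, width):
    # Assumed behaviour: right-pad with spaces to at least `width` characters.
    return text + " " * max(0, width - len(text))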
Example 34
from selenium.webdriver.support.wait import WebDriverWait

# This is where the WebDriver is instantiated. Instead
# of instantiating it directly, use the `env` and
# `env_driver` functions to grab it based on the
# `.env` configuration file.
# This could be written as:
#
#   browser = env("BROWSER")
#   web_driver = env_driver(browser)
#   with quitting(web_driver()) as driver:
#       pass
#


with quitting(env_driver(env("BROWSER"))()) as driver:
    wait = WebDriverWait(driver, 30)

    # Pass the web driver to the site automation along with anything
    # else it might need to do its job. This could include an
    # instance of WebDriverWait, and even the collection of
    # Models.
    google_search = GoogleExample.GoogleSearch(driver, wait, Models)
    bing_search = BingExample.BingSearch(driver, wait, Models)

    # Do stuff with your controllers.
    google_search.do_search('google wiki')
    sleep(5)
    bing_search.do_search('bing wiki')
    sleep(5)