Example #1
0
import numpy as np
import swmm


def swmm_states(Network, state):
    """Query `state` (e.g. swmm.DEPTH) for every node in Network and
    return the readings as a 1 x N row vector."""
    temp = []
    for i in Network:
        temp.append(swmm.get(i, state, swmm.SI))
    temp = np.asarray(temp)
    temp = np.reshape(temp, (1, len(temp)))
    return temp
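A minimal usage sketch for the helper above, assuming a model has already been loaded with swmm.initialize and that 'P1' and 'P2' are hypothetical node IDs from that model:

# Hypothetical node IDs; substitute nodes from your own .inp file.
nodes = ['P1', 'P2']
depths = swmm_states(nodes, swmm.DEPTH)      # shape (1, 2) row vector
floods = swmm_states(nodes, swmm.FLOODING)   # same layout, flooding values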
Example #2
0
def swmm_track(pond, attributes=["depth", "inflow", "outflow", "flooding"],
               controlled=False):
    """Read the requested attributes for a pond; outflow is read from the
    pond's orifice and is reported as 0.0 when the pond is uncontrolled."""
    att_commands = {'depth': swmm.DEPTH,
                    'inflow': swmm.INFLOW,
                    'outflow': swmm.FLOW,
                    'flooding': swmm.FLOODING}
    temp = []
    for i in attributes:
        if i == 'outflow':
            if controlled:
                temp.append(swmm.get(pond.orifice_id, att_commands[i], swmm.SI))
            else:
                temp.append(0.0)
        else:
            temp.append(swmm.get(pond.pond_id, att_commands[i], swmm.SI))
    temp = np.asarray(temp)
    return temp
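A usage sketch for swmm_track, assuming a hypothetical pond object that exposes the pond_id and orifice_id attributes the function expects:

# `pond` is assumed to expose .pond_id and .orifice_id (see above).
obs = swmm_track(pond, attributes=["depth", "inflow"])
depth, inflow = obs   # values arrive in the order the attributes were requested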
Example #3
0
    # Simulation Tracker
    reward_sim = []
    outflow_sim = []

    print("episode number :", episode_tracker)
    print("exploration :", epsilon_value[episode_tracker])

    while episode_timer < time_sim:
        t_epsi += 1
        episode_timer += 1

        # Look at what's happening
        # Heights
        temp_height = np.asarray(
            [swmm.get(i, swmm.DEPTH, swmm.SI) for i in nodes_list])

        # Gate Positions
        temp_gate = np.asarray(gates.current_gate)

        # Input States
        input_states = np.append(
            temp_height, temp_gate).reshape(
            1, len(temp_height) + len(temp_gate))

        # Action
        q_values = prof_x.ac_model.predict_on_batch(input_states)

        # Policy
        action = epsi_greedy(len(action_space), q_values,
                             epsilon_value[episode_tracker])
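The loop relies on an epsi_greedy helper that is not shown on this page; a minimal NumPy sketch of such an epsilon-greedy policy, assuming it returns a discrete action index, could look like this:

import numpy as np

def epsi_greedy(action_count, q_values, epsilon):
    # Explore a random action with probability epsilon,
    # otherwise exploit the action with the highest Q value.
    if np.random.rand() < epsilon:
        return np.random.randint(0, action_count)
    return int(np.argmax(q_values))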
Example #4
0
        # Observe the current states
        for i in nodes_controlled.keys():
            agents_dqn[i].state_vector = swmm_states(states_controlled[i],
                                                     swmm.DEPTH)
        # Take action
        for i in nodes_controlled.keys():
            action_step = agents_dqn[i].actions_q(epsilon_value[time_sim],
                                                  action_space)
            agents_dqn[i].action_vector = action_step / 100.0
            swmm.modify_setting(controlled_ponds[i].orifice_id,
                                agents_dqn[i].action_vector)
            current_gate = agents_dqn[i].action_vector

        # SWMM step
        swmm.run_step()

        # Receive the new rewards
        outflow = swmm.get('ZOF1', swmm.INFLOW, swmm.SI)
        water_level = swmm.get('93-50077', swmm.DEPTH, swmm.SI)
        outflow_track.append(outflow)
        overflows = swmm_states(all_nodes, swmm.FLOODING)
        gate_change = np.abs(current_gate - temp_gate)
        temp_gate = current_gate
        r_temp = reward_function(water_level, outflow, gate_change)

        for i in nodes_controlled.keys():
            agents_dqn[i].rewards_vector = r_temp

        # Observe the new states
        for i in nodes_controlled.keys():
            agents_dqn[i].state_new_vector = swmm_states(
                states_controlled[i], swmm.DEPTH)
        # Update Replay Memory
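The snippet ends at the replay-memory update. A sketch of what such an update typically stores, assuming a deque-backed buffer rather than the agents' actual (unshown) memory class:

from collections import deque

replay_memory = deque(maxlen=10000)  # hypothetical capacity

def update_replay_memory(agent):
    # Store the (state, action, reward, next state) transition
    # observed on this step.
    replay_memory.append((agent.state_vector,
                          agent.action_vector,
                          agent.rewards_vector,
                          agent.state_new_vector))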
Example #5
0
    # Simulation Tracker
    reward_sim = []
    outflow_sim = []

    print("episode number :", episode_tracker)
    print("exploration :", epsilon_value[episode_tracker])

    while episode_timer < time_sim:
        t_epsi += 1
        episode_timer += 1

        # Look at what's happening
        # Heights
        temp_height = np.asarray(
            [swmm.get(i, swmm.DEPTH, swmm.SI) for i in nodes_list])

        # Gate Positions
        temp_gate = np.asarray(gates.current_gate)

        # Input States
        input_states = np.append(temp_height, temp_gate).reshape(
            1,
            len(temp_height) + len(temp_gate))

        # Action
        q_values = prof_x.ac_model.predict_on_batch(input_states)

        # Policy
        action = epsi_greedy(len(action_space), q_values,
                             epsilon_value[episode_tracker])
Example #6
0
    swmm.initialize(inp)
    # Simulation Tracker
    reward_sim = []
    outflow_sim = []

    print("episode number :", episode_tracker)
    print("exploration :", epsilon_value[episode_tracker])

    while episode_timer < time_sim:
        t_epsi += 1
        episode_timer += 1
        # print(episode_timer)
        # Look at what's happening
        # Heights
        temp_height = np.asarray(
            [swmm.get(i, swmm.DEPTH, swmm.SI) for i in con_ponds])

        # Gate Positions
        temp_gate = np.asarray(gates.current_gate)
        if episode_timer % 5000 == 0:
            # Periodic progress log: depth of the first controlled pond.
            print(episode_timer,
                  swmm.get(con_ponds[0], swmm.DEPTH, swmm.SI))
        # Input States
        input_states = np.append(temp_height, temp_gate).reshape(
            1,
            len(temp_height) + len(temp_gate))

        # Action
        q_values = prof_x.ac_model.predict_on_batch(input_states)

        # Policy
        action = epsi_greedy(len(action_space), q_values,
                             epsilon_value[episode_tracker])
Example #7
0
# ***********************************************************************
#  Step Running
# ***********************************************************************

# Main loop: finished when the simulation time is over.
while not swmm.is_over():

    # ----------------- Run step and retrieve simulation time -----------

    time.append(swmm.get_time())
    swmm.run_step()  # Step 2

    # --------- Retrieve & modify information during simulation ---------
    # Retrieve information about flow in C-5
    f = swmm.get('C-5', swmm.FLOW, swmm.SI)
    # Store the flow value
    flow.append(f)
    # Retrieve information about volume in V-1
    v = swmm.get('V-1', swmm.VOLUME, swmm.SI)
    # Store the volume value
    vol.append(v)

    # --------------------------- Control Actions -----------------------

    # If the flow in C-5 is greater than or equal to 2 m3/s, the gate
    # upstream of the link is completely closed; otherwise it is
    # completely opened.

    if f >= 2:
        swmm.modify_setting('R-4', 0)
    else:
        swmm.modify_setting('R-4', 1)
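The same bang-bang rule can be factored into a small helper so that the threshold and element IDs are not hard-coded; a sketch, with link_id, gate_id, and threshold as illustrative parameters:

def bang_bang_control(link_id, gate_id, threshold=2.0):
    # Close the upstream gate when flow reaches the threshold,
    # open it fully otherwise.
    f = swmm.get(link_id, swmm.FLOW, swmm.SI)
    swmm.modify_setting(gate_id, 0.0 if f >= threshold else 1.0)
    return f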
Example #8
0
        # Observe the current states
        for i in nodes_controlled.keys():
            agents_dqn[i].state_vector = swmm_states(states_controlled[i],
                                                     swmm.DEPTH)
        # Take action
        for i in nodes_controlled.keys():
            action_step = agents_dqn[i].actions_q(epsilon_value[time_sim],
                                                  action_space)
            agents_dqn[i].action_vector = action_step / 100.0
            swmm.modify_setting(controlled_ponds[i].orifice_id,
                                agents_dqn[i].action_vector)
            current_gate = agents_dqn[i].action_vector

        # SWMM step
        swmm.run_step()

        # Receive the new rewards
        outflow = swmm.get('ZOF1', swmm.INFLOW, swmm.SI)
        outflow_track.append(outflow)
        overflows = swmm_states(all_nodes, swmm.FLOODING)
        gate_change = np.abs(current_gate - temp_gate)
        temp_gate = current_gate
        depth = swmm.get("93-50077", swmm.DEPTH, swmm.SI)
        flood_1 = swmm.get("93-50077", swmm.FLOODING, swmm.SI)
        r_temp = reward_function(depth, outflow, gate_change, flood_1)

        for i in nodes_controlled.keys():
            agents_dqn[i].rewards_vector = r_temp

        # Observe the new states
        for i in nodes_controlled.keys():
            agents_dqn[i].state_new_vector = swmm_states(states_controlled[i],
                                                         swmm.DEPTH)
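Examples #4 and #8 call a reward_function that is not reproduced on this page; note that Example #4 passes three arguments and Example #8 four, so a flooding term with a default value fits both call sites. A hedged sketch with purely illustrative weights (not the authors' actual shaping):

import numpy as np

def reward_function(depth, outflow, gate_change, flooding=0.0):
    # Illustrative penalties only; tune weights for your own network.
    reward = 0.0
    reward -= 10.0 * flooding                    # penalize flooded volume
    reward -= 1.0 * max(outflow - 0.10, 0.0)     # penalize outflow above a target
    reward -= 0.5 * abs(depth - 1.0)             # hypothetical target depth of 1 m
    reward -= 0.1 * float(np.sum(gate_change))   # discourage gate thrashing
    return reward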