def RandomGraph(nodes=list(np.arange(10)), probability=0.1, width=400, height=300,
                curvature=lambda: np.random.uniform(1.1, 1.5)):
    """Construct a random graph, with the specified nodes, and random links.
    The nodes are laid out randomly on a (width x height) rectangle.
    Each node is then connected to its min_links nearest neighbors, where
    min_links = int(probability * len(nodes)). Because inverse links are
    added, some nodes will end up with more connections. The distance
    between nodes is the hypotenuse times curvature(), where curvature()
    defaults to a random number between 1.1 and 1.5."""
    min_links = int(probability * len(nodes))
    g = UndirectedGraph()
    g.locations = {}
    # Build the nodes
    for node in nodes:
        g.locations[node] = (np.random.randint(width), np.random.randint(height))
    # Build edges from each node to at least min_links nearest neighbors.
    for _ in range(min_links):
        for node in nodes:
            if len(g.get(node)) < min_links:
                here = g.locations[node]

                def distance_to_node(n):
                    if n is node or g.get(node, n):
                        return infinity
                    return distance(g.locations[n], here)

                neighbor = argmin(nodes, key=distance_to_node)
                d = distance(g.locations[neighbor], here) * curvature()
                g.connect(node, neighbor, int(d))
    return g
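# Usage sketch (assumes the aima-python helpers UndirectedGraph, argmin,
# distance and infinity used above are importable; node count and probability
# below are illustrative):
#
#   g = RandomGraph(nodes=list(np.arange(20)), probability=0.2)
#   print(len(g.locations))   # 20 nodes with random (x, y) positions
#   print(g.get(0))           # neighbors of node 0 with integer edge costs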
def pmus_ingmar(fs, rr, pp, tp, tf):
    """
    Sinusoidal profile
    :param fs: sample frequency
    :param rr: respiratory rate
    :param pp: peak pressure
    :param tp: peak time
    :param tf: end of effort
    :return: pmus profile
    """
    ntp = np.floor(tp * fs)
    ntf = np.floor(tf * fs)
    ntN = np.floor(60.0 * fs / rr)
    # Quarter-sine rise from 0 to 1 over [0, tp]
    pmus1 = np.sin(np.pi * np.arange(0, ntp + 1, 1) / fs / 2.0 / tp)
    # Quarter-sine fall from 1 back to 0 over (tp, tf]
    pmus2 = np.sin(np.pi / 2.0 / (tf - tp) * (np.arange(ntp + 1, ntf + 1, 1) / fs + tf - 2.0 * tp))
    # Zero effort for the remainder of the respiratory cycle
    pmus3 = 0 * np.arange(ntf + 1, ntN + 1, 1) / fs
    pmus = pp * np.concatenate((pmus1, pmus2, pmus3))
    return pmus
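# Quick check (illustrative numbers): with fs=100 Hz, rr=15 breaths/min,
# pp=10 cmH2O, tp=0.5 s and tf=1.0 s, the profile peaks at pp when t = tp,
# returns to ~0 at tf, and stays at zero until the 4 s cycle ends:
#
#   pmus = pmus_ingmar(fs=100, rr=15, pp=10.0, tp=0.5, tf=1.0)
#   assert abs(pmus[int(0.5 * 100)] - 10.0) < 1e-6   # peak at tp
#   assert abs(pmus[int(1.0 * 100)]) < 1e-6          # back to ~0 at tf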
def ConnectedGraph(nodes=list(np.arange(10)), min_links=2, width=400, height=300,
                   curvature=lambda: np.random.uniform(1.1, 1.5)):
    """Construct a random connected graph."""
    # Note: RandomGraph's second parameter is a link probability, so
    # min_links is passed through and interpreted as that probability.
    g = RandomGraph(nodes, min_links, width, height, curvature)
    c_cs = connected_components(g)
    for i in range(len(c_cs) - 1):
        # Pick two random nodes from different components and connect them,
        # chaining all components into a single connected graph.
        g.connect(np.random.choice(c_cs[i]), np.random.choice(c_cs[i + 1]))
    return g
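# Sanity check (assumes connected_components from the same module): after
# chaining, exactly one component should remain:
#
#   g = ConnectedGraph()
#   assert len(connected_components(g)) == 1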
def pmus_parexp(fs, rr, pp, tp, tf):
    """
    Parabolic-exponential profile
    :param fs: sample frequency
    :param rr: respiratory rate
    :param pp: peak pressure
    :param tp: peak time
    :param tf: end of effort
    :return: pmus profile
    """
    ntp = np.floor(tp * fs)
    ntN = np.floor(60.0 / rr * fs)
    taur = abs(tf - tp) / 4.0  # time constant of the exponential decay
    # Parabolic rise from 0 to pp over [0, tp]
    pmus1 = pp * (60.0 * rr - np.arange(0, ntp + 1, 1) / fs) * (
        np.arange(0, ntp + 1, 1) / fs) / (tp * (60.0 * rr - tp))
    # Exponential decay from pp toward 0 over the rest of the cycle
    pmus2 = pp * (np.exp(-(np.arange(ntp + 1, ntN + 1, 1) / fs - tp) / taur) -
                  np.exp(-(60.0 * rr - tp) / taur)) / (
        1.0 - np.exp(-(60.0 * rr - tp) / taur))
    pmus = np.concatenate((pmus1, pmus2))
    return pmus
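# Continuity check (illustrative numbers): both segments meet at pp when
# t = tp, and the exponential tail has decayed to ~0 by the end of the cycle:
#
#   pmus = pmus_parexp(fs=100, rr=15, pp=10.0, tp=0.5, tf=1.0)
#   assert abs(pmus[int(0.5 * 100)] - 10.0) < 1e-6   # peak at tp
#   assert abs(pmus[-1]) < 1e-6                      # decayed at cycle end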
def read_channels(channels, latitudes, longitudes, dfb_beginning, dfb_ending, slot_step=1):
    dir, pattern = read_channels_dir_and_pattern()
    satellite = read_satellite_name()
    satellite_step = read_satellite_step()
    nb_slots = get_nb_slots_per_day(satellite_step, slot_step)
    patterns = [pattern.replace("{SATELLITE}", satellite).replace('{CHANNEL}', chan)
                for chan in channels]
    nb_days = dfb_ending - dfb_beginning + 1
    content = np.empty((nb_slots * nb_days, len(latitudes), len(longitudes), len(patterns)))
    start = read_start_slot()
    for k in range(len(patterns)):
        pattern = patterns[k]
        chan = channels[k]
        dataset = DataSet.read(
            dirs=dir,
            extent={
                'latitude': latitudes,
                'longitude': longitudes,
                'dfb': {
                    'start': dfb_beginning,
                    'end': dfb_ending,
                    'end_inclusive': True,
                    'start_inclusive': True,
                },
                'slot': np.arange(start, start + nb_slots, step=slot_step),
            },
            file_pattern=pattern,
            variable_name=chan,
            fill_value=np.nan,
            interpolation='N',
            max_processes=0,
        )
        data = dataset['data'].data
        day_slot_b = 0
        day_slot_e = nb_slots
        for day in range(nb_days):
            content[day_slot_b:day_slot_e, :, :, k] = data[day]
            day_slot_b += nb_slots
            day_slot_e += nb_slots
    return content
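# Usage sketch (hypothetical coordinates, day-from-beginning (dfb) range and
# channel names; valid channels depend on the satellite configured in the
# settings read by read_channels_dir_and_pattern / read_satellite_name):
#
#   lats = np.linspace(35.0, 40.0, 50)
#   lons = np.linspace(125.0, 130.0, 50)
#   content = read_channels(['VIS064', 'IR124'], lats, lons,
#                           dfb_beginning=13527, dfb_ending=13530)
#   # shape: (nb_slots_per_day * nb_days, len(lats), len(lons), nb_channels)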
def read_classes(latitudes, longitudes, dfb_beginning, dfb_ending, slot_step=1):
    dir, pattern = read_indexes_dir_and_pattern('classes')
    satellite_step = read_satellite_step()
    nb_slots = get_nb_slots_per_day(satellite_step, slot_step)
    nb_days = dfb_ending - dfb_beginning + 1
    content = np.empty((nb_slots * nb_days, len(latitudes), len(longitudes)))
    dataset = DataSet.read(
        dirs=dir,
        extent={
            'latitude': latitudes,
            'longitude': longitudes,
            'dfb': {
                'start': dfb_beginning,
                'end': dfb_ending,
                'end_inclusive': True,
                'start_inclusive': True,
            },
            'slot': {
                'enumeration': np.arange(0, nb_slots, step=slot_step),
                'override_type': 'slot',
            },
        },
        file_pattern=pattern,
        variable_name='Classes',
        fill_value=np.nan,
        interpolation='N',
        max_processes=0,
    )
    data = dataset['data'].data
    day_slot_b = 0
    day_slot_e = nb_slots
    for day in range(nb_days):
        content[day_slot_b:day_slot_e, :, :] = data[day]
        day_slot_b += nb_slots
        day_slot_e += nb_slots
    return content
def pmus_linear(fs, rr, pp, tp, tf):
    """
    Linear profile
    :param fs: sample frequency
    :param rr: respiratory rate
    :param pp: peak pressure
    :param tp: peak time
    :param tf: end of effort
    :return: pmus profile
    """
    nsamples = np.floor(60.0 / rr * fs)
    time = np.arange(0, nsamples + 1, 1) / fs
    pmus = 0 * time
    # Piecewise-linear profile: ramp up to pp at tp, ramp back down to 0 at
    # tf, then zero until the end of the respiratory cycle.
    for i in range(len(time)):
        if time[i] <= tp:
            pmus[i] = time[i] / tp
        elif time[i] <= tf:
            pmus[i] = (tf - time[i]) / (tf - tp)
        else:
            pmus[i] = 0.0
        pmus[i] = pp * pmus[i]
    return pmus
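# The three profiles can be compared on the same parameters; all return one
# full respiratory cycle (60/rr seconds) on the same grid, differing only in
# the rise/fall shape (illustrative plot, assumes matplotlib is installed):
#
#   import matplotlib.pyplot as plt
#   for f in (pmus_ingmar, pmus_parexp, pmus_linear):
#       plt.plot(f(fs=100, rr=15, pp=10.0, tp=0.5, tf=1.0), label=f.__name__)
#   plt.legend()
#   plt.show()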
def solve_model(header_params, params, header_features, features, debugmsg):
    # Extracts each parameter
    fs = params[header_params.index('Fs')]
    rvent = params[header_params.index('Rvent')]
    c = params[header_params.index('C')]
    rins = params[header_params.index('Rins')]
    rexp = rins  # params[4]
    peep = params[header_params.index('PEEP')]
    sp = params[header_params.index('SP')]
    trigger_type = features[header_features.index('Triggertype')]
    trigger_arg = params[header_params.index('Triggerarg')]
    rise_type = features[header_features.index('Risetype')]
    rise_time = params[header_params.index('Risetime')]
    cycle_off = params[header_params.index('Cycleoff')]
    rr = params[header_params.index('RR')]
    pmus_type = features[header_features.index('Pmustype')]
    pp = params[header_params.index('Pp')]
    tp = params[header_params.index('Tp')]
    tf = params[header_params.index('Tf')]
    noise = params[header_params.index('Noise')]
    e2 = params[header_params.index('E2')]
    model = features[header_features.index('Model')]

    # RR and Fs (capitalized) appear to be module-level globals holding the
    # dataset-wide ranges, as opposed to the per-simulation rr and fs above.
    expected_len = int(np.floor(180.0 / np.min(RR) * np.max(Fs)) + 1)

    # Assigns the pmus profile
    pmus = pmus_profile(fs, rr, pmus_type, pp, tp, tf)
    pmus = pmus + peep  # adjusts PEEP
    pmus = np.concatenate((np.array([0]), pmus))  # sets the first value to zero

    # Unit conversion from cmH2O.s/L to cmH2O.s/mL
    rins = rins / 1000.0
    rexp = rexp / 1000.0
    rvent = rvent / 1000.0

    # Generates time, flow, volume, insex and paw waveforms
    time = np.arange(0, np.floor(60.0 / rr * fs) + 1, 1) / fs
    time = np.concatenate((np.array([0]), time))
    flow = np.zeros(len(time))
    volume = np.zeros(len(time))
    insex = np.zeros(len(time))
    paw = np.zeros(len(time)) + peep  # adjusts PEEP
    len_time = len(time)

    # Peak flow detection
    peak_flow = flow[0]
    detect_peak_flow = False

    # Support detection
    detect_support = False
    time_support = -1

    # Expiration detection
    detect_exp = False
    time_exp = -1

    if trigger_type == 'flow':
        # units conversion from L/min to mL/s
        trigger_arg = trigger_arg / 60.0 * 1000.0

    for i in range(1, len(time)):
        # period until the respiratory effort beginning
        if (((trigger_type == 'flow' and flow[i] < trigger_arg) or
             (trigger_type == 'pressure' and paw[i] > trigger_arg + peep) or
             (trigger_type == 'delay' and time[i] < trigger_arg)) and
                (not detect_support) and (not detect_exp)):
            paw[i] = peep
            y0 = volume[i - 1]
            tspan = [time[i - 1], time[i]]
            args = (paw[i], pmus[i], model, c, e2, rins)
            sol = odeint(flow_model, y0, tspan, args=args)
            volume[i] = sol[-1]
            flow[i] = flow_model(volume[i], time[i], paw[i], pmus[i], model, c, e2, rins)
            if debugmsg:
                print('volume[i]= {:.2f}, flow[i]= {:.2f}, paw[i]= {:.2f}, waiting'.format(volume[i], flow[i], paw[i]))

            if (((trigger_type == 'flow' and flow[i] >= trigger_arg) or
                 (trigger_type == 'pressure' and paw[i] <= trigger_arg + peep) or
                 (trigger_type == 'delay' and time[i] >= trigger_arg))):
                detect_support = True
                time_support = time[i + 1]
                continue

        # detection of inspiratory effort:
        # ventilator starts to support the patient
        elif (detect_support and (not detect_exp)):
            if rise_type == 'step':
                paw[i] = sp + peep
            elif rise_type == 'exp':
                # occasionally perturbs the rise profile (1% chance per sample)
                rise_type = rise_type if np.random.random() > 0.01 else 'linear'
                if paw[i] < sp + peep:
                    paw[i] = (1.0 - np.exp(-(time[i] - time_support) / rise_time)) * sp + peep
                if paw[i] >= sp + peep:
                    paw[i] = sp + peep
            elif rise_type == 'linear':
                rise_type = rise_type if np.random.random() > 0.01 else 'exp'
                if paw[i] < sp + peep:
                    paw[i] = (time[i] - time_support) / rise_time * sp + peep
                if paw[i] >= sp + peep:
                    paw[i] = sp + peep

            y0 = volume[i - 1]
            tspan = [time[i - 1], time[i]]
            args = (paw[i], pmus[i], model, c, e2, rins)
            sol = odeint(flow_model, y0, tspan, args=args)
            volume[i] = sol[-1]
            flow[i] = flow_model(volume[i], time[i], paw[i], pmus[i], model, c, e2, rins)
            if debugmsg:
                print('volume[i]= {:.2f}, flow[i]= {:.2f}, paw[i]= {:.2f}, supporting'.format(volume[i], flow[i], paw[i]))

            if flow[i] >= flow[i - 1]:
                peak_flow = flow[i]
                detect_peak_flow = False
            elif flow[i] < flow[i - 1]:
                detect_peak_flow = True
            if (flow[i] <= cycle_off * peak_flow) and detect_peak_flow and i < len_time:
                detect_exp = True
                time_exp = i + 1
                try:
                    paw[i + 1] = paw[i]
                except IndexError:
                    pass

        elif detect_exp:
            if rise_type == 'step':
                paw[i] = peep
            elif rise_type == 'exp':
                if paw[i - 1] > peep:
                    paw[i] = sp * (np.exp(-(time[i] - time[time_exp - 1]) / rise_time)) + peep
                if paw[i - 1] <= peep:
                    paw[i] = peep
            elif rise_type == 'linear':
                rise_type = rise_type if np.random.random() > 0.01 else 'exp'
                if paw[i - 1] > peep:
                    paw[i] = sp * (1 - (time[i] - time[time_exp - 1]) / rise_time) + peep
                if paw[i - 1] <= peep:
                    paw[i] = peep

            y0 = volume[i - 1]
            tspan = [time[i - 1], time[i]]
            args = (paw[i], pmus[i], model, c, e2, rexp + rvent)
            sol = odeint(flow_model, y0, tspan, args=args)
            volume[i] = sol[-1]
            flow[i] = flow_model(volume[i], time[i], paw[i], pmus[i], model, c, e2, rexp + rvent)
            if debugmsg:
                print('volume[i]= {:.2f}, flow[i]= {:.2f}, paw[i]= {:.2f}, exhaling'.format(volume[i], flow[i], paw[i]))

    # Generates InsEx trace
    if time_exp > -1:
        insex = np.concatenate((np.ones(time_exp), np.zeros(len(time) - time_exp)))

    # Drops the first element
    flow = flow[1:] / 1000.0 * 60.0  # converts back to L/min
    volume = volume[1:]
    paw = paw[1:]
    pmus = pmus[1:] - peep  # readjusts PEEP
    insex = insex[1:]

    flow, volume, pmus, insex, paw = generate_cycle(expected_len, flow, volume, pmus, insex, paw, peep=peep)
    # paw = generate_cycle(expected_len, paw, peep=peep)[0]
    flow, volume, paw, pmus, insex = generate_noise(noise, flow, volume, paw, pmus, insex)

    # plt.plot(flow)
    # plt.plot(volume)
    # plt.plot(paw)
    # plt.plot(pmus)
    # plt.show()

    return flow, volume, paw, pmus, insex, rins, rexp, c
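# Usage sketch (hypothetical values; the header lists only fix the lookup
# order, and the RR/Fs globals must be defined at module level; the Model
# entry must be a name understood by flow_model):
#
#   header_params = ['Fs', 'Rvent', 'C', 'Rins', 'PEEP', 'SP', 'Triggerarg',
#                    'Risetime', 'Cycleoff', 'RR', 'Pp', 'Tp', 'Tf', 'Noise', 'E2']
#   params = [100.0, 5.0, 50.0, 10.0, 5.0, 12.0, 2.0,
#             0.1, 0.25, 15.0, 8.0, 0.5, 1.0, 0.5, 0.0]
#   header_features = ['Triggertype', 'Risetype', 'Pmustype', 'Model']
#   features = ['flow', 'exp', 'linear', 'linear']
#   flow, volume, paw, pmus, insex, rins, rexp, c = solve_model(
#       header_params, params, header_features, features, debugmsg=False)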
num_samples = flow.shape[1]

# Min-max normalize each signal and the targets, keeping the scaling bounds
(min_flow, max_flow, flow) = normalize_data(flow)
(min_volume, max_volume, volume) = normalize_data(volume)
(min_paw, max_paw, paw) = normalize_data(paw)
(min_resistance, max_resistance, resistances) = normalize_data(resistances)
(min_capacitance, max_capacitance, capacitances) = normalize_data(capacitances)
print("normalized data")

# Stack flow, volume and paw as the three input channels
input_data = np.zeros((num_examples, num_samples, 3))
input_data[:, :, 0] = flow
input_data[:, :, 1] = volume
input_data[:, :, 2] = paw
output_data = np.concatenate((resistances, capacitances), axis=1)
indices = np.arange(num_examples)
print("input created")

# 70/15/15 train/validation/test split (no shuffling)
input_train, input_test, output_train, output_test, indices_train, indices_test = \
    train_test_split(input_data, output_data, indices, test_size=0.3, shuffle=False)
input_validation, input_test, output_validation, output_test, indices_validation, indices_test = \
    train_test_split(input_test, output_test, indices_test, test_size=0.5, shuffle=False)
np.save('./data/input_test.npy', input_test)
np.save('./data/output_test.npy', output_test)
print("before CNN")

model = CNN_Model(num_samples, input_volume=3).get_model()
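# normalize_data / denormalize_data are not shown in this file; a minimal
# sketch assuming plain min-max scaling, with names and return order
# inferred from the call sites above and below:
#
#   def normalize_data(x):
#       minimum, maximum = np.min(x), np.max(x)
#       return minimum, maximum, (x - minimum) / (maximum - minimum)
#
#   def denormalize_data(x, minimum, maximum):
#       return x * (maximum - minimum) + minimum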
err_pmus = []

# Averaged alternative (disabled): pool the denormalized predictions
# R_hat = np.average([denormalize_data(output_pred_test[i, 0], minimum=min_resistances, maximum=max_resistances) for i in range(num_examples)])
# C_hat = np.average([denormalize_data(output_pred_test[i, 1], minimum=min_capacitances, maximum=max_capacitances) for i in range(num_examples)])
R_hat = denormalize_data(output_pred_test[0, 0], minimum=min_resistances, maximum=max_resistances)
C_hat = denormalize_data(output_pred_test[0, 1], minimum=min_capacitances, maximum=max_capacitances)
alpha = 0.2
rr = min(RR)
fs = max(Fs)
time = np.arange(0, np.floor(180.0 / rr * fs) + 1, 1) / fs
err_pmus_hat = []
err_nmsre = []
for i in range(num_examples - 1):
    # Exponential smoothing alternative (disabled):
    # R_hat = alpha * denormalize_data(output_pred_test[i, 0], minimum=min_resistances, maximum=max_resistances) + (1 - alpha) * R_hat
    # C_hat = alpha * denormalize_data(output_pred_test[i, 1], minimum=min_capacitances, maximum=max_capacitances) + (1 - alpha) * C_hat
    R_hat = denormalize_data(output_pred_test[i, 0], minimum=min_resistances, maximum=max_resistances)
    C_hat = denormalize_data(output_pred_test[i, 1], minimum=min_capacitances, maximum=max_capacitances)
    # R = denormalize_data(output_data[i, 0], min_resistances, max_resistances)
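# The loop body is truncated above; a typical next step (an assumption, not
# shown in this file) reconstructs the muscular pressure from the
# single-compartment equation of motion, paw + pmus = R * flow + volume / C,
# using the estimated parameters. With hypothetical test arrays flow_test,
# volume_test, paw_test and pmus_test in consistent units:
#
#   pmus_hat = R_hat * flow_test[i] + volume_test[i] / C_hat - paw_test[i]
#   err_pmus_hat.append(np.sqrt(np.mean((pmus_hat - pmus_test[i]) ** 2)))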
def train(rank, args, shared_model, optimizer, env_conf):
    ptitle('Training Agent: {}'.format(rank))
    gpu_id = args.gpu_ids[rank % len(args.gpu_ids)]
    torch.manual_seed(args.seed + rank)
    if gpu_id >= 0:
        torch.cuda.manual_seed(args.seed + rank)
    env = atari_env(args.env, env_conf, args)
    if optimizer is None:
        if args.optimizer == 'RMSprop':
            optimizer = optim.RMSprop(shared_model.parameters(), lr=args.lr)
        if args.optimizer == 'Adam':
            optimizer = optim.Adam(shared_model.parameters(), lr=args.lr, amsgrad=args.amsgrad)
    env.seed(args.seed + rank)
    tp_weight = args.tp
    player = Agent(None, env, args, None)
    player.gpu_id = gpu_id
    player.model = A3Clstm(player.env.observation_space.shape[0],
                           player.env.action_space,
                           args.terminal_prediction,
                           args.reward_prediction)
    player.state = player.env.reset()
    player.state = torch.from_numpy(player.state).float()
    if gpu_id >= 0:
        with torch.cuda.device(gpu_id):
            player.state = player.state.cuda()
            player.model = player.model.cuda()
    player.model.train()

    # Below is where the cores are running episodes continuously ...
    average_ep_length = 0
    while True:
        if gpu_id >= 0:
            with torch.cuda.device(gpu_id):
                player.model.load_state_dict(shared_model.state_dict())
        else:
            player.model.load_state_dict(shared_model.state_dict())
        if player.done:
            if gpu_id >= 0:
                with torch.cuda.device(gpu_id):
                    player.cx = Variable(torch.zeros(1, 128).cuda())
                    player.hx = Variable(torch.zeros(1, 128).cuda())
            else:
                player.cx = Variable(torch.zeros(1, 128))
                player.hx = Variable(torch.zeros(1, 128))
        else:
            player.cx = Variable(player.cx.data)
            player.hx = Variable(player.hx.data)

        for step in range(args.num_steps):
            player.eps_len += 1
            player.action_train()
            if player.done:
                break

        if player.done:
            state = player.env.reset()
            player.state = torch.from_numpy(state).float()
            if gpu_id >= 0:
                with torch.cuda.device(gpu_id):
                    player.state = player.state.cuda()

        R = torch.zeros(1, 1)
        if not player.done:
            value, _, _, _, _ = player.model(
                (Variable(player.state.unsqueeze(0)), (player.hx, player.cx)))
            R = value.data
        if gpu_id >= 0:
            with torch.cuda.device(gpu_id):
                R = R.cuda()
        player.values.append(Variable(R))

        policy_loss = 0
        value_loss = 0
        reward_pred_loss = 0
        terminal_loss = 0
        gae = torch.zeros(1, 1)
        if gpu_id >= 0:
            with torch.cuda.device(gpu_id):
                gae = gae.cuda()
        R = Variable(R)  # TODO why this is here?
        for i in reversed(range(len(player.rewards))):
            R = args.gamma * R + player.rewards[i]
            advantage = R - player.values[i]
            value_loss = value_loss + 0.5 * advantage.pow(2)

            # Generalized Advantage Estimation
            delta_t = player.rewards[i] + args.gamma * player.values[i + 1].data - player.values[i].data
            gae = gae * args.gamma * args.tau + delta_t
            policy_loss = policy_loss - player.log_probs[i] * Variable(gae) - 0.01 * player.entropies[i]

            if args.reward_prediction:
                reward_pred_loss = reward_pred_loss + (player.reward_predictions[i] - player.rewards[i]).pow(2)

        if args.terminal_prediction:
            # new way: uses the empirical average episode length as a proxy
            # for the current episode's length
            if player.average_episode_length is None:
                end_predict_labels = np.arange(player.eps_len - len(player.terminal_predictions),
                                               player.eps_len) / player.eps_len  # heuristic
            else:
                end_predict_labels = np.arange(player.eps_len - len(player.terminal_predictions),
                                               player.eps_len) / player.average_episode_length

            for i in range(len(player.terminal_predictions)):
                terminal_loss = terminal_loss + (player.terminal_predictions[i] - end_predict_labels[i]).pow(2)
            terminal_loss = terminal_loss / len(player.terminal_predictions)

        player.model.zero_grad()
        # print(f"policy loss {policy_loss} and value loss {value_loss} and terminal loss {terminal_loss} and reward pred loss {reward_pred_loss}")
        total_loss = policy_loss + 0.5 * value_loss + tp_weight * terminal_loss + 0.5 * reward_pred_loss
        total_loss.backward()  # will free memory ...

        # Visualize Computation Graph
        # graph = make_dot(total_loss)
        # from graphviz import Source
        # Source.view(graph)

        ensure_shared_grads(player.model, shared_model, gpu=gpu_id >= 0)
        optimizer.step()
        player.clear_actions()

        if player.done:
            if player.average_episode_length is None:  # initial one
                player.average_episode_length = player.eps_len
            else:
                player.average_episode_length = int(0.99 * player.average_episode_length + 0.01 * player.eps_len)
            # print(player.average_episode_length, 'current one is', player.eps_len)
            player.eps_len = 0  # reset here
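# Launch sketch (the usual A3C entry-point pattern, an assumption here: the
# args object, shared_model, optimizer and env_conf come from the repo's
# main script; args.workers is the number of worker processes):
#
#   import torch.multiprocessing as mp
#
#   processes = []
#   for rank in range(args.workers):
#       p = mp.Process(target=train, args=(rank, args, shared_model, optimizer, env_conf))
#       p.start()
#       processes.append(p)
#   for p in processes:
#       p.join()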