def __init__(self, root, train, transform):
    """
    :param root: the path of the data
    :param train: if True, write train_zz.csv, otherwise test_zz.csv
    :param transform: transforms that produce the output tensor
    """
    self.root = root
    self.s = 1
    self.dlist = [os.path.join(self.root, x) for x in os.listdir(root)]
    self.transform = transform
    self.zeros = np.array([0], dtype=np.float64).reshape(-1)
    self.rotation = np.array([np.linspace(-30, 30, 15)] * self.s).reshape(-1)
    self.translation = np.array([np.linspace(-5, 5, 3)] * self.s).reshape(-1)
    self.label = cartesian_product(self.zeros, self.zeros, self.rotation,
                                   self.zeros, self.zeros, self.zeros)
    self.CT = []
    # self.drr_win = None
    # self.vis = visdom.Visdom()
    # self.num_samples = len(self.dlist)
    if train:
        file = open('train_zz.csv', 'w')
    else:
        file = open('test_zz.csv', 'w')
    for f in self.dlist:
        # path = os.path.join(f, 'xray_256_complex')
        # if not os.path.isdir(path):
        #     os.mkdir(path)
        CT = os.path.join(f, 'numpy_RG_npy.npy')
        # CT_out = np.load(CT)
        # CT_out = np.expand_dims(np.array(CT_out, dtype=np.float32), axis=-1).transpose((3, 2, 1, 0))
        # CT_out = torch.tensor(CT_out)
        # T = torch.zeros(6, dtype=torch.float32)
        for i, T in enumerate(self.label, 1):
            # drr = utils.DRR_generation(torch.tensor(CT_out), torch.tensor(T, dtype=torch.float32).view(1, 6), 1)
            # drr_path = os.path.join(path, str(int(i)))
            # np.save(drr_path, drr.cpu().numpy())
            m = "{},{}_{}_{}_{}_{}_{}\n".format(CT, T[0], T[1], T[2],
                                                T[3], T[4], T[5])
            file.write(m)
            # im = drr.view((960, 1240)).cpu().numpy()
            # self.drr_win = utils.PlotImage(vis=self.vis, img=im, win=self.drr_win, title="DRR")
        # ct_mean = torch.mean(CT_out)
        # ct_std = torch.std(CT_out)
        # CT_out = (CT_out - ct_mean) / ct_std
    file.close()
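# Note on the label grid above (a reading of the code, not separate
# documentation): with self.s = 1 the grid is 1 * 1 * 15 * 1 * 1 * 1 = 15 pose
# vectors of the form [0, 0, r, 0, 0, 0] with r in linspace(-30, 30, 15), so
# each CT contributes 15 CSV rows; self.translation is computed but never
# enters the grid.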
def request_line(self, Method=['GET'], Request_URI=['/'],
                 HTTP_Version=['HTTP/1.1'], Space=[' '], Line_CRLF=['\r\n']):
    '''
    Build every possible Request_Line for this Request and store them in a
    list (self.Request_Line).
    '''
    for request_line_components in utils.cartesian_product(
            [Method, Request_URI, HTTP_Version, Space, Line_CRLF]):
        method, request_uri, http_version, space, line_crlf = request_line_components
        request_line = method + space + request_uri + space + http_version + line_crlf
        self.Request_Line.append(request_line)
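# Hedged sketch of the utils.cartesian_product helper assumed by request_line
# (the real utils module is not shown here): it takes a list of lists and
# yields one tuple per combination, in order.
import itertools

def cartesian_product(lists):
    # itertools.product expands [[a, b], [c]] into (a, c), (b, c), ...
    return itertools.product(*lists)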
def contour_gpr_2d(gpr, x1s_plt=None, x2s_plt=None, with_kernel=True,
                   with_lml=True, fig_axes=None):
    if x1s_plt is None:
        x1s_plt = np.linspace(0, 1, num=100)
    if x2s_plt is None:
        x2s_plt = x1s_plt
    if fig_axes is None:
        fig_axes = plt.subplots(1, 2, sharex=True, sharey=True,
                                figsize=(14, 6))
    fig, (ax_gpr, ax_uncert) = fig_axes
    xs_plt = utils.cartesian_product(x1s_plt, x2s_plt)
    preds, pred_stds = gpr.predict(xs_plt, return_std=True)

    # Contour plot predictions
    cs = ax_gpr.contourf(x1s_plt, x2s_plt,
                         preds.reshape(len(x2s_plt), len(x1s_plt)))
    fig.colorbar(cs, ax=ax_gpr)

    # Plot data points
    ax_gpr.scatter(gpr.X_train_[:, 0], gpr.X_train_[:, 1], edgecolors='w')

    # Contour plot uncertainties
    cs = ax_uncert.contourf(x1s_plt, x2s_plt,
                            pred_stds.reshape(len(x2s_plt), len(x1s_plt)))
    fig.colorbar(cs, ax=ax_uncert)
    ax_uncert.scatter(gpr.X_train_[:, 0], gpr.X_train_[:, 1], edgecolors='w')

    # Set title
    title = 'GPR kernel: %s' % gpr.kernel_ if with_kernel else ''
    if with_lml:
        title += '\n' if with_kernel else ''
        title += 'LML: %f' % gpr.log_marginal_likelihood(gpr.kernel_.theta)
    ax_gpr.set_title(title)
    ax_uncert.set_title('Uncertainty')

    return fig, (ax_gpr, ax_uncert)
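# Usage sketch for contour_gpr_2d (toy data made up for illustration; assumes
# scikit-learn and a utils.cartesian_product whose row order matches the
# reshape(len(x2s_plt), len(x1s_plt)) above, i.e. x1 varies fastest).
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF

rng = np.random.default_rng(0)
X = rng.uniform(0, 1, size=(30, 2))
y = np.sin(2 * np.pi * X[:, 0]) * np.cos(2 * np.pi * X[:, 1])
gpr = GaussianProcessRegressor(kernel=RBF(length_scale=0.2)).fit(X, y)
fig, (ax_gpr, ax_uncert) = contour_gpr_2d(gpr)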
def surf_gpr_2d(gpr, x1s_plt=None, x2s_plt=None, with_kernel=True,
                with_lml=True, fig_axes=None):
    if x1s_plt is None:
        x1s_plt = np.linspace(0, 1, num=100)
    if x2s_plt is None:
        x2s_plt = x1s_plt
    if fig_axes is None:
        fig = plt.figure(figsize=(14, 6))
        ax_gpr = fig.add_subplot(121, projection='3d')
        ax_uncert = fig.add_subplot(122)
        fig_axes = fig, (ax_gpr, ax_uncert)
    fig, (ax_gpr, ax_uncert) = fig_axes
    xs_plt = utils.cartesian_product(x1s_plt, x2s_plt)
    preds, pred_stds = gpr.predict(xs_plt, return_std=True)

    # Surface plot predictions
    ax_gpr.plot_surface(*np.meshgrid(x1s_plt, x2s_plt),
                        preds.reshape(len(x2s_plt), len(x1s_plt)),
                        alpha=0.25)

    # Plot data points
    ax_gpr.scatter(gpr.X_train_[:, 0], gpr.X_train_[:, 1], gpr.y_train_)

    # Contour plot uncertainties
    cs = ax_uncert.contourf(x1s_plt, x2s_plt,
                            pred_stds.reshape(len(x2s_plt), len(x1s_plt)))
    fig.colorbar(cs, ax=ax_uncert)
    ax_uncert.scatter(gpr.X_train_[:, 0], gpr.X_train_[:, 1], edgecolors='w')

    # Set title
    title = 'GPR kernel: %s' % gpr.kernel_ if with_kernel else ''
    if with_lml:
        title += '\n' if with_kernel else ''
        title += 'LML: %f' % gpr.log_marginal_likelihood(gpr.kernel_.theta)
    ax_gpr.set_title(title)
    ax_uncert.set_title('Uncertainty')

    return fig, (ax_gpr, ax_uncert)
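# Usage sketch for surf_gpr_2d with caller-supplied axes (hypothetical; reuses
# the toy `gpr` fitted in the previous sketch). The 3-D axis needs
# matplotlib's mplot3d projection, which recent matplotlib versions register
# automatically.
import matplotlib.pyplot as plt

fig = plt.figure(figsize=(14, 6))
ax3d = fig.add_subplot(121, projection='3d')
ax2d = fig.add_subplot(122)
surf_gpr_2d(gpr, fig_axes=(fig, (ax3d, ax2d)))
plt.show()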
def header_copy(self, name='', num=0, value=[], style=[]):
    '''
    For this Request, build every combination of value and style for the
    header named `name` and store the result in self.Headers[name].
    '''
    # All orderings of the values, de-duplicated
    values = utils.permutation(value)
    # If len(style) < num, pad with 0 (meaning no whitespace). Build a new
    # list rather than extend in place, so the mutable default argument is
    # not modified across calls.
    style = style + [0] * (num - len(style))
    # All orderings of the styles, de-duplicated
    styles = utils.permutation(style)
    # Cartesian product of values and styles
    values_and_styles = utils.cartesian_product([values, styles])
    self.Headers[name] = values_and_styles
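# Hedged sketch of the utils.permutation helper assumed above (the real utils
# module is not shown): all orderings of a list, with duplicates removed.
import itertools

def permutation(items):
    # set() collapses duplicate orderings that arise from repeated values
    return [list(p) for p in set(itertools.permutations(items))]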
def balanced_stochastic_update(self, iterations=None):
    if iterations is not None:
        # np.ceil returns a float; range() below needs an int
        epochs = int(np.ceil(iterations / self.n**2))
    else:
        epochs = self.epochs
    lower_bound = 1 if self.clamp_first_column else 0
    i_s = range(0, self.n)
    j_s = range(lower_bound, self.n)
    all_pairs = utils.cartesian_product(i_s, j_s)
    for e in range(0, epochs):
        np.random.shuffle(all_pairs)
        for pair in all_pairs:
            self.update_node(pair[0], pair[1])
    return self.report_solution(self.sol_guess)
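# Hedged sketch of the vararg-style utils.cartesian_product this class uses
# (a different signature from the list-of-lists variant sketched earlier): it
# must return a NumPy array of (i, j) index pairs, since np.random.shuffle
# above permutes its rows in place.
import numpy as np

def cartesian_product(*arrays):
    grids = np.meshgrid(*arrays, indexing='ij')
    return np.stack([g.reshape(-1) for g in grids], axis=-1)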
def plot_current_state(self, state): """ Plot the sol_guess matrix, a circle for each matrix cell, radius proportional to magnitude """ r = range(0, self.n) size = 200 pairs = utils.cartesian_product(r, r) pairs = np.transpose(pairs, (1, 0)) plt.scatter(pairs[0], pairs[1], s=state.reshape(-1)**2 * size, color="blue") plt.grid(which="major") plt.xticks(np.arange(0, self.n)) plt.yticks(np.arange(0, self.n)) #plt.draw() plt.show()
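# Usage sketch (hypothetical instance; the class name is not shown in this
# file). Note that scatter's `s` parameter is marker *area*, so passing
# state**2 * 200 makes the circle radius scale linearly with cell magnitude.
# solver = TSPHopfieldNet(...)   # hypothetical constructor
# solver.plot_current_state(np.random.rand(solver.n, solver.n))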
def header_lines(self):
    '''
    Build every possible Header_Line for this Request and store them in a
    list (self.Header_Lines). The lines for all Headers that appear in this
    Request are written into one string; each string represents one
    assignment of values to all of the Headers.
    '''
    header_name_lst = self.Headers.keys()
    header_info_lst = self.Headers.values()
    for header_line_crlf in self.Header_Line_CRLF:
        for headers_info in utils.cartesian_product(header_info_lst):
            header_line = ''
            for header_name, headers_with_same_name in zip(
                    header_name_lst, headers_info):
                for i in range(len(headers_with_same_name[0])):
                    header_line += self.get_single_header_line(
                        header_name, headers_with_same_name[0][i],
                        headers_with_same_name[1][i], header_line_crlf)
            self.Header_Lines.append(header_line)
    return self.Header_Lines
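# Data layout assumed by header_lines (inferred from the indexing above, not
# documented in the source): self.Headers maps a header name to the output of
# header_copy, i.e. a list of (values, styles) pairs where values[i] is
# emitted with whitespace style styles[i]. For example:
#   self.Headers['Host'] = [(['a.com', 'b.com'], [0, 1]), ...]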
def min_congestion(G, D, hard_cap=False, verbose=False):
    '''
    Compute the multi-commodity flow which minimizes maximum link
    utilization, through linear programming.

    input parameters:
        G is a networkx graph with nodes and edges. Edges must have a
        'capacity' attribute. Edge capacity denotes the maximum possible
        traffic utilization for an edge. It can be set as a hard or soft
        optimization constraint through the 'hard_cap' parameter. Edges may
        additionally have a 'cost' attribute used for weighting the maximum
        link utilization.

        D is a |V| x |V| demand matrix, represented as a 2D numpy array. |V|
        here denotes the number of vertices in the graph G.

        hard_cap is a boolean flag which determines whether edge capacities
        are treated as hard or soft optimization constraints.

        verbose is a boolean flag enabling/disabling optimizer printing.

    return values:
        f_sol is a routing policy, keyed by (s, t, i, j) over
        |V| x |V| x |E| entries, such that f_sol[s, t, i, j] yields the
        amount of traffic from source s to destination t that goes through
        edge (i, j).

        l_sol is keyed by (i, j) over |E| entries, such that l_sol[i, j]
        represents the total amount of traffic that flows through edge
        (i, j) under the given flow.

        m_cong is the maximal congestion for any link, weighted by cost,
        i.e. max_{(i, j) in E} cost[i, j] * l[i, j] / cap[i, j].
    '''
    np.fill_diagonal(D, 0)
    nV = G.number_of_nodes()
    nE = G.number_of_edges()

    m = gb.Model('netflow')
    verboseprint = print
    if not verbose:
        # Accept (and ignore) keyword arguments such as the file=sys.stderr
        # that the error path below passes.
        verboseprint = lambda *a, **kw: None
        m.setParam('OutputFlag', False)
        m.setParam('LogToConsole', False)

    V = np.array([i for i in G.nodes()])
    cost = {}
    for k, e in enumerate(G.edges()):
        if 'cost' in G[e[0]][e[1]]:
            cost[e] = G[e[0]][e[1]]['cost']
        else:
            # If costs aren't specified, make them uniform.
            cost[e] = 1.0

    cap = {}
    for k, e in enumerate(G.edges()):
        cap[e] = G[e[0]][e[1]]['capacity']
    arcs, capacity = gb.multidict(cap)

    # Create variables
    f = m.addVars(V, V, arcs, obj=cost, name='flow')
    l = m.addVars(arcs, lb=0.0, name='tot_traf_across_link')

    # Link utilization is the sum of flows.
    m.addConstrs(
        (l[i, j] == f.sum('*', '*', i, j) for i, j in arcs),
        'l_sum_traf',
    )

    # Arc capacity constraints
    if hard_cap:
        verboseprint('Capacity constraints set as hard constraints.')
        m.addConstrs(
            (l[i, j] <= capacity[i, j] for i, j in arcs),
            'traf_below_cap',
        )

    # Flow conservation constraints
    for s, t, u in utils.cartesian_product(V, V, V):
        d = D[int(s), int(t)]
        if u == s:
            m.addConstr(
                f.sum(s, t, u, '*') - f.sum(s, t, '*', u) == d, 'conserv')
        elif u == t:
            m.addConstr(
                f.sum(s, t, u, '*') - f.sum(s, t, '*', u) == -d, 'conserv')
        else:
            m.addConstr(
                f.sum(s, t, u, '*') - f.sum(s, t, '*', u) == 0, 'conserv')

    # Set objective to max-link utilization (congestion)
    max_cong = m.addVar(name='congestion')
    m.addConstrs(
        ((cost[i, j] * l[i, j]) / capacity[i, j] <= max_cong
         for i, j in arcs))
    m.setObjective(max_cong, gb.GRB.MINIMIZE)

    # Compute optimal solution
    m.optimize()

    # Print solution
    if m.status == gb.GRB.Status.OPTIMAL:
        f_sol = m.getAttr('x', f)
        l_sol = m.getAttr('x', l)
        m_cong = float(max_cong.x)
        verboseprint('\nOptimal traffic flows.')
        verboseprint('f_{i -> j}(s, t) denotes the amount of traffic from'
                     ' source s to destination t that goes through link'
                     ' (i, j) in E.')
        for s, t in utils.cartesian_product(V, V):
            for i, j in arcs:
                p = f_sol[s, t, i, j]
                if p > 0:
                    verboseprint('f_{%s -> %s}(%s, %s): %g bytes.'
                                 % (i, j, s, t, p))
        verboseprint('\nTotal traffic through link.')
        verboseprint('l(i, j) denotes the total amount of traffic that'
                     ' passes through edge (i, j).')
        for i, j in arcs:
            p = l_sol[i, j]
            if p > 0:
                verboseprint('%s -> %s: %g bytes.' % (i, j, p))
        verboseprint('\nMaximum weighted link utilization (or congestion):',
                     format(m_cong, '.4f'))
    else:
        # Optimization failed: dump the demand matrix and edge attributes
        # for offline debugging.
        print(D, m.status)
        np.savetxt("demand.txt", D)
        w = np.zeros(nE)
        cap = np.zeros(nE)
        cost = np.zeros(nE)
        e0 = np.zeros(nE)
        e1 = np.zeros(nE)
        for k, e in enumerate(G.edges()):
            w[k] = G[e[0]][e[1]]['weight']
            cap[k] = G[e[0]][e[1]]['capacity']
            cost[k] = G[e[0]][e[1]]['cost']
            e0[k] = e[0]
            e1[k] = e[1]
            print(e, G[e[0]][e[1]]['cost'], G[e[0]][e[1]]['capacity'],
                  G[e[0]][e[1]]['weight'])
        np.savetxt("w.txt", w)
        np.savetxt("capacity.txt", cap)
        np.savetxt("cost.txt", cost)
        np.savetxt("e0.txt", e0)
        np.savetxt("e1.txt", e1)
        verboseprint('\nERROR: Flow Optimization Failed!', file=sys.stderr)
        return None, None, None

    return f_sol, l_sol, m_cong
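# Usage sketch for min_congestion (made-up 4-node ring; assumes networkx and a
# working Gurobi installation for the `gb` module above). Every edge carries
# the 'capacity', 'cost', and 'weight' attributes the function reads.
import networkx as nx
import numpy as np

G = nx.DiGraph()
for u, v in [(0, 1), (1, 2), (2, 3), (3, 0),
             (1, 0), (2, 1), (3, 2), (0, 3)]:
    G.add_edge(u, v, capacity=10.0, cost=1.0, weight=1.0)

D = np.zeros((4, 4))
D[0, 2] = 4.0  # 4 units of demand from node 0 to node 2

f_sol, l_sol, m_cong = min_congestion(G, D, verbose=True)
print('max congestion:', m_cong)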
def balanced_stochastic_update(self, iterations=None, keep_states=False,
                               sol_guess=None):
    if iterations is not None:
        epochs = np.ceil(iterations / self.n**2)
    else:
        epochs = self.epochs
    if sol_guess is None:
        sol_guess = self.initialize_guess()
    else:
        sol_guess = sol_guess.copy()
    # initial_cost = self.get_cost(sol_guess)
    # print(initial_cost)
    learning_rate = self.learning_rate
    when_to_force_valid = self.when_to_force_valid
    force_valid_factor = self.force_valid_factor
    improve_tour_factor = self.improve_tour_factor
    inhibition_factor = self.inhibition_factor
    force_visit_bias = self.force_visit_bias
    global_inhibition_factor = self.global_inhibition_factor
    anneal = self.anneal
    n = self.n
    indices = np.arange(n)
    # cost_matrix = self.cost_matrix.copy()
    cost_matrix = self.cost_matrix
    lower_bound = 1 if self.clamp_first_column else 0
    i_s = range(0, self.n)
    j_s = range(lower_bound, self.n)
    all_pairs = utils.cartesian_product(i_s, j_s)

    # Keep track of states
    if keep_states:
        self.states = []

    for e in range(0, int(epochs)):
        # Randomize order
        np.random.shuffle(all_pairs)
        VERBOSE = False

        # Force the solution to be valid
        if e > epochs * when_to_force_valid:
            if VERBOSE:
                print("Epoch {}".format(e))
                print(":", improve_tour_factor, inhibition_factor)
            # too_many_columns.shuffle()
            # too_few_columns.shuffle()
            # too_many_rows.shuffle()

            # Do this once:
            if e - 1 <= epochs * when_to_force_valid:
                # print("CHECK")
                col = sol_guess.sum(axis=1)
                row = sol_guess.sum(axis=0)
                # print(col)
                # print(row)
                if (row < .1).any() or (row > 1.5).any() or \
                        (col < .1).any() or (col > 1.5).any():
                    # print("CLEAR")
                    sol_guess = self.shock_out_of_invalid(sol_guess)

            too_few_columns, too_many_columns, too_few_rows, too_many_rows = \
                self.local_update_tour_factor(sol_guess)
            for i in too_few_rows:
                for j in too_few_columns:
                    if VERBOSE:
                        print("too few", i, j)
                    update = self.calculate_update(
                        i, j, n, sol_guess, cost_matrix,
                        improve_tour_factor * force_valid_factor,
                        inhibition_factor, global_inhibition_factor,
                        force_visit_bias, indices)
                    self.update_node(i, j, sol_guess, update, learning_rate)
            for i in too_many_rows:
                for j in too_many_columns:
                    if VERBOSE:
                        print("too many", i, j)
                    update = self.calculate_update(
                        i, j, n, sol_guess, cost_matrix,
                        improve_tour_factor,
                        inhibition_factor * force_valid_factor,
                        global_inhibition_factor, force_visit_bias, indices)
                    self.update_node(i, j, sol_guess, update, learning_rate)
            # improve_tour_factor, inhibition_factor = \
            #     self.global_update_tour_factor(sol_guess)
            # print(improve_tour_factor, inhibition_factor)

        # Random order updates
        temp = 1 / ((e + 1) * 1 / epochs) / 20
        for pair in all_pairs:
            i = pair[0]
            j = pair[1]
            update = self.calculate_update(i, j, n, sol_guess, cost_matrix,
                                           improve_tour_factor,
                                           inhibition_factor,
                                           global_inhibition_factor,
                                           force_visit_bias, indices)
            if e > epochs * .8 or not anneal:
                sol_guess = self.update_node(i, j, sol_guess, update,
                                             learning_rate)
            else:
                sol_guess = self.annealing_update(i, j, sol_guess, update,
                                                  learning_rate,
                                                  temperature=temp)

        if keep_states:
            self.states.append(sol_guess.copy())
        # cost = self.get_cost(sol_guess)
        # print(sol_guess, cost)

    return self.report_solution(sol_guess)
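# Note on the annealing schedule above (a reading of the code, not separate
# documentation): temp = 1 / ((e + 1) * 1 / epochs) / 20 simplifies to
# epochs / (20 * (e + 1)), so the temperature decays roughly as 1/e and the
# stochastic updates become nearly greedy well before the final 20% of
# epochs, where the code switches to plain update_node calls anyway.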
def __init__(self, root, train, transform):
    """
    :param root: the path of the data
    :param train: if True, write train_zzz.csv, otherwise test_zzz.csv
    :param transform: transforms that produce the output tensor
    """
    self.root = root
    self.s = 1
    self.dlist = [os.path.join(self.root, x) for x in os.listdir(root)]
    self.transform = transform
    self.zeros = np.array([0], dtype=np.float64).reshape(-1)
    self.rotation = np.array([np.linspace(-30, 30, 15)] * self.s).reshape(-1)
    self.translation = np.array([np.linspace(-5, 5, 3)] * self.s).reshape(-1)
    self.label = cartesian_product(self.zeros, self.zeros, self.rotation,
                                   self.zeros, self.zeros, self.zeros)
    self.CT = []
    # self.drr_win = None
    # self.vis = visdom.Visdom()
    # self.num_samples = len(self.dlist)
    if train:
        file = open('train_zzz.csv', 'w')
    else:
        file = open('test_zzz.csv', 'w')
    for f in self.dlist:
        # path = os.path.join(f, 'xray')
        # if not os.path.isdir(path):
        #     os.mkdir(path)
        CT = os.path.join(f, 'numpy_RG_npy.npy')
        CT = np.load(CT)
        CT_out = np.expand_dims(np.array(CT, dtype=np.float32),
                                axis=-1).transpose((3, 2, 1, 0))
        CT_out = torch.tensor(CT_out)

        # Sample a random catheter path on the CT skeleton; retry until
        # get_road finds a connected path between the two random endpoints.
        catheter = []
        while len(catheter) == 0:
            a = skeleton2.mapping(CT)
            skel = a.skel
            xyz = np.where(skel == 1)
            idx = np.random.randint(len(xyz[0]), size=2)
            sp = np.array([xyz[0][idx[0]], xyz[1][idx[0]], xyz[2][idx[0]]])
            fp = np.array([xyz[0][idx[1]], xyz[1][idx[1]], xyz[2][idx[1]]])
            catheter = a.get_road(sp, fp)
        catheter = np.array(catheter)

        # Rasterize the catheter path into a binary volume
        C = np.zeros_like(CT)
        C[catheter[:, 0], catheter[:, 1], catheter[:, 2]] = 1
        C = np.expand_dims(np.array(C, dtype=np.float32),
                           axis=-1).transpose((3, 2, 1, 0))
        C = torch.tensor(C)

        # T = torch.zeros(6, dtype=torch.float32)
        for i, T in enumerate(self.label, 1):
            drr = utils.DRR_generation(
                C, torch.tensor(T, dtype=torch.float32).view(1, 6), 1,
                [256, 256])
            drr_path = os.path.join(
                f, "{}_{}_{}_{}_{}_{}".format(T[0], T[1], T[2],
                                              T[3], T[4], T[5]))
            np.save(drr_path, drr.cpu().numpy())
            # m = "{}_{}_{}_{}_{}_{}_{}\n".format(f, str(T[0]), str(T[1]), str(T[2]), str(T[3]), str(T[4]), str(T[5]))
            # file.write(m)
            # im = drr.view((960, 1240)).cpu().numpy()
            # self.drr_win = utils.PlotImage(vis=self.vis, img=im, win=self.drr_win, title="DRR")
        # ct_mean = torch.mean(CT_out)
        # ct_std = torch.std(CT_out)
        # CT_out = (CT_out - ct_mean) / ct_std
    file.close()
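# Note on the generation loop above: each CT case yields one 256x256 DRR per
# pose in self.label (15 with the default grid), saved next to the volume as
# "<p0>_<p1>_<p2>_<p3>_<p4>_<p5>.npy" from the six pose parameters. The DRRs
# project the rasterized catheter mask C, not the CT itself, and the CSV file
# is opened but, with the write commented out, left empty.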