def merge(rollouts):
    '''Merges all collected rollouts for batched compatibility with optim.backward'''
    # Assumes: from collections import defaultdict; T() is a debugger
    # breakpoint (e.g. pdb.set_trace aliased as T).
    outs = {
        'value': [],
        'return': [],
        'action': defaultdict(lambda: defaultdict(list))
    }
    for rollout in rollouts.values():
        for idx in range(rollout.time):
            try:
                key, atn, out = rollout.outs[idx]
            except:
                # Dump rollout bookkeeping and drop into the debugger on a
                # length mismatch.
                print(rollout.time)
                print(len(rollout))
                print(len(rollout.returns))
                print(len(rollout.outs))
                print('----')
                T()
            val = rollout.vals[idx]
            ret = rollout.returns[idx]
            outs['value'].append(val)
            outs['return'].append(ret)
            # Group per-action outputs by key so each action head can be
            # batched separately.
            for k, o, a in zip(key, out, atn):
                k = tuple(k)
                outk = outs['action'][k]
                outk['atns'].append(o)
                outk['idxs'].append(a)
                outk['vals'].append(val)
                outk['rets'].append(ret)
    return outs
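# Hedged sketch of downstream usage (assumed, not the author's code): the
# merged dict pairs values with returns for a critic loss and groups each
# action head's outputs ('atns'), chosen indices ('idxs'), and matching
# values/returns for a policy loss. valueLoss and policyLoss are
# hypothetical callables supplied by the trainer.
def lossesFromMerged(outs, valueLoss, policyLoss):
    loss = valueLoss(outs['value'], outs['return'])
    for key, bucket in outs['action'].items():
        loss = loss + policyLoss(bucket['atns'], bucket['idxs'],
                                 bucket['vals'], bucket['rets'])
    return loss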
def stats(criterion, a, y):
    #maskCriterion = nn.CrossEntropyLoss(ignore_index=0)
    pPred, aPred = a
    p, a = y
    batch, sLen, vocab = pPred.size()
    pPred = pPred.view(-1, vocab)

    loss = 0
    progAcc = 0
    test = p is None
    if not test:
        p = p.view(-1)
        progLoss = criterion(pPred, p)
        #progLoss = maskCriterion(pPred, p)
        loss += progLoss

        # Program accuracy over non-padding tokens (label 0 is padding).
        mask = p.data != 0
        numLabels = t.sum(mask)
        _, preds = t.max(pPred.data, 1)
        progAcc = t.sum(mask * (p.data == preds)) / numLabels
        if progAcc > 1.0:
            T()

    # Answer loss and accuracy.
    ansLoss = criterion(aPred, a)
    loss += ansLoss
    _, preds = t.max(aPred.data, 1)
    ansAcc = t.mean((a.data == preds).float())
    return loss, (progAcc, ansAcc)
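# Hedged usage sketch for stats() with dummy data (assumed shapes; t is
# torch, matching the body above). pPred holds per-token program logits,
# aPred answer logits, p program targets with 0 as padding, a answer
# targets; passing p=None skips the program term.
import torch as t
import torch.nn as nn

batch, sLen, progVocab, ansVocab = 4, 6, 17, 10
pPred = t.randn(batch, sLen, progVocab)
aPred = t.randn(batch, ansVocab)
p = t.randint(0, progVocab, (batch, sLen))
a = t.randint(0, ansVocab, (batch,))

criterion = nn.CrossEntropyLoss()
loss, (progAcc, ansAcc) = stats(criterion, (pPred, aPred), (p, a))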
def eqnAns(eqn):
    class eqnNum:  # unused helper
        def __init__(self, val):
            self.val = val

    # Postfix evaluation over token ids: ids < 3 are special tokens and are
    # skipped, ids 3-12 encode the digits 0-9, and ids 13-16 encode the
    # operators + - * / (scheme inferred from the branches below).
    stack = []
    for tok in eqn:
        tok = tok.data[0]
        if tok < 3:
            pass
        elif tok < 13:
            stack.append(tok - 3)
        else:
            arg2 = stack.pop()
            arg1 = stack.pop()
            if tok == 13:
                ret = arg1 + arg2
            elif tok == 14:
                ret = arg1 - arg2
            elif tok == 15:
                ret = arg1 * arg2
            elif tok == 16:
                ret = arg1 / arg2
            else:
                T()
            if ret is None:
                return None
            stack.append(ret)
    return stack.pop()
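# Hedged usage sketch for eqnAns() under the token scheme noted above.
# Tok is a hypothetical stand-in for the Variable-like wrapper whose
# .data[0] holds the token id.
class Tok:
    def __init__(self, val):
        self.data = [val]

# "3 4 +" in postfix: digits 3 and 4 encode as ids 6 and 7, '+' as 13,
# so eqnAns should return 7.
assert eqnAns([Tok(6), Tok(7), Tok(13)]) == 7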
def merge(rollouts):
    '''Merges all collected rollouts for batched compatibility with optim.backward'''
    outs = {'value': [], 'return': [],
            'action': defaultdict(lambda: defaultdict(list))}
    for rollout in rollouts.values():
        for idx in range(rollout.time):
            try:
                key, atn, out = rollout.outs[idx]
            except:
                print(rollout.time)
                print(len(rollout))
                print(len(rollout.returns))
                print(len(rollout.outs))
                print('----')
                T()
            val = rollout.vals[idx]
            ret = rollout.returns[idx]
            outs['value'].append(val)
            outs['return'].append(ret)

            #Going to have to change to key by atn type (move, attk, etc)
            for k, packet in enumerate(zip(key, out, atn)):
                _, o, a = packet
                #k = tuple([k])
                outk = outs['action'][k]
                outk['atns'].append(o)
                outk['idxs'].append(a)
                outk['vals'].append(val)
                outk['rets'].append(ret)
    return outs
def forward(self, conv, flat, ents):
    ents = self.ent1(ents)
    T()
    ents = self.attn(ents)
    ents = ents.view(-1)
    x = torch.cat((conv.view(-1), flat, ents)).view(1, -1)
    x = torch.nn.functional.relu(self.fc1(x))
    return x
def render(self, rend_idx):
    rend_state = self.state[rend_idx].cpu()
    T()
    # Stack three copies of the state, rearrange to channels-last, and
    # convert to a uint8 image.
    rend_state = np.vstack(
        (rend_state * 1, rend_state * 1, rend_state * 1))
    rend_arr = rend_state
    rend_arr = rend_arr.transpose(1, 2, 0)
    rend_arr = rend_arr.astype(np.uint8)
    rend_arr = rend_arr * 255
    return rend_arr
def network_simulation():
    nw = load_network('data/queueing_params.mat')
    target = get_availabilities(nw.station_names)
    bal_rates, bal_routing = nw.balance()
    nw.combine()
    n = Network(nw.size, nw.rates, nw.travel_times, nw.routing, [20] * nw.size)
    for i in range(100):
        if i % 10 == 0:
            print i
        n.jump()
    T()
def on_key_down(self, *args):
    text = args[3]
    if text == 'i':
        #Toggle isometric
        trans = self.renderOffsets(self.H, self.H)
        self.view.toggleEnv(trans)
    elif text == 'p':
        T()
    elif text == '[':
        self.view.leftScreenshot()
    else:
        #Toggle overlay
        self.view.key(text)
def cal_logo_experiment(adj):
    nw = load_network('data/queueing_params.mat')
    target = get_availabilities(nw.station_names)
    bal_rates, bal_routing = nw.balance()
    nw.combine()
    res = []
    for i in adj:
        nw.update_adjacency(i)
        att_rates, att_routing = nw.min_attack(target, full_adj=False)
        T()
        res.append(int(np.sum(att_rates)))
    print 'Passenger Arrival Rate:', np.sum(nw.rates)
    print 'Balance Cost:          ', np.sum(bal_rates)
    print 'Attack After Balance Cost (adjacency {}): {}'.format(adj, res)
    return res
def optimal_attack_with_radius(r, save_to=None):
    # try to compute the optimal attacks with different radii of adjacencies
    nw = load_network('data/queueing_params.mat')
    nw.set_weights_to_min_time_usage()
    #nw.rates += np.ones(nw.size) * 100
    nw.balance()
    nw.combine()
    nw.budget = 1000
    nw.update_adjacency(r)
    # k has been pre-processed and is given by best_single_destination_attack()
    k = 302
    nw.optimal_attack(max_iters=1, full_adj=False, alpha=10., beta=1.,
                      max_iters_attack_rate=3, k=k)
    rates = nw.attack_rates / (nw.attack_rates + nw.rates)
    T()
    if save_to:
        obj = {'rates': rates, 'routing': nw.attack_routing}
        pickle.dump(obj, open(save_to, 'wb'))
def compare():
    p1 = json.load(open('f_patch1.json'))
    p2 = json.load(open('f_patch2.json'))
    diffs = [a2[1] - a1[1] for a1, a2 in zip(p1, p2)
             if a1 is not None and a2 is not None]
    T()
def cube(self, tile):
    DEFAULT_VERTEX_FORMAT = [(b'v_tc0', 2, 'float'),
                             (b'v_normal', 3, 'float'),
                             (b'v_pos', 3, 'float')]
    # Assumed fix: the pre-built block object for this tile, whose mesh and
    # material are overwritten below (the original assigned this to `obj`,
    # leaving `orig` undefined).
    orig = self.blocks[tile].obj
    T()
    obj = pywave.Wavefront('tex/block.obj', collect_faces=True)
    material = obj.materials['grass']
    cube = obj.meshes['Cube']
    vertices = obj.vertices
    faces = cube.faces

    grass = obj.materials['grass']
    dirt = obj.materials['dirt']
    vertices = grass.vertices + dirt.vertices
    #indices = np.array(faces).ravel().tolist()
    indices = np.arange(36).astype(int).tolist()
    #vertices = np.array(vertices).ravel().tolist()

    tex = Image('tex/grass.png').texture
    mat = Material(tex)
    kw = {
        "vertices": vertices,
        "indices": indices,
        "fmt": DEFAULT_VERTEX_FORMAT,
        "mode": "triangles",
        'texture': tex
    }
    #if self.material.map:
    #    kw["texture"] = self.material.map
    mesh = KivyMesh(**kw)

    class Meshy(Object3D):
        def __init__(self, mesh, material):
            super().__init__()
            self._mesh = mesh
            self.material = material
            self.mtl = material
            self.vertex_format = DEFAULT_VERTEX_FORMAT

    cube = Meshy(mesh, tex)
    #cube.material = orig.material
    #cube.geometry = orig.geometry
    orig._mesh = cube._mesh
    orig.material = mat
    cube = orig
    #cube = kivy3.Mesh([], material)

    # Per-tile vertical offsets.
    if tile == 'lava':
        cube.pos.y = -0.5
    elif tile == 'stone':
        cube.pos.y = 1
    elif tile == 'grass':
        pass
    elif tile == 'forest':
        pass
    elif tile == 'water':
        cube.pos.y = -0.33
    #cube.material.color = 0., .7, 0. # green
    #cube.material.diffuse = 0., .7, 0. # green
    return cube
        # (tail of a MatrixGenerator method; A, b, U, and config come from
        # earlier lines not shown here)
        print 'Generating f'
        f = self.f()
        x_false = np.zeros(A.shape[1])
        matrices = {'A': A, 'b': b, 'U': U, 'f': f, 'x_true': x_false}
        print 'Saving matrices'
        sio.savemat('{}/188/experiment2_waypoints_matrices_routes_{}.mat'
                    .format(config.EXPERIMENT_MATRICES_DIR,
                            self.params['num_routes']), matrices)


def block_sizes_to_U(block_sizes):
    # Build the sparse block-indicator matrix U: one row per block, with
    # ones over the columns belonging to that block.
    total = np.sum(block_sizes)
    blocks = []
    for i in block_sizes:
        blocks.append(1)
        if i > 1:
            for j in range(i - 1):
                blocks.append(0)
    I = np.cumsum(blocks) - 1
    J = np.array(range(total))
    V = np.ones(total)
    return sps.csr_matrix((V, (I, J)))


if __name__ == '__main__':
    mg = MatrixGenerator(num_routes=10)
    f = mg.f()
    mat = sio.loadmat('experiment_matrices/1/experiment2_waypoints_matrices_routes_10.mat')
    mf = mat['f']
    T()
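# Quick check of block_sizes_to_U (a minimal sketch, assuming np and sps are
# imported as above): block sizes [2, 3] should give a 2x5 indicator matrix
# whose rows sum the entries of each block.
U = block_sizes_to_U([2, 3])
print U.toarray()
# [[ 1.  1.  0.  0.  0.]
#  [ 0.  0.  1.  1.  1.]]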