def main():
    desc = argv.next() or "B2"
    n = int(desc[-1:])
    if desc[0] == "A":
        gen = build_An(n)
    elif desc[0] == "B":
        gen = build_Bn(n)
    elif desc[0] == "D":
        gen = build_Dn(n)
    else:
        print("no group desc found matching %s" % desc)
        return

    I = Op.identity(n)
    zero = Op(n)

    G = mulclose(gen)
    print(len(G))

    # projectors built from the order-two elements of G
    projs = []
    for g in G:
        if g == I:
            continue
        if g*g == I:
            projs.append(posproj(g, I))
    assert len(set(projs)) == len(projs)

    #for H in search(projs):
    #    print("found")

    # pairs of non-commuting projectors
    pairs = []
    for p in projs:
        for q in projs:
            if p*q != q*p:
                pairs.append((p, q))
    print("pairs:", len(pairs))

    shuffle(pairs)
    for (p1, q1) in pairs:
        for (p2, q2) in pairs:
            A = tensor(p1, p2)
            B = tensor(q1, q2)
            if A*B == B*A:
                write(".")
    print()
def load_image_rect_flatten_labels(img_path, start_point, end_point, padding,
                                   color=(0, 255, 0), color_pad=(255, 0, 0),
                                   thickness=2):
    # Import image
    orig_image = cv2.imread(img_path)

    # Show the image with matplotlib
    start_point_pad = (start_point[0] - padding, start_point[1] - padding)
    end_point_pad = (end_point[0] + padding, end_point[1] + padding)
    image = orig_image.copy()
    image = cv2.rectangle(image, start_point, end_point, color, thickness)
    image = cv2.rectangle(image, start_point_pad, end_point_pad, color_pad, thickness)

    # label image
    image_labels = np.zeros((orig_image.shape[0], orig_image.shape[1]))
    image_labels[start_point[1]:end_point[1], start_point[0]:end_point[0]] = 1.0

    # crop
    img = orig_image[start_point_pad[1]:end_point_pad[1], start_point_pad[0]:end_point_pad[0]]
    labels = image_labels[start_point_pad[1]:end_point_pad[1], start_point_pad[0]:end_point_pad[0]]

    img = np.moveaxis(img, -1, 0)
    img = torch.tensor(img)
    img = img.float()
    img = img.unsqueeze(0)

    labels = torch.tensor(labels)
    flatten_labels = labels.view(-1)
    foreground = torch.where(flatten_labels == 1)[0]
    foreground = foreground.tolist()
    background = torch.where(flatten_labels == 0)[0]
    background = background.tolist()

    # Also return image to see image with rectangles
    return img, flatten_labels, foreground, background, image
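# Hedged usage sketch, not part of the original function: "example.jpg" and the
# rectangle/padding values below are made-up placeholders for illustration only.
img, flat_labels, fg_idx, bg_idx, vis = load_image_rect_flatten_labels(
    "example.jpg", start_point=(50, 60), end_point=(200, 220), padding=20)
print(img.shape, flat_labels.shape, len(fg_idx), len(bg_idx))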
def calc(model, data_loader):
    model.eval()
    dataset = data_loader.dataset
    prog_bar = mmcv.ProgressBar(len(dataset))
    seg_eval = SegEvaluator(class_names=dataset.SEG_CLASSES)
    det_eval = SegEvaluator(class_names=dataset.SEG_CLASSES)
    print('\nStart Test Loop')
    # print('batch_size:', data_loader.batch_size)  # 1
    for idx, data in enumerate(data_loader):
        # print(type(data['img'][0]))  # DataContainer
        with torch.no_grad():
            seg_res, box_res = model(return_loss=False, rescale=True, **data)
        # len(box_res) == batch_size == 1
        # box_res: [dict('pts_bbox'=dict(boxes_3d=bboxes, scores_3d=scores, labels_3d=labels))]

        # handle seg
        seg_label = data['seg_label'][0].data[0]  # list of tensor
        seg_pts_indices = data['seg_pts_indices'][0].data[0]  # list of tensor
        seg_points = data['seg_points'][0].data[0]
        seg_pred = seg_res.argmax(1).cpu().numpy()
        pred_list = []
        gt_list = []
        left_idx = 0
        for i in range(len(seg_label)):
            # num_points = len(seg_pts_indices[i])
            assert len(seg_label[i]) == len(seg_pts_indices[i])
            num_points = len(seg_label[i])
            right_idx = left_idx + num_points
            pred_list.append(seg_pred[left_idx:right_idx])
            gt_list.append(seg_label[i].numpy())
            left_idx = right_idx
        seg_eval.batch_update(pred_list, gt_list)

        # handle det
        dic = box_res[0]['pts_bbox']
        tensor_boxes = dic['boxes_3d'].tensor[:, :7].cuda()
        labels = dic['labels_3d']
        num_seg_pts = len(seg_points[0])
        num_pred_boxes = len(tensor_boxes)
        fake_labels = torch.tensor([4] * num_seg_pts)
        box_idx = points_in_boxes_gpu(seg_points[0].cuda().unsqueeze(0),
                                      tensor_boxes.unsqueeze(0)).squeeze(0)
        for i in range(num_pred_boxes):
            mask = box_idx == i  # select points in i_th box
            fake_labels[mask] = labels[i]
        det_eval.update(fake_labels.numpy(), seg_label[0])

        # progress bar
        batch_size = len(box_res)
        for _ in range(batch_size):
            prog_bar.update()

    print(seg_eval.print_table())
    print('overall_acc:', seg_eval.overall_acc)
    print('overall_iou:', seg_eval.overall_iou)
    print(det_eval.print_table())
    print('overall_acc:', det_eval.overall_acc)
    print('overall_iou:', det_eval.overall_iou)
def test():
    for n in [2, 3, 4]:
        ops = build_An(n)
        assert len(mulclose(ops)) == factorial(n)

        ops = build_Bn(n)
        assert len(mulclose(ops)) == 2**n * factorial(n)

        ops = build_Dn(n)
        assert len(mulclose(ops)) == 2**(n-1) * factorial(n)

    from numpy import kron as tensor

    def allclose(A, B):
        # exact equality of integer matrices
        return numpy.abs(A - B).sum() == 0

    for A in ops:
        for B in ops:
            assert (A==B) == (hash(A)==hash(B))
            assert (A!=B) != (A==B)
            lhs = (A*B).todense()
            rhs = numpy.dot(A.todense(), B.todense())
            assert allclose(lhs, rhs)
            lhs = (A.tensor(B)).todense()
            rhs = tensor(A.todense(), B.todense())
            assert allclose(lhs, rhs)

    for A in ops:
        assert allclose(A.transpose().todense(), A.todense().transpose()), str(A)

    ops = mulclose(build_Bn(2))
    tops = []
    for A in ops:
        for B in ops:
            C = Tensor([A, B])
            tops.append(C)
    #print(len(tops))

    for A in tops:
        assert A.get_canonical() == A

    for A in tops:
        for B in tops:
            assert (A==B) == allclose(A.todense(), B.todense())
            assert (A==B) == (A.get_canonical()==B.get_canonical())
            assert (A==B) == (hash(A)==hash(B))

    print("OK")
def test():
    from numpy import kron as tensor

    perms = [[0, 1, 2], [0, 2, 1], [1, 0, 2], [1, 2, 0], [2, 1, 0], [2, 0, 1]]
    signss = [(a, b, c) for a in [-1, 1] for b in [-1, 1] for c in [-1, 1]]
    ops = [Op(perm, signs) for perm in perms for signs in signss]

    for A in ops:
        for B in ops:
            assert (A == B) == (hash(A) == hash(B))
            assert (A != B) != (A == B)
            lhs = (A * B).todense()
            rhs = numpy.dot(A.todense(), B.todense())
            assert numpy.allclose(lhs, rhs)
            lhs = (A.tensor(B)).todense()
            rhs = tensor(A.todense(), B.todense())
            assert numpy.allclose(lhs, rhs)

    for A in ops:
        assert numpy.allclose(A.transpose().todense(), A.todense().transpose()), str(A)

    ops = mulclose(build_Bn(2))
    tops = []
    for A in ops:
        for B in ops:
            C = Tensor([A, B])
            tops.append(C)
    #print(len(tops))

    for A in tops:
        assert A.get_canonical() == A

    for A in tops:
        for B in tops:
            assert (A == B) == numpy.allclose(A.todense(), B.todense())
            assert (A == B) == (A.get_canonical() == B.get_canonical())
            assert (A == B) == (hash(A) == hash(B))

    print("OK")
def multi_toric(Xs, Zs):
    n = 8
    N = Xs[0].n
    I = Op.identity(N)
    for X in Xs:
        for Z in Zs:
            assert X * X == I
            assert Z * Z == I
            assert Z * X == -X * Z

    print("toric: dim=%d" % (N**n))
    II = Op.identity(N**n)

    plaqs = [(0, 1, 2, 5), (0, 2, 3, 7), (4, 5, 6, 1), (6, 7, 3, 4)]
    stars = [(0, 1, 3, 4), (1, 2, 3, 6), (0, 4, 5, 7), (2, 5, 6, 7)]

    # every plaquette meets every star in an even number of qubits
    for a in plaqs:
        for b in stars:
            assert len(set(a).intersection(set(b))) % 2 == 0

    ops = []
    for idxs in stars:
        for X in Xs:
            op = [I] * n
            for idx in idxs:
                op[idx] = X
            op = tensor(*op)
            assert op * op == II
            ops.append(op)

    for idxs in plaqs:
        for Z in Zs:
            op = [I] * n
            for idx in idxs:
                op[idx] = Z
            op = tensor(*op)
            assert op * op == II
            ops.append(op)

    for A in ops:
        for B in ops:
            assert A * B == B * A

    v = numpy.zeros(N**n)
    v[0] = 1.
    for A in ops:
        v = v + A(v)
    assert abs(norm(v)) > 0.1
    v /= norm(v)
    #print(v)

    spaces = []
    for A in ops:
        S = A.get_stab()
        #print("get_stab:", len(S))
        spaces.append(S)
        u = A(v)
        r = numpy.dot(u, v)
        err = norm(u - r * v)
        assert abs(r - 1.) < EPSILON
        assert err < EPSILON
        #print(r, err, end=' ')
    #print()

    S = spaces[0]
    for S1 in spaces[1:]:
        print("codespace:", len(S))
        S = S.intersect(S1)
    print("codespace:", len(S))
import sys

import pandas as pd
import torch as tch

import GP_function as gp
import GP_function_torch as gpt
import nuts as nuts

#%% read data
data_s = pd.read_csv("Accord_Simulation_Results_LL_Airbag_300_Large_Range.csv")
data_f = pd.read_csv("Field_Test_Data_Full_Size_Sedan.csv")
data_l = pd.read_csv("injury_function_data_chest.csv")
data_e = pd.read_csv("Experimental Data - All Sedans.csv")

n_var = 3
n_para = 7
n_res = 1
parameter_default = tch.tensor([1, 0.45, 1, 1, 0.55, 1, 1])
val_nugget = 10 ** (-4)  # exponentiation, not bitwise XOR (^)

n_s = len(data_s)
x_s = data_s.iloc[:, 1:(n_var + n_para + 1)].to_numpy()
y_s = tch.tensor(data_s.iloc[:, n_var + n_para + 1])

n_f = len(data_f)
x_f = data_f.iloc[:, 3:6].to_numpy()
a_f = data_f['Age'].to_numpy()
z_f = (data_f.iloc[:, 7] >= 3).to_numpy()

n_l = len(data_l)
a_l = data_l['Age'].to_numpy()
z_l = data_l['Injury Indicator'].to_numpy()
y_l = data_l['ChestD'].to_numpy()
def main():
    X, Z = build(2)
    I = dot(X, X)
    #print(I)
    #print(X)
    #print(Z)

    II = tensor(I, I)
    P2 = [tensor(X, I), tensor(Z, I), tensor(I, X), tensor(I, Z)]
    found = set()
    for g in mulclose(P2):
        s = shortstr(g)
        found.add(s)
    assert len(found) == 32
    assert shortstr(II) in found

    if 0:
        G = mulclose(P2)
        for g in G:
            A = numpy.array(g, dtype=float)
            vals = numpy.linalg.eigvals(A)
            print(vals)  # all +1, -1

    d = argv.get("d", 4)
    gen = build(d)
    n = (2**d) * factorial(d)
    assert len(mulclose(gen)) == n  # slow...
    G = mulclose(gen, n)

    print("orders:")
    for g in G:
        print(order(g), end=' ')
    print()

    if 0:
        for g in G:
            A = numpy.array(g, dtype=float)
            vals = numpy.linalg.eigvals(A)
            print(vals)  # all kinds of stuff..

    if d == 4:
        assert len([g for g in G if shortstr(g) in found]) == 32

    pairs = []
    c_comm = 0
    c_anti = 0
    total = 0
    for i in range(len(G)):
        for j in range(len(G)):
            g = G[i]
            h = G[j]
            gh = dot(g, h)
            hg = dot(h, g)
            total += 1
            if eq(gh, -hg):
                c_anti += 1
            elif eq(gh, hg):
                c_comm += 1
            if shortstr(g) in found or shortstr(h) in found:
                continue
            if eq(gh, -hg) and i < j:
                #assert order(g)==2
                print(order(g), end=' ')
                #print(g)
                #print(h)
                #print(eq(g, g.transpose()), end=' ')
                #print(eq(h, h.transpose()))
            if eq(g, g.transpose()) and eq(h, h.transpose()) and i < j:
                pairs.append((g, h))
                #print(".", end='')
    #print()
    #print(c_comm, c_anti, total)

    print("pairs:", len(pairs))
    ops = {}
    for (g, h) in pairs:
        ops[shortstr(g)] = g
        ops[shortstr(h)] = h
    ops = list(ops.values())
    print(len(ops))

    for g in ops:
        for h in ops:
            a = eq(dot(g, h), dot(h, g))
            b = eq(dot(g, h), -dot(h, g))
            if a == b == False:
                s = ' '
            elif a:
                s = '+ '
            elif b:
                s = '- '
            print(s, end=' ')
        print()

    return

    A, B, C, D = gen
    pairs = [s.split() for s in (
        "ABDA CBDC",
        "BACB DCDA",
        "BACB DCDC",
        "BACDBCBADCBC DCABDCDABACD",
    )]
    for l, r in pairs:
        left = [eval(s) for s in l]
        right = [eval(s) for s in r]
        left = dotx(*left)
        right = dotx(*right)

        if 1:
            print(shortstr(left))
            print(shortstr(left) in found)
            print()
            print(shortstr(right))
            print(shortstr(right) in found)
            print()
            print(shortstr(dotx(left, right)))
            print()
            print()

        assert eq(dotx(right, left), -dotx(left, right))
    using some bitwise arithmetic voodoo
    """
    return [x >> i & 1 for i in range(10)]


def fizz_buzz_encode(x: int) -> List[int]:
    if x % 15 == 0:
        return [0, 0, 0, 1]
    elif x % 5 == 0:
        return [0, 0, 1, 0]
    elif x % 3 == 0:
        return [0, 1, 0, 0]
    else:
        return [1, 0, 0, 0]


x_train = tensor([binary_encode(i) for i in range(101, 1024)])
y_train = tensor([fizz_buzz_encode(i) for i in range(101, 1024)])

HIDDEN_SIZE = 50
BATCH_SIZE = 32
LEARNING_RATE = 0.001
NUM_EPOCHS = 10_000

net = NeuralNet([
    Linear(input_size=10, output_size=HIDDEN_SIZE),
    Tanh(),
    Linear(input_size=HIDDEN_SIZE, output_size=4)
])

loss = SSE()
optimizer = SGD(lr=LEARNING_RATE)
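# Hedged addition (not in the original snippet): a small helper that maps a
# 4-way prediction vector back to the printed fizzbuzz answer, assuming the
# same label order used by fizz_buzz_encode above (number, fizz, buzz, fizzbuzz).
def fizz_buzz_decode(x: int, prediction: List[int]) -> str:
    labels = [str(x), "fizz", "buzz", "fizzbuzz"]
    return labels[prediction.index(max(prediction))]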
tensor_boxes = tensor_boxes[:, :7]
box_idx = points_in_boxes_gpu(pts1.unsqueeze(0), tensor_boxes.unsqueeze(0)).squeeze(0)
# boxes = LiDARInstance3DBoxes(gt_bboxes_3d.tensor[:, :7])
# box_idx = boxes.points_in_boxes(seg_points[:, :3].cuda())

# 1.
# fake_labels = torch.tensor([num_classes-1] * len(seg_labels))
# for i in range(len(box_idx)):
#     if box_idx[i] != -1:
#         fake_labels[i] = gt_labels_3d[box_idx[i]]

# 2.
# fake_labels = torch.tensor(list(map(lambda x: gt_labels_3d[x] if x >= 0 else num_classes-1, box_idx)))

# 3.
fake_labels = torch.tensor([num_classes-1] * len(seg_labels))
mask = box_idx != -1
fake_labels[mask] = torch.tensor(list(map(lambda x: gt_labels_3d[x], box_idx[mask])), dtype=torch.long)

end = time.time()
t1 = end - start

evaluator = SegEvaluator(class_names=dataset.SEG_CLASSES)
evaluator.update(fake_labels.numpy(), seg_labels.numpy())
print(evaluator.print_table())
print('overall_acc:', evaluator.overall_acc)
print('overall_iou:', evaluator.overall_iou)

# nuscenes points_in_box
start = time.time()
allcorners = gt_bboxes_3d.corners  # (N, 8, 3)
fake_labels = torch.tensor([num_classes-1] * len(seg_labels))