def __read_bal_data(self, file_name):
    print(util.Section('Read Data from File'))
    with bz2.open(file_name, 'rt') as file:
        self.n_cameras, self.n_points, n_observations = map(int, file.readline().split())

        self.camera_indices = np.empty(n_observations, dtype=int)
        self.point_indices = np.empty(n_observations, dtype=int)
        self.points_2d = np.empty((n_observations, 2))
        for i in range(n_observations):
            camera_idx, point_idx, x, y = file.readline().split()
            self.camera_indices[i] = int(camera_idx)
            self.point_indices[i] = int(point_idx)
            self.points_2d[i] = [float(x), float(y)]

        # camera parameters: R, t, f, k1, k2, where R is specified as a Rodrigues vector
        self.camera_params = np.empty(self.n_cameras * 9)
        for i in range(self.n_cameras * 9):
            self.camera_params[i] = float(file.readline())
        self.camera_params = self.camera_params.reshape((self.n_cameras, -1))

        self.points_3d = np.empty(self.n_points * 3)
        for i in range(self.n_points * 3):
            self.points_3d[i] = float(file.readline())
        self.points_3d = self.points_3d.reshape((self.n_points, -1))

    print(f'n_cameras: {self.n_cameras}')
    print(f'n_points: {self.n_points}')
    print(f'Total number of parameters: {9 * self.n_cameras + 3 * self.n_points}')
    print(f'Total number of residuals: {2 * self.points_2d.shape[0]}')
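# A minimal sketch (an assumption, not part of this class) of how the loaded BAL-style
# camera blocks (Rodrigues rotation, translation t, focal length f, radial distortion
# k1, k2) are typically consumed: project the observed 3D points into their cameras.
# The helper name project_bal is illustrative only.
from scipy.spatial.transform import Rotation


def project_bal(points, camera_params):
    """Project N x 3 points with N x 9 BAL-style cameras into the image plane."""
    rot = Rotation.from_rotvec(camera_params[:, :3])
    p = rot.apply(points) + camera_params[:, 3:6]   # rotate and translate into camera frame
    p = -p[:, :2] / p[:, 2, np.newaxis]             # perspective division (BAL cameras look down -z)
    f, k1, k2 = camera_params[:, 6], camera_params[:, 7], camera_params[:, 8]
    n2 = np.sum(p**2, axis=1)
    r = 1.0 + k1 * n2 + k2 * n2**2                  # radial distortion factor
    return p * (r * f)[:, np.newaxis]

# e.g. residuals = (project_bal(points_3d[point_indices],
#                               camera_params[camera_indices]) - points_2d).ravel()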
def geomdl_method(degree, ctrl_points):
    """Generate the B-spline curve using the geomdl package"""
    print(util.Section('B-Spline using geomdl Package'))

    # construct
    curve = BSpline.Curve()
    curve.degree = degree
    curve.ctrlpts = ctrl_points
    curve.knotvector = utilities.generate_knot_vector(degree, len(ctrl_points))
    curve.evaluate(step=0.01)

    print(f'knots length = {len(curve.knotvector)}, knots = {curve.knotvector}')
    print(f'c(0) = {curve.evaluate_single(0)}')
    print(f'c(0.5) = {curve.evaluate_single(0.5)}')
    print(f'c(0.6) = {curve.evaluate_single(0.6)}')
    print(f'c(1.0) = {curve.evaluate_single(1.0)}')

    # plot
    ctrl_plot_points = np.array(ctrl_points)
    curve_points = np.array(curve.evalpts)
    fig = plt.figure('B-Spline using geomdl Package')
    ax = fig.add_subplot(111)
    ax.plot(ctrl_plot_points[:, 0], ctrl_plot_points[:, 1], 'g.-.', label='Control Points')
    ax.plot(curve_points[:, 0], curve_points[:, 1], 'b', label='BSpline Curve')
    ax.grid(True)
    ax.set_title('B-Spline using geomdl Package')
    ax.legend(loc='best')
    plt.show(block=False)
def line_fit_example():
    """Example for 3D line fitting"""
    print(util.Section('3D Line Fitting'))

    # generate 3D point data: [x, y, z] = [a * t + x0, b * t + y0, c * t + z0]
    print(util.Section('Generate 3D Points Data'))
    abc = np.array([0.5, 0.6, 0.7])
    xyz0 = np.array([1.0, 2.0, 3.0])
    t = np.arange(-3.0, 3.0, 1)
    xyz = np.zeros((t.shape[0], 3))
    for n in range(t.shape[0]):
        xyz[n, :] = t[n] * abc + xyz0
    print(f'abc = {abc}, xyz0 = {xyz0}')

    # 3D line fitting using SVD
    print(util.SubSection('Fitting using SVD'))
    abc_svd, xyz0_svd = fit_line_svd(xyz)
    print(f'SVD: abc = {abc_svd}, xyz0 = {xyz0_svd}')

    # 3D line fitting using optimization
    fit_line_opt(xyz)

    # generate plot data
    t_plot = np.arange(-5.0, 5.0, 0.01)
    xyz_svd = np.zeros((t_plot.shape[0], 3))
    for n in range(t_plot.shape[0]):
        xyz_svd[n, :] = t_plot[n] * abc_svd + xyz0_svd

    # plot
    fig = plt.figure('3D Line Fitting')
    ax = fig.add_subplot(111, projection='3d')
    ax.plot(xyz[:, 0], xyz[:, 1], xyz[:, 2], 'k.', label='Measurement')
    ax.plot(xyz_svd[:, 0], xyz_svd[:, 1], xyz_svd[:, 2], 'r', label='SVD Fitting')
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')
    ax.set_title('3D Line Fitting')
    ax.legend()
    plt.show(block=True)
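# fit_line_svd() is referenced above but not shown here. A minimal sketch of how an
# SVD-based 3D line fit is commonly done (an assumed implementation, not necessarily
# the one used in this repository): the line passes through the centroid of the points,
# and its direction is the first right singular vector of the centered data.
def fit_line_svd_sketch(xyz):
    """Fit a 3D line to N x 3 points; return (direction abc, point on line xyz0)."""
    xyz0 = xyz.mean(axis=0)                # the centroid lies on the best-fit line
    _, _, vt = np.linalg.svd(xyz - xyz0)   # principal direction of the centered points
    return vt[0], xyz0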
def solve_least_square():
    print(util.Section('Solve Least Square'))
    u = np.array([
        4.0, 2.0, 1.0, 5.0e-1, 2.5e-1, 1.67e-1, 1.25e-1, 1.0e-1, 8.33e-2,
        7.14e-2, 6.25e-2
    ])
    y = np.array([
        1.957e-1, 1.947e-1, 1.735e-1, 1.6e-1, 8.44e-2, 6.27e-2, 4.56e-2,
        3.42e-2, 3.23e-2, 2.35e-2, 2.46e-2
    ])
    x0 = np.array([2.5, 3.9, 4.15, 3.9])
    res = least_squares(cost_fun, x0, jac=jac, bounds=(0, 100), args=(u, y), verbose=2)
    print(f'Result: {res.x}')
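# cost_fun() and jac() are defined elsewhere in this file. The data and starting point
# match the rational-model fitting example from the scipy.optimize.least_squares
# documentation, so a plausible sketch (an assumption, not necessarily the exact code
# used above) is:
def cost_fun_sketch(x, u, y):
    """Residuals of the rational model y ~ x0*(u^2 + x1*u) / (u^2 + x2*u + x3)."""
    return x[0] * (u**2 + x[1] * u) / (u**2 + x[2] * u + x[3]) - y


def jac_sketch(x, u, y):
    """Analytic Jacobian of the residuals with respect to x, shape (len(u), 4)."""
    J = np.empty((u.size, x.size))
    den = u**2 + x[2] * u + x[3]
    num = u**2 + x[1] * u
    J[:, 0] = num / den
    J[:, 1] = x[0] * u / den
    J[:, 2] = -x[0] * num * u / den**2
    J[:, 3] = -x[0] * num / den**2
    return J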
def auto_grad():
    print(util.Section('Auto Grad'))

    # grad function
    x = torch.ones(2, 2, requires_grad=True)
    y = x + 2
    z = y * y * 3
    out = z.mean()
    print('y = ', y)
    print('y.grad_fn = ', y.grad_fn)
    print('z = {0}, out = {1}'.format(z, out))

    # requires grad setting
    a = torch.randn(2, 2)
    a = (a * 3) / (a - 1)
    print('a.requires_grad = ', a.requires_grad)
    a.requires_grad_(True)
    print('a.requires_grad = ', a.requires_grad)
    b = (a * a).sum()
    print('b.grad_fn = ', b.grad_fn)

    # gradients
    out.backward()
    print('x.grad = ', x.grad)

    # do more crazy things with autograd
    x = torch.randn(3, requires_grad=True)
    y = x * 2
    while y.data.norm() < 1000:
        y = y * 2
    print('y = ', y)
    gradients = torch.tensor([0.1, 1.0, 0.0001], dtype=torch.float)
    y.backward(gradients)
    print('x.grad = ', x.grad)

    # stop autograd
    print(x.requires_grad)
    print((x**2).requires_grad)
    with torch.no_grad():
        print((x**2).requires_grad)
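# The x.grad printed right after out.backward() above can be verified analytically:
# with x = 1, out is the mean over 4 elements of 3 * (x + 2)^2, so
# d(out)/dx_i = (3 / 4) * 2 * (x_i + 2) = 4.5, i.e. a 2 x 2 tensor filled with 4.5.
# A quick numeric check (illustrative only, not part of the original script):
def check_out_grad():
    x = torch.ones(2, 2, requires_grad=True)
    out = (3 * (x + 2) ** 2).mean()
    out.backward()
    assert torch.allclose(x.grad, torch.full((2, 2), 4.5))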
def cal_basic():
    """Calculate the basic (basis) functions"""
    print(util.Section('Basic Function'))

    # evaluation for degree 1
    knots = np.array([0.0, 0, 1, 2, 4, 7, 7])
    degree = 1
    u_array = np.arange(knots.min(), knots.max(), 0.01)
    basic_array = np.zeros([7, u_array.shape[0]])
    for n, u in enumerate(u_array):
        for j in range(0, basic_array.shape[0]):
            basic_array[j, n] = basic(knots, degree, u, j)

    # plot
    fig = plt.figure(f'Basic Function of Degree {degree}')
    ax = fig.add_subplot(111)
    ax.grid(True)
    for j in range(0, basic_array.shape[0]):
        ax.plot(u_array, basic_array[j, :], label=f'$B_{{{j}{degree}}}$')
    ax.legend(loc='best')
    ax.set_title(f'Basic Function of Degree {degree} Defined on u = {knots}')

    # evaluation for degree 3
    knots = np.array([0.0, 0, 0, 0, 1, 2, 4, 7, 7, 7, 7])
    degree = 3
    u_array = np.arange(knots.min(), knots.max(), 0.01)
    basic_array = np.zeros([7, u_array.shape[0]])
    for n, u in enumerate(u_array):
        for j in range(0, basic_array.shape[0]):
            basic_array[j, n] = basic(knots, degree, u, j)

    # plot
    fig = plt.figure(f'Basic Function of Degree {degree}')
    ax = fig.add_subplot(111)
    ax.grid(True)
    for j in range(0, basic_array.shape[0]):
        ax.plot(u_array, basic_array[j, :], label=f'$B_{{{j}{degree}}}$')
    ax.legend(loc='best')
    ax.set_title(f'Basic Function of Degree {degree} Defined on u = {knots}')
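# basic() is defined elsewhere in this file; a minimal sketch of the Cox-de Boor
# recursion it presumably implements (the signature is assumed from the calls above):
#   B_{j,0}(u) = 1 if knots[j] <= u < knots[j+1] else 0
#   B_{j,p}(u) = (u - knots[j]) / (knots[j+p] - knots[j]) * B_{j,p-1}(u)
#              + (knots[j+p+1] - u) / (knots[j+p+1] - knots[j+1]) * B_{j+1,p-1}(u)
def basic_sketch(knots, degree, u, j):
    if degree == 0:
        return 1.0 if knots[j] <= u < knots[j + 1] else 0.0
    left, right = 0.0, 0.0
    if knots[j + degree] != knots[j]:
        left = (u - knots[j]) / (knots[j + degree] - knots[j]) \
            * basic_sketch(knots, degree - 1, u, j)
    if knots[j + degree + 1] != knots[j + 1]:
        right = (knots[j + degree + 1] - u) / (knots[j + degree + 1] - knots[j + 1]) \
            * basic_sketch(knots, degree - 1, u, j + 1)
    return left + right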
def my_method(degree, ctrl_points):
    print(util.Section('B-Spline using My Method'))
    spline = MyBSpline(degree, ctrl_points)
    print(f'c(0) = {spline.eval(0)}')
    print(f'c(0.5) = {spline.eval(0.5)}')
    print(f'c(0.6) = {spline.eval(0.6)}')
    print(f'c(1.0) = {spline.eval(1.0)}')

    # plot
    u = np.arange(0.0, 1.0, 0.01)
    curve_points = np.zeros((len(u), 2))
    for i, uu in enumerate(u):
        curve_points[i, :] = spline.eval(uu)

    fig = plt.figure('B-Spline using My Method')
    ax = fig.add_subplot(111)
    ax.plot(ctrl_points[:, 0], ctrl_points[:, 1], 'g.-.', label='Control Points')
    ax.plot(curve_points[:, 0], curve_points[:, 1], 'b', label='BSpline Curve')
    ax.grid(True)
    ax.set_title('B-Spline using My Method')
    ax.legend(loc='best')
    plt.show(block=False)
def __call__(self, sample):
    image, landmarks = sample['image'], sample['landmarks']

    # swap color axis because numpy image: H x W x C => torch image: C x H x W
    image = image.transpose((2, 0, 1))
    return {'image': torch.from_numpy(image),
            'landmarks': torch.from_numpy(landmarks)}


if __name__ == '__main__':
    # define some parameters
    parser = OptionParser()
    parser.add_option('-f', '--folder', dest='folder',
                      default='../../../../dataset/faces/', help='data set folder')
    options, args = parser.parse_args()

    # 1. simple usage
    print(util.Section('Simple Usage'))
    landmarks_frame = pd.read_csv(options.folder + 'face_landmarks.csv')
    n = 65
    img_name = landmarks_frame.iloc[n, 0]
    landmarks = landmarks_frame.iloc[n, 1:].values
    landmarks = landmarks.astype('float').reshape(-1, 2)
    print('Image name: {}'.format(img_name))
    print('Landmarks shape: {}'.format(landmarks.shape))
    print('First 4 Landmarks: {}'.format(landmarks[:4]))

    plt.figure()
    show_landmarks(io.imread(options.folder + img_name), landmarks)
    # plt.show()

    # 2. use as a Dataset
    print(util.Section('Use as a Dataset'))
def solve_minimize_jac():
    """Solve with the Jacobian function"""
    print(util.Section('Solve by minimize with Jacobians'))
    x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2])
    res = minimize(rosen, x0, method='BFGS', jac=rosen_jac, options={'disp': True})
    print(f'Result: {res.x}')
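# rosen() and rosen_jac() are defined elsewhere in this file (SciPy also ships reference
# implementations, scipy.optimize.rosen and rosen_der). A sketch of the analytic gradient
# that rosen_jac presumably returns (an assumption, shown for illustration):
def rosen_jac_sketch(x):
    """Gradient of the Rosenbrock function sum(100*(x[1:] - x[:-1]**2)**2 + (1 - x[:-1])**2)."""
    grad = np.zeros_like(x)
    grad[:-1] = -400 * x[:-1] * (x[1:] - x[:-1]**2) - 2 * (1 - x[:-1])
    grad[1:] += 200 * (x[1:] - x[:-1]**2)
    return grad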
def solve_minimize():
    """simple way"""
    print(util.Section('Solve by minimize'))
    x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2])
    res = minimize(rosen, x0, method='powell', options={'xtol': 1e-8, 'disp': True})
    print(f'Result: {res.x}')
}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes

# check device
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# visualize a few images
inputs, classes = next(iter(dataloaders['train']))
# make a grid from the batch
out = torchvision.utils.make_grid(inputs)
print(classes)
imshow(out, title=[class_names[x] for x in classes])
plt.show()

# fine tuning the convnet
print(util.Section('fine tuning the convnet'))
model_ft = models.resnet18(pretrained=True)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, 2)  # replace the final fully-connected layer for 2 classes
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
# observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# decay lr by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)

# train and evaluate
model_ft = train_model(model_ft, criterion, optimizer_ft,
def getting_started():
    print(util.Section('Getting Started'))

    # construction
    print(util.SubSection('Construction'))
    xa1 = torch.empty(5, 3)                         # uninitialized
    xa2 = torch.rand(5, 3)                          # randomly initialized matrix
    xa3 = torch.zeros(5, 3, dtype=torch.long)       # filled with zeros, dtype long
    xa4 = torch.tensor([5.5, 3])                    # directly from data
    xa5 = xa3.new_ones(5, 3, dtype=torch.double)    # new_* methods take in sizes
    xa6 = torch.randn_like(xa5, dtype=torch.float)  # override dtype, keep the same size
    print(f'x size = {xa6.size()}')

    # operations
    xb1 = torch.rand(5, 3)
    yb1 = torch.rand(5, 3)

    # operation: add
    print(util.SubSection('Operations: Add'))
    print(f'xb1 + yb1 = {xb1 + yb1}')
    print(f'xb1 + yb1 = {torch.add(xb1, yb1)}')
    # with output argument
    rb1 = torch.empty(5, 3)
    torch.add(xb1, yb1, out=rb1)
    print(f'rb1 = {rb1}')
    # add in place
    yb1.add_(xb1)
    print(f'yb1 = {yb1}')
    # index
    print(f'xb1[:,1] = {xb1[:, 1]}')

    # operation: resize
    print(util.SubSection('Operations: Resize'))
    xb2 = torch.randn(4, 4)
    yb2 = xb2.view(16)
    zb2 = xb2.view(-1, 8)
    print(f'xb2 = {xb2}')
    print(f'yb2 = {yb2}')
    print(f'zb2 = {zb2}')
    print(f'xb2.size = {xb2.size()}, yb2.size = {yb2.size()}, zb2.size = {zb2.size()}')

    # if there is only one element, .item() gets the value as a Python number
    xb3 = torch.randn(1)
    print(f'xb3 = {xb3}')
    print(f'xb3.item() = {xb3.item()}')

    # NumPy bridge: changing one will change the other
    print(util.SubSection('NumPy Bridge'))
    # torch => numpy
    xc1 = torch.ones(5)
    print(f'xc1 = {xc1}')
    yc1 = xc1.numpy()
    print(f'yc1 = {yc1}')
    # add: y will also change
    xc1.add_(1)
    print(f'xc1 = {xc1}')
    print(f'yc1 = {yc1}')
    # numpy => torch
    xc2 = np.ones(5)
    yc2 = torch.from_numpy(xc2)
    np.add(xc2, 1, out=xc2)
    print(f'xc2 = {xc2}')
    print(f'yc2 = {yc2}')

    # CUDA tensors
    print(util.SubSection('CUDA Tensors'))
    xd1 = torch.rand((3, 2))
    if torch.cuda.is_available():
        print('use CUDA')
        device = torch.device('cuda')
        yd1 = torch.ones_like(xd1, device=device)  # directly create a tensor on GPU
        xd2 = xd1.to(device)
        zd1 = xd2 + yd1
        print(f'zd1 = {zd1}')
        print(f'to CPU, zd1 = {zd1.to("cpu", torch.double)}')  # "to" can also change dtype
def cal_bspline():
    """Calculate the BSpline function"""
    print(util.Section('BSpline Function'))

    # Evaluation case 1
    knots = np.array([0.0, 0, 0, 0, 1, 2, 4, 7, 7, 7, 7.0])
    ctrl_points = np.array([[1, 2, 3, 4, 5, 6, 7], [2, 3, -3, 4, 5, -5, -6]])
    degree = 3
    u_array = np.arange(knots.min(), knots.max(), 0.01)
    bspline_array = np.zeros([ctrl_points.shape[0], u_array.shape[0]])
    for n, u in enumerate(u_array):
        bspline_array[:, n] = bspline(knots, ctrl_points, degree, u)

    # plot
    fig = plt.figure(f'Basic Function of Degree {degree} - Case 1')
    ax = fig.add_subplot(111)
    ax.grid(True)
    ax.plot(ctrl_points[0, :], ctrl_points[1, :], 'o--')
    ax.plot(bspline_array[0, :], bspline_array[1, :])
    ax.set_title(f'BSpline function of Degree {degree} Defined on u = {knots}')

    # Evaluation case 2, double the knots in case 1
    knots2 = 2 * knots
    ctrl_points = np.array([[1, 2, 3, 4, 5, 6, 7], [2, 3, -3, 4, 5, -5, -6]])
    degree = 3
    u_array2 = np.arange(knots2.min(), knots2.max(), 0.01)
    bspline_array2 = np.zeros([ctrl_points.shape[0], u_array2.shape[0]])
    for n, u in enumerate(u_array2):
        bspline_array2[:, n] = bspline(knots2, ctrl_points, degree, u)

    # plot
    fig = plt.figure(f'Basic Function of Degree {degree} - Double the Knots')
    ax = fig.add_subplot(111)
    ax.grid(True)
    ax.plot(u_array, bspline_array[0, :], label='BSpline1[0]')
    ax.plot(u_array, bspline_array[1, :], label='BSpline1[1]')
    ax.plot(u_array2, bspline_array2[0, :], label='BSpline2[0]')
    ax.plot(u_array2, bspline_array2[1, :], label='BSpline2[1]')
    ax.legend(loc='best')
    ax.set_title(f'BSpline function of Degree {degree} Defined on u = {knots}')

    # Evaluation case 3
    knots = np.array([0.0, 0, 0, 0, 2, 2, 2, 7, 7, 7, 7.0])
    ctrl_points = np.array([[1, 2, 3, 4, 5, 6, 7], [2, 3, -3, 4, 5, -5, -6]])
    degree = 3
    u_array = np.arange(knots.min(), knots.max(), 0.01)
    bspline_array = np.zeros([ctrl_points.shape[0], u_array.shape[0]])
    for n, u in enumerate(u_array):
        bspline_array[:, n] = bspline(knots, ctrl_points, degree, u)

    # plot
    fig = plt.figure(f'Basic Function of Degree {degree} - Case 3')
    ax = fig.add_subplot(111)
    ax.grid(True)
    ax.plot(ctrl_points[0, :], ctrl_points[1, :], 'o--')
    ax.plot(bspline_array[0, :], bspline_array[1, :], label='BSpline1')
    ax.legend(loc='best')
    ax.set_title(f'BSpline function of Degree {degree} Defined on u = {knots}')

    # Evaluation case 4, for different degrees
    knots = np.array([0.0, 0, 0, 0, 1, 2, 4, 7, 7, 7, 7])
    ctrl_points = np.array([[1, 2, 3, 4, 5, 6, 7], [2, 3, -3, 4, 5, -5, -6]])
    degree = 3
    u_array = np.arange(knots.min(), knots.max(), 0.01)
    bspline_array = np.zeros([ctrl_points.shape[0], u_array.shape[0]])
    bspline_array2 = np.zeros([ctrl_points.shape[0], u_array.shape[0]])
    bspline_array3 = np.zeros([ctrl_points.shape[0], u_array.shape[0]])
    for n, u in enumerate(u_array):
        bspline_array[:, n] = bspline(knots, ctrl_points, degree, u)
        bspline_array2[:, n] = bspline(knots, ctrl_points, degree - 1, u)
        bspline_array3[:, n] = bspline(knots, ctrl_points, degree - 2, u)

    # plot
    fig = plt.figure(f'Basic Function of Degree {degree} to {degree-2} - Case 4')
    ax = fig.add_subplot(111)
    ax.grid(True)
    ax.plot(ctrl_points[0, :], ctrl_points[1, :], 'o--')
    ax.plot(bspline_array[0, :], bspline_array[1, :], label='BSpline1')
    ax.plot(bspline_array2[0, :], bspline_array2[1, :], label='BSpline2')
    ax.plot(bspline_array3[0, :], bspline_array3[1, :], label='BSpline3')
    ax.legend(loc='best')
    ax.set_title(f'BSpline function of Degree {degree} to {degree-2} Defined on u = {knots}')
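# bspline() is defined elsewhere in this file; a minimal sketch of what such an evaluator
# typically does (the signature is assumed from the calls above): weight each control
# point by its basis function value at u, i.e. c(u) = sum_j B_{j,p}(u) * P_j.
def bspline_sketch(knots, ctrl_points, degree, u):
    """Evaluate a B-spline curve with 2 x N control points at parameter u."""
    point = np.zeros(ctrl_points.shape[0])
    for j in range(ctrl_points.shape[1]):
        point += basic(knots, degree, u, j) * ctrl_points[:, j]
    return point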
""" read data from text file and save to vector of 1024 size :param filename: text file name :return: 1024 vector """ return_vec = np.zeros((1, 1024)) file = open(filename) for i in range(32): line = file.readline() for j in range(32): return_vec[0, 32 * i + j] = int(line[j]) return return_vec if __name__ == '__main__': # Simple KNN Test print(util.Section("Simple KNN Test")) simple_classify = SimpleKnnClassify() simple_classify.test() # Dating Matching print(util.Section("Dating Match")) dating_match = DatingMatch('../../data/dating/datingTestSet2.txt', 0.1) dating_match.test() # Digit Recognition print(util.Section("Digit Recognition")) digit_recognition = DigitRecognition('../../data/digits/training', '../../data/digits/test') digit_recognition.test()
        transforms.Compose([
            transforms.RandomResizedCrop(input_size),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
    'val':
        transforms.Compose([
            transforms.Resize(input_size),
            transforms.CenterCrop(input_size),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
}

print(util.Section('Initializing Datasets and DataLoaders'))

# create training and validation datasets
image_datasets = {
    x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])
    for x in ['train', 'val']
}
# create training and validation dataloaders
dataloaders_dict = {
    x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size,
                                   shuffle=True, num_workers=4)
    for x in ['train', 'val']
}

# detect if we have a GPU available