def main():
    train_loader, test_loader, classes = create_loaders()
    net = Net()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
    train_network(train_loader, optimizer, criterion, net)
    test_network(test_loader, net)
    torch.save(net, 'cnn-model.pt')
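# A minimal sketch (not part of the original) showing how the full module saved above
# with torch.save(net, 'cnn-model.pt') could be reloaded for inference. Loading a
# whole-module checkpoint requires the Net class to be importable at load time;
# saving net.state_dict() instead is the more portable alternative.
def load_for_inference(path='cnn-model.pt'):
    net = torch.load(path)  # unpickles the complete nn.Module
    net.eval()              # disable dropout / batch-norm updates for inference
    return net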
def predict():
    dataset = GetData1(transform=transforms)
    cnn = Net().to(device)
    cnn.load_state_dict(torch.load(model_path))
    cnn.eval()
    # cnn = torch.load('model7.pth', map_location=torch.device('cpu'))
    torch.save(cnn, 'model7.pth')

    sub = []
    for k, img in enumerate(dataset):
        img = img.unsqueeze(0)
        img = Variable(img)
        img = img.to(device)
        output = cnn(img)
        output = output.view(-1, 62)           # one row of 62 class scores per character
        output = F.softmax(output, dim=1)
        output = torch.argmax(output, dim=1)
        # group every 4 characters into one prediction
        output = output.view(-1, 4)
        output = np.array(output.cpu())[0]
        result = ''
        for i in output:
            result = result + alphabet[i]
        print(result, k / len(path_name))
        sub.append(result)

    subm['label'] = sub
    subm.to_csv('result5.csv', index=None)
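# Hypothetical helper (not in the original) that isolates the decoding step used in
# predict(): the network emits 4 * 62 logits per captcha image, which are reshaped to
# one row of 62 class scores per character, argmax-ed, and mapped through `alphabet`.
def decode_captcha(logits, alphabet, n_chars=4, n_classes=62):
    scores = F.softmax(logits.view(-1, n_classes), dim=1)  # (n_chars, n_classes)
    indices = torch.argmax(scores, dim=1)                  # best class per character
    return ''.join(alphabet[i] for i in indices.tolist()[:n_chars])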
def main():
    # graph, inp, max_action, optimal_action, out, action, loss, optimizer = create_graph()
    model = CNN()
    optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)
    if conf.is_training:
        if args.supervised:
            model = Net()
            optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)
            criterion = torch.nn.MSELoss()
            losses = supervised_train(model, criterion, optimizer)
        elif args.reinforcement:
            model, optimizer, _, losses = utils.resume_checkpoint(
                model, optimizer, gpu, config.checkpoint)
            deep_q_train(model)
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--mode', type=int, default=1, metavar='N',
                        help='mode to define which model to use')
    parser.add_argument('--batch-size', type=int, default=10, metavar='N',
                        help='input batch size for training (default: 10)')
    parser.add_argument('--hidden-size', type=int, default=100, metavar='N',
                        help='hidden layer size for network (default: 100)')
    parser.add_argument('--weight-decay', type=float, default=0, metavar='N',
                        help='weight decay, used for L2 regularization (default: 0)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=60, metavar='N',
                        help='number of epochs to train (default: 60)')
    parser.add_argument('--lr', type=float, default=0.1, metavar='LR',
                        help='learning rate (default: 0.1)')
    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model', action='store_true', default=False,
                        help='for saving the current model')
    args = parser.parse_args()

    # Check whether we can use the GPU for training.
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    device = torch.device("cuda" if use_cuda else "cpu")
    # May increase the number of workers to speed up the data loading.
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    # ======================================================================
    # STEP 0: Load data from the MNIST database.
    # This loads our training and test data from the MNIST database files
    # available in the torchvision package.
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST(
            '../data',
            train=True,
            download=True,
            transform=transforms.Compose([
                transforms.ToTensor(),  # scale pixel values between 0 and 1
                transforms.Normalize((0.1307,), (0.3081,))  # normalize using mean and standard deviation
            ])),
        batch_size=args.batch_size,
        shuffle=True,
        **kwargs)
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST(
            '../data',
            train=False,
            transform=transforms.Compose([
                transforms.ToTensor(),  # scale pixel values between 0 and 1
                transforms.Normalize((0.1307,), (0.3081,))  # normalize using mean and standard deviation
            ])),
        batch_size=args.test_batch_size,
        shuffle=True,
        **kwargs)

    # ======================================================================
    # STEP 1: Train a baseline model.
    # This trains a feed-forward neural network with one hidden layer.
    # Expected accuracy >= 97.80%
    if args.mode == 1:
        model = Net(1, args).to(device)
        accuracy = train_and_test(args, device, model, test_loader, train_loader)
        # Output accuracy.
        print(20 * '*' + 'model 1' + 20 * '*')
        print('accuracy is %f' % (accuracy))
        print()

    # ======================================================================
    # STEP 2: Use two convolutional layers.
    # Expected accuracy >= 99.06%
    if args.mode == 2:
        model = Net(2, args).to(device)
        accuracy = train_and_test(args, device, model, test_loader, train_loader)
        # Output accuracy.
        print(20 * '*' + 'model 2' + 20 * '*')
        print('accuracy is %f' % (accuracy))
        print()

    # ======================================================================
    # STEP 3: Replace sigmoid activation with ReLU.
    # Expected accuracy >= 99.23%
    if args.mode == 3:
        args.lr = 0.03
        model = Net(3, args).to(device)
        accuracy = train_and_test(args, device, model, test_loader, train_loader)
        # Output accuracy.
        print(20 * '*' + 'model 3' + 20 * '*')
        print('accuracy is %f' % (accuracy))
        print()

    # ======================================================================
    # STEP 4: Add one more fully connected layer.
    # Expected accuracy >= 99.37%
    if args.mode == 4:
        args.lr = 0.03
        args.weight_decay = 1e-5
        model = Net(4, args).to(device)
        accuracy = train_and_test(args, device, model, test_loader, train_loader)
        # Output accuracy.
        print(20 * '*' + 'model 4' + 20 * '*')
        print('accuracy is %f' % (accuracy))
        print()

    # ======================================================================
    # STEP 5: Add dropout to reduce overfitting.
    # Expected accuracy: 99.40%
    if args.mode == 5:
        args.lr = 0.03
        args.epochs = 40
        args.hidden_size = 1000
        model = Net(4, args).to(device)
        accuracy = train_and_test(args, device, model, test_loader, train_loader)
        # Output accuracy.
        print(20 * '*' + 'model 5' + 20 * '*')
        print('accuracy is %f' % (accuracy))
        print()
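# Example invocations (the script filename and the __main__ guard are standard-pattern
# assumptions; the original entry point is not shown above):
#   python mnist_modes.py --mode 1                 # baseline one-hidden-layer network
#   python mnist_modes.py --mode 3 --epochs 20     # conv layers + ReLU, shorter run
#   python mnist_modes.py --mode 5 --save-model    # dropout variant, keep the trained weights
if __name__ == '__main__':
    main()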
def game_loop(args):
    pygame.init()
    pygame.font.init()
    world = None

    ## Set up pointnet - Lidar
    num_classes = 1
    feature_transform = False
    net = Net()
    global episode_count
    global collision_glb
    episode_count = int(args.iter)
    load_pretrained = True
    if load_pretrained:
        weights_path = ('./result/dagger_%d.pth' % episode_count)
        print('loading pretrained model from.. ' + weights_path)
        net.load_state_dict(torch.load(weights_path))
    net.cuda()

    try:
        client = carla.Client(args.host, args.port)
        client.set_timeout(2.0)
        display = pygame.display.set_mode((args.width, args.height),
                                          pygame.HWSURFACE | pygame.DOUBLEBUF)
        carla_world = client.get_world()
        # settings = carla_world.get_settings()
        # carla_world.apply_settings(carla.WorldSettings(
        #     no_rendering_mode=False,
        #     synchronous_mode=True,
        #     fixed_delta_seconds=1.0 / 20))

        ## save control signal: throttle, steer, brake, speed
        episode_count += 1
        saver_control = BufferedImageSaver(
            '%s/ep_%d/' % (out_dir, episode_count), 100, 1, 1, 4, 'Control')
        ## save used control signal: throttle, steer, brake, speed
        saver_control_real = BufferedImageSaver(
            '%s/ep_%d/' % (out_dir, episode_count), 100, 1, 1, 4, 'Control_real')

        clock = pygame.time.Clock()
        world = World(client.get_world(), args)
        controller = KeyboardControl(world, args.autopilot)

        ## PID agent
        world.player.set_location(world.map.get_spawn_points()[0].location)
        ## training road
        world.player.set_transform(
            carla.Transform(carla.Location(x=305.0, y=129.0, z=2.0),
                            carla.Rotation(pitch=0.0, yaw=180.0, roll=0.0)))
        clock.tick()
        ret = world.tick(timeout=10.0)
        agent = RoamingAgent(world.player)
        print('NNPID: current location, ', world.player.get_location())

        position = []
        with world:
            while True:
                # clock.tick_busy_loop(60)
                # if controller.parse_events(client, world, clock):
                #     carla_world.tick()
                #     ts = carla_world.wait_for_tick()
                #     return
                if should_quit():
                    return

                ## set custom control
                pid_control = agent.run_step()
                waypt_buffer = agent._waypoint_buffer
                while not waypt_buffer:
                    pid_control = agent.run_step()

                if collision_glb:
                    player_loc = world.player.get_location()
                    # waypt = carla_world.get_map().get_waypoint(player_loc)
                    waypt, _ = waypt_buffer[0]
                    world.player.set_transform(waypt.transform)
                    # world.player.set_location(waypt.transform.location)
                    collision_glb = False
                    print('hit! respawn')
                    pid_control = agent.run_step()
                pid_control.manual_gear_shift = False

                ## Neural net control
                cust_ctrl = controller._control
                cust_ctrl.throttle = 0.5  # 18 km/h
                cust_ctrl.brake = 0

                clock.tick()
                ret = world.tick(timeout=10.0)
                if ret:
                    snapshot = ret[0]
                    img_rgb = ret[1]
                    img_lidar = ret[2]
                    world.parse_image_custom(display, img_rgb, 'CameraRGB')
                    world.parse_image_custom(display, img_lidar, 'Lidar')
                    # if ret[3]:
                    #     world.on_collision(ret[3])
                    '''
                    ## Lidar
                    tst_inputs = np.frombuffer(img_lidar.raw_data, dtype=np.dtype('f4'))
                    tst_inputs = np.reshape(tst_inputs, (int(tst_inputs.shape[0] / 3), 3))
                    tst_inputs = np.asarray(tst_inputs, dtype=np.float32)
                    # test if there's large points
                    sum_ = np.sum(np.absolute(tst_inputs), axis=1)
                    mask = np.logical_and(sum_ < 50*3, sum_ > 0.0001)
                    pts_filter = tst_inputs[mask]
                    if (pts_filter.shape != tst_inputs.shape):
                        print('pts filter : pts =', pts_filter.shape, tst_inputs.shape)
                    tst_inputs = torch.from_numpy(tst_inputs)
                    # print(tst_inputs.shape)
                    tst_inputs = tst_inputs[0:1900, :]
                    # tst_inputs = tst_inputs.unsqueeze(0)
                    tst_inputs = tst_inputs.transpose(2, 1)
                    tst_inputs = tst_inputs.cuda()
                    points = tst_inputs
                    '''
                    # print(tst_inputs)

                    ## images
                    raw_img = np.frombuffer(img_rgb.raw_data, dtype=np.uint8)
                    raw_img = raw_img.reshape(720, 1280, -1)
                    raw_img = raw_img[:, :, :3]
                    raw_img = cv2.resize(raw_img, dsize=(180, 180))
                    # print(raw_img)
                    tst_inputs = raw_img / 255
                    tst_inputs = np.transpose(tst_inputs, (2, 0, 1))
                    tst_inputs = np.asarray(tst_inputs, dtype=np.float32)
                    tst_inputs = torch.from_numpy(tst_inputs)
                    tst_inputs = tst_inputs.unsqueeze(0)
                    tst_inputs = tst_inputs.cuda()
                    image = tst_inputs

                    net = net.eval()
                    # print(image)
                    with torch.no_grad():
                        ah = net(image, [])
                        ah = torch.squeeze(ah)
                        # print(a_list[0].detach().squeeze().tolist())
                    # print(ah)
                    outputs = ah
                    # print(outputs)
                    cust_ctrl.steer = outputs.item()
                    #'''
                    world.player.apply_control(cust_ctrl)
                    player_loc = world.player.get_location()
                    position.append([player_loc.x, player_loc.y, player_loc.z])
                    #'''

                    ## check the center of the lane
                    # waypt = carla_world.get_map().get_waypoint(player_loc)
                    waypt, road_option = waypt_buffer[0]
                    lane_center = waypt.transform.location
                    # print(_current_lane_info.lane_type)
                    # print('waypt ', lane_center)
                    # print('player ', player_loc)
                    dist = math.sqrt((lane_center.x - player_loc.x)**2 +
                                     (lane_center.y - player_loc.y)**2)

                    ## dif in direction
                    next_dir = waypt.transform.rotation.yaw % 360.0
                    player_dir = world.player.get_transform().rotation.yaw % 360.0
                    diff_angle = (next_dir - player_dir) % 180.0

                    ## too far from road, use PID control
                    if (diff_angle > 85 and diff_angle < 105) or dist >= 15:
                        # print('pid_control')
                        # world.player.apply_control(pid_control)
                        # draw_waypoints(carla_world, [waypt], player_loc.z + 2.0)
                        player_loc = world.player.get_location()
                        # waypt = carla_world.get_map().get_waypoint(player_loc)
                        waypt, _ = waypt_buffer[0]
                        world.player.set_transform(waypt.transform)
                        # world.player.set_location(waypt.transform.location)
                        collision_glb = False
                        print('too far! respawn')
                        pid_control = agent.run_step()
                        #'''
                        # draw_waypoints(carla_world, [waypt], player_loc.z + 2.0)
                        # world.player.apply_control(pid_control)
                else:
                    print("Nothing is returned from world.tick :(")

                ## Record expert (PID) control
                c = pid_control
                throttle = c.throttle  # 0.0, 1.0
                steer = c.steer        # -1.0, 1.0
                brake = c.brake        # 0.0, 1.0
                # print(throttle, steer, brake)
                v = world.player.get_velocity()
                speed = 3.6 * math.sqrt(v.x**2 + v.y**2 + v.z**2)
                # print('Speed: % 15.0f km/h' % (speed))
                control = np.array([throttle, steer, brake, speed])
                saver_control.add_image(control, "Control")
                control_used = np.array(
                    [throttle, float(cust_ctrl.steer), brake, speed])
                saver_control_real.add_image(control_used, "Control")

                if len(position) == 2050:
                    break
                pygame.display.flip()

    finally:
        print("Destroying actors...")
        if world is not None:
            world.destroy()

        ## save position
        position = np.asarray(position)
        save_name = './dagger_data/ep_%d/path.npy' % (episode_count)
        np.save(save_name, position)
        print('position saved in ', save_name)

        pygame.quit()
        print("Done")
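# Hedged sketch (not part of the original) of an argument parser supplying the fields
# game_loop() reads: args.host, args.port, args.width, args.height, args.iter and
# args.autopilot. The defaults below are illustrative assumptions only.
import argparse

def parse_dagger_args():
    parser = argparse.ArgumentParser(description='DAgger data collection in CARLA')
    parser.add_argument('--host', default='127.0.0.1', help='CARLA server host')
    parser.add_argument('--port', type=int, default=2000, help='CARLA server port')
    parser.add_argument('--width', type=int, default=1280, help='pygame window width')
    parser.add_argument('--height', type=int, default=720, help='pygame window height')
    parser.add_argument('--iter', type=int, default=0, help='DAgger iteration / checkpoint index')
    parser.add_argument('--autopilot', action='store_true', help='enable CARLA autopilot')
    return parser.parse_args()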
## load data and labels
dataset = RGBDataset(data_action_exp=data_action_exp, data_rgb=data_rgb)
dataloader = torch.utils.data.DataLoader(dataset,
                                         batch_size=batchSize,
                                         shuffle=True,
                                         num_workers=0)
num_data = data_rgb.shape[0]

#############################################################################
## training
print("training")
net = Net()
optimizer = optim.Adam(net.parameters(), lr=0.001, betas=(0.9, 0.999))
# if (iteration >= 5):
#     optimizer = optim.Adam(net.parameters(), lr=5e-4, betas=(0.9, 0.999))
# elif (iteration >= 6):
#     optimizer = optim.Adam(net.parameters(), lr=2e-4, betas=(0.9, 0.999))
# elif (iteration >= 8):
#     optimizer = optim.Adam(net.parameters(), lr=1e-4, betas=(0.9, 0.999))
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)
net.cuda()
# net.apply(weights_init)

num_batch = num_data / batchSize
print('num_batch =', num_batch)
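# A minimal training-epoch sketch (an assumption, not the original training loop):
# it presumes RGBDataset yields (image, expert_action) batches and that the forward
# signature matches the inference call net(image, []) used in game_loop().
criterion = torch.nn.MSELoss()
for epoch in range(40):  # the epoch count here is an illustrative assumption
    for images, actions in dataloader:
        images, actions = images.cuda(), actions.float().cuda()
        optimizer.zero_grad()
        pred = net(images, [])                      # same call signature as at inference time
        loss = criterion(pred.squeeze(), actions.squeeze())
        loss.backward()
        optimizer.step()
    scheduler.step()  # StepLR above halves the learning rate every 20 epochs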
testset = torchvision.datasets.CIFAR10(root='./data',
                                       train=False,
                                       download=True,
                                       transform=transform)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=4,
                                         shuffle=False,
                                         num_workers=2)

classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
           'ship', 'truck')

# define a Convolutional Neural Network
net = Net()

# define a Loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

# train the network
for epoch in range(4):  # loop over the dataset multiple times
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        # get the inputs; data is a list of [inputs, labels]
        inputs, labels = data
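        # The loop body is truncated above; the remaining steps below follow the
        # standard PyTorch CIFAR-10 tutorial pattern and are an assumed reconstruction.
        optimizer.zero_grad()              # reset gradients from the previous step
        outputs = net(inputs)              # forward pass
        loss = criterion(outputs, labels)
        loss.backward()                    # backpropagate
        optimizer.step()                   # update weights

        running_loss += loss.item()
        if i % 2000 == 1999:               # print every 2000 mini-batches
            print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 2000))
            running_loss = 0.0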
def main():
    """
    Load the training and test datasets and preprocess them.
    :return:
    """
    train_data = GetData(test_data_path, label_path=test_data_label, train=True, transform=transforms)
    train_data_loader = DataLoader(train_data, batch_size=64, shuffle=True, num_workers=4)
    test_data = GetData(test_data_path, label_path=test_data_label, train=False, transform=transforms)
    test_data_loader = DataLoader(test_data, batch_size=batch_size, num_workers=0, shuffle=True)

    cnn = Net(62, 4).to(device)

    # If the model has been trained before, load the saved weights.
    if go_on:
        # load the model and the best accuracy so far
        cnn.load_state_dict(torch.load(model_path))
        try:
            with open(best_acc_path, 'r') as f:
                r = f.readline()
                best_acc = float(r)
        except:
            best_acc = 0
            print("no such file")
    else:
        best_acc = 0

    optimizer = optim.Adam(cnn.parameters(), lr=lr, weight_decay=weight_decay)
    criterion = nn.MultiLabelSoftMarginLoss().to(device)

    # visualize loss and accuracy with visdom
    global_step_train = 0
    viz.line([0], [-1], win='train_loss', opts=dict(title='train_loss'))
    viz.line([0], [-1], win='train_acc', opts=dict(title='train_acc'))
    viz.line([0], [-1], win='test_acc', opts=dict(title='test_acc'))

    for epoch in range(epochs):
        all_loss = []
        accs = []
        cnn.train()
        for img, target in train_data_loader:
            img, target = Variable(img), Variable(target)
            img, target = img.to(device), target.to(device)
            logits = cnn(img)
            loss = criterion(logits, target)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            acc = acc_predict(logits, target)  # training accuracy for this batch
            accs.append(acc)
            all_loss.append(loss)
            viz.line([loss.item()], [global_step_train], win='train_loss', update='append')
            global_step_train += 1
            """print('train_loss: {:.4}|train_acc: {:.4}'.format(
                torch.mean(torch.Tensor(loss_history)),
                torch.mean(torch.Tensor(acc_history)),
            ))"""
        viz.line([torch.mean(torch.Tensor(accs))], [epoch], win='train_acc', update='append')

        accs = []
        cnn.eval()
        for img, target in test_data_loader:
            img, target = Variable(img), Variable(target)
            img, target = img.to(device), target.to(device)
            output = cnn(img)
            acc = acc_predict(output, target)
            accs.append(acc)
            all_loss.append(loss)
        viz.line([torch.mean(torch.Tensor(accs))], [epoch], win='test_acc', update='append')

        # if best_acc < torch.mean(torch.Tensor(accs)) or epoch == 10:
        if best_acc < torch.mean(torch.Tensor(accs)):
            best_acc = torch.mean(torch.Tensor(accs))
            torch.save(cnn.state_dict(), model_path)
            with open(best_acc_path, 'w') as f:
                f.write(str(float(best_acc.cpu())))
            print(float(best_acc.cpu()))
        print("Epoch {} finished".format(epoch))
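# acc_predict() is used above but not shown; a hypothetical version (named
# acc_predict_sketch here so it does not clobber the real helper), assuming the targets
# are flattened one-hot labels with 4 characters * 62 classes per sample and counting a
# sample as correct only when all 4 characters match.
def acc_predict_sketch(logits, target, n_chars=4, n_classes=62):
    pred = logits.view(-1, n_chars, n_classes).argmax(dim=2)
    true = target.view(-1, n_chars, n_classes).argmax(dim=2)
    return (pred == true).all(dim=1).float().mean()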
def main_pickle_load(which_cut, labels, batches, net_override=False, train=True):
    """
    Load one pickled data chunk for training or testing and split it into batches.
    Might need to be simplified at some point.
    """
    x_list = []
    y_list = []
    full = {}
    if train:
        add = '_train'
    else:
        add = '_test'

    for lab in labels:
        with open(
                external_file_area + '_dict/' + lab + add + str(which_cut) + '.pickle',
                'rb') as handle:
            full[lab] = pickle.load(handle)
    print('Loaded Chunk %i' % which_cut)

    for val, lab in enumerate(labels):
        for samp in full[lab]:
            x_list.append(samp)
            y_list.append(val)
    x = np.stack(x_list, axis=0)
    y = np.array(y_list)

    if train:
        x_train, x_val, y_train, y_val = train_test_split(x, y, test_size=0.1, random_state=42)
        # Not touching validation right quick
        print('Data Split and Randomized')
    else:
        x_train, y_train = shuffle_in_unison(x, y)
        x_val, y_val = None, None  # no validation split in test mode
        print('Data Randomized')
    x = None
    y = None

    h = x_train.shape[1]
    w = x_train.shape[2]
    channels = 1

    train_model_path = '../model_train/train.out'
    train_condition = not Path(train_model_path).is_file()
    if train_condition or net_override:
        cnn = Net(batches, channels, h, w, len(labels))
    else:
        cnn = torch.load(train_model_path)
        print('Model Loaded In')

    total_train = []
    total_lab = []
    breakout = False
    train_use = x_train.shape[0]
    for x in range(train_use):
        start = int(x * batches)
        end = int(start + batches)
        if end >= train_use:
            # last batch: shift the window back so it still has `batches` samples
            end = int(train_use - 1)
            start = int(end - batches)
            breakout = True
        total_train.append(x_train[start:end, :, :])
        total_lab.append(y_train[start:end])
        if breakout:
            break
    x_train = None
    y_train = None
    print('Broken into batches')

    return cnn, total_train, total_lab, x_val, y_val
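# Hedged usage sketch (not in the original): load chunk 0 for training and step
# through the pre-built batches. The class names below are illustrative assumptions.
if __name__ == '__main__':
    class_labels = ['signal', 'background']  # hypothetical label names
    cnn, batch_xs, batch_ys, x_val, y_val = main_pickle_load(0, class_labels, batches=32)
    for xb, yb in zip(batch_xs, batch_ys):
        # e.g. convert each (batches, h, w) array to a tensor and run one training step
        pass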
from skimage import io
import seaborn as sns
import matplotlib.pyplot as plt
import base64

device = torch.device("cpu")
cuda = False

A, B, C, D = 64, 8, 16, 16
# add cnn model as well
capsules_model = capsules(A=A, B=B, C=C, D=D, E=10, iters=2, cuda=False).to(device)
capsules_model.load_state_dict(
    torch.load('./saved_model/mnist_capsules.pth', map_location=device))
capsules_model.eval()

cnn_model = Net()
cnn_model.load_state_dict(
    torch.load('./saved_model/mnist_cnn.pth', map_location=device))
cnn_model.eval()

app = Flask(__name__)


def convertImage(imgData1):
    imgstr = re.search(b'base64,(.*)', imgData1).group(1)
    # print(imgstr)
    with open('output.png', 'wb') as output:
        output.write(base64.b64decode(imgstr))


def getInput(imgData):
    convertImage(imgData)
    x = imread('output.png', mode='L')