def __init__(self, args):
    """Build the generator/discriminator pair, optimizers and logging.

    Args:
        args: parsed configuration namespace; this method reads device,
            input_size, lrG, lrD, beta1, beta2, batch_size, use_tensorboard,
            tb_path, gan_type, dataset and epoch.
    """
    # set parameters
    self.args = args
    self.z_dim = 62  # dimensionality of the latent noise vector

    # load dataloader; peek one batch to infer the data channel count
    self.data_loader = get_loader(args=args)
    data = next(iter(self.data_loader))[0]

    # net init
    self.G = G(input_dim=self.z_dim, output_dim=data.shape[1],
               input_size=args.input_size)
    self.D = D(input_dim=data.shape[1], output_dim=1,
               input_size=args.input_size)
    self.G = self.G.to(args.device)
    self.D = self.D.to(args.device)
    self.G_optimizer = optim.Adam(self.G.parameters(), lr=args.lrG,
                                  betas=(args.beta1, args.beta2))
    self.D_optimizer = optim.Adam(self.D.parameters(), lr=args.lrD,
                                  betas=(args.beta1, args.beta2))

    # loss criterion (MSE — presumably an LSGAN-style objective;
    # NOTE(review): confirm against args.gan_type)
    self.MSE_loss = nn.MSELoss()

    # fixed sample to visualize generator progress across training
    self.fixed_z = torch.rand((args.batch_size, self.z_dim))

    # tensorboard
    if args.use_tensorboard:
        from libs import Logger
        self.logger = Logger(log_dir=args.tb_path)

    # print model configuration
    print()
    print("##### Information #####")
    print("# gan type : ", args.gan_type)
    print("# dataset : ", args.dataset)
    print("# batch_size : ", args.batch_size)
    print("# epoch : ", args.epoch)
    print()
class WGAN:
    """Wasserstein GAN trained with weight clipping.

    The critic (D) is updated every iteration and clipped to [-c, c];
    the generator (G) is updated once every ``n_critic`` iterations.
    """

    def __init__(self, args):
        """Build networks, optimizers and logging from the parsed config.

        Args:
            args: configuration namespace; reads device, input_size, lrG,
                lrD, beta1, beta2, batch_size, use_tensorboard, tb_path,
                gan_type, dataset, epoch, log_freq, img_save_freq,
                img_path, ckpt_save_freq and ckpt_path.
        """
        # set parameters
        self.args = args
        self.z_dim = 62      # dimensionality of the latent noise vector
        self.c = 0.01        # clipping grad value
        self.n_critic = 5    # iterations of the critic per generator update

        # load dataloader; peek one batch to infer the data channel count
        self.data_loader = get_loader(args=args)
        data = next(iter(self.data_loader))[0]

        # net init
        self.G = G(input_dim=self.z_dim, output_dim=data.shape[1],
                   input_size=args.input_size)
        self.D = D(input_dim=data.shape[1], output_dim=1,
                   input_size=args.input_size)
        self.G = self.G.to(args.device)
        self.D = self.D.to(args.device)
        self.G_optimizer = optim.Adam(self.G.parameters(), lr=args.lrG,
                                      betas=(args.beta1, args.beta2))
        self.D_optimizer = optim.Adam(self.D.parameters(), lr=args.lrD,
                                      betas=(args.beta1, args.beta2))

        # fixed sample to visualize generator progress across training
        # NOTE(review): train() builds its own fixed_z and ignores this one.
        self.fixed_z = torch.rand((args.batch_size, self.z_dim))

        # tensorboard
        if args.use_tensorboard:
            from libs import Logger
            self.logger = Logger(log_dir=args.tb_path)

        # print model configuration
        print()
        print("##### Information #####")
        print("# gan type : ", args.gan_type)
        print("# dataset : ", args.dataset)
        print("# batch_size : ", args.batch_size)
        print("# epoch : ", args.epoch)
        print()

    def train(self):
        """Run the full WGAN training loop.

        Side effects: logs losses to stdout (and tensorboard when enabled),
        saves sample image grids to args.img_path and checkpoints of G/D
        state dicts to args.ckpt_path.
        """
        # Fixed z used for the periodic sample images
        fixed_z = torch.rand((self.args.batch_size, self.z_dim))
        fixed_z = fixed_z.to(self.args.device)

        # Training...!
        self.G.train()
        self.D.train()

        total_it = 0
        start_time = time.time()
        loss_dict = {}
        print('Training start...!!')
        for epoch in range(self.args.epoch):
            for it, (x, _) in enumerate(self.data_loader):
                x = x.to(self.args.device)
                z = torch.rand((x.size(0), self.z_dim))
                z = z.to(self.args.device)

                # (removed unused self.real_y / self.fake_y BCE labels —
                # the Wasserstein objective below never uses them)

                # ========== Update D (critic) ========== #
                D_real = self.D(x)
                real_D_loss = -torch.mean(D_real)

                # detach: the critic step must not backprop through G
                # (avoids a wasted backward pass and stale grads on G)
                fake_x = self.G(z).detach()
                D_fake = self.D(fake_x)
                fake_D_loss = torch.mean(D_fake)

                D_loss = (real_D_loss + fake_D_loss) / 2
                self.D_optimizer.zero_grad()
                D_loss.backward()
                self.D_optimizer.step()

                # clipping D weights enforces the Lipschitz constraint
                for p in self.D.parameters():
                    p.data.clamp_(-self.c, self.c)

                loss_dict['D/real_D_loss'] = real_D_loss.item()
                loss_dict['D/fake_D_loss'] = fake_D_loss.item()

                # ========== Update G every n_critic iterations ========== #
                if (total_it + 1) % self.n_critic == 0:
                    fake_x = self.G(z)
                    D_fake = self.D(fake_x)
                    fake_G_loss = -torch.mean(D_fake)

                    self.G_optimizer.zero_grad()
                    fake_G_loss.backward()
                    self.G_optimizer.step()

                    loss_dict['G/fake_G_loss'] = fake_G_loss.item()

                # periodic console/tensorboard logging
                if (total_it + 1) % self.args.log_freq == 0 or total_it == 0:
                    et = time.time() - start_time
                    et = str(datetime.timedelta(seconds=et))[:-7]
                    log = f"Elapsed [{et}], Epoch/Iter [{epoch + 1:03d}/{total_it + 1:07d}]"
                    for tag, value in loss_dict.items():
                        log += f", {tag}: {value:.4f}"
                    print(log)

                    if self.args.use_tensorboard:
                        for tag, value in loss_dict.items():
                            self.logger.scalar_summary(tag, value, total_it + 1)

                # periodic sample-image grid from the fixed latent batch
                if (total_it + 1) % self.args.img_save_freq == 0 or total_it == 0:
                    self.G.eval()
                    with torch.no_grad():
                        fixed_fake_x = self.G(fixed_z)
                        img_path = os.path.join(
                            self.args.img_path,
                            f"{total_it + 1:07d}-images.png")
                        # square grid: one row per sqrt(batch_size) images
                        nrow = int(self.args.batch_size ** 0.5)
                        save_image(denorm(fixed_fake_x.data.cpu()),
                                   img_path, nrow=nrow, padding=0)
                    self.G.train()

                # periodic checkpointing of both state dicts
                if (total_it + 1) % self.args.ckpt_save_freq == 0 or total_it == 0:
                    G_path = os.path.join(self.args.ckpt_path,
                                          f'{total_it + 1:07d}-G.pth')
                    D_path = os.path.join(self.args.ckpt_path,
                                          f'{total_it + 1:07d}-D.pth')
                    torch.save(self.G.state_dict(), G_path)
                    torch.save(self.D.state_dict(), D_path)

                total_it += 1
# Seed the realtime database with telemetry, derived data and configuration.
myRTDB.initialise(iRData, True, False)
myRTDB.initialise(calcData, False, False)
myRTDB.initialise({'iDDUControls': iDDUControls}, False, False)
myRTDB.initialise({'config': config}, False, False)
dcList = list(myRTDB.car.dcList.keys())

# initialise and start thread
rtdbThread = RTDB.RTDBThread(0.01)
rtdbThread.setDB(myRTDB)
calcThread = iDDUcalc.IDDUCalcThread(0.017)
shiftToneThread = UpshiftTone.ShiftToneThread(0.01)
guiThread = iDDUgui.iDDUGUIThread(0.02)
serialComsThread = SerialComs.SerialComsThread(0.003)
raceLapsEstimationThread = raceLapsEstimation.RaceLapsEstimationThread(15)
loggerThread = Logger.LoggerThread(0.02)
ms = MultiSwitch.MultiSwitch(0.005)

# Register one multi-switch mapping per control; boolean controls take no
# range, numeric controls get min/max/step from the control record.
# NOTE(review): control-record layout assumed to be
# [3]=default/type value, [4]=min, [5]=max, [6]=step — confirm in config.
for name in iDDUControlsName:
    control = iDDUControls[name]  # hoist the repeated dict lookup
    if isinstance(control[3], bool):
        ms.addMapping(name)
    else:
        ms.addMapping(name, minValue=control[4], maxValue=control[5],
                      step=control[6])
ms.initCar()

calcThread.start()
rtdbThread.start()
def __init__(self, dburi):
    """Lazily create the SQLAlchemy engine and session for *dburi*.

    The connection is only established when neither an engine nor a
    session exists yet; otherwise the call is a no-op.

    Args:
        dburi: database URI passed straight to ``create_engine``.
    """
    # NOTE(review): the `and` guard skips reconnecting when only one of
    # engine/session exists (a half-initialized state) — confirm whether
    # `or` was intended.
    if not self.engine and not self.session:
        # lazy %-style args defer string formatting until the record is
        # actually emitted (assumes a stdlib-logging-compatible logger —
        # TODO confirm)
        Logger.instance().log.info("Connecting to db %s", dburi)
        self.engine = create_engine(dburi, echo=False)
        Session = sessionmaker(bind=self.engine)
        self.session = Session()