def set(training):
    """Parse command-line options and derive the run configuration.

    training: True when called for training (adds SGD / learning-rate
        schedule flags), False for evaluation (only an evaluation batch
        size is added).
    Returns the argparse.Namespace augmented with derived fields (image
        sizes, warp dimensionality, crop box, reference warp matrices, ...).
    """
    # parse input arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("netType", choices=["CNN", "STN", "IC-STN"], help="type of network")
    parser.add_argument("--group", default="0", help="name for group")
    parser.add_argument("--model", default="test", help="name for model instance")
    parser.add_argument("--size", default="36x36", help="image resolution")
    parser.add_argument("--sizeFull", default="50x50", help="full image resolution")
    parser.add_argument(
        "--warpType", default="homography", help="type of warp function on images",
        choices=["translation", "similarity", "affine", "homography"])
    parser.add_argument(
        "--warpN", type=int, default=4,
        help="number of recurrent transformations (for IC-STN)")
    parser.add_argument("--stdC", type=float, default=0.01, help="initialization stddev (classification network)")
    parser.add_argument("--stdGP", type=float, default=0.001, help="initialization stddev (geometric predictor)")
    parser.add_argument("--pertScale", type=float, default=0.25, help="initial perturbation scale")
    parser.add_argument("--transScale", type=float, default=0.25, help="initial translation scale")
    if training:  # training
        parser.add_argument("--batchSize", type=int, default=100, help="batch size for SGD")
        parser.add_argument("--lrC", type=float, default=1e-2, help="learning rate (classification network)")
        parser.add_argument(
            "--lrCdecay", type=float, default=0.1,
            help="learning rate decay (classification network)")
        parser.add_argument(
            "--lrCstep", type=int, default=500000,
            help="learning rate decay step size (classification network)")
        parser.add_argument("--lrGP", type=float, default=None, help="learning rate (geometric predictor)")
        parser.add_argument("--lrGPdecay", type=float, default=0.1, help="learning rate decay (geometric predictor)")
        parser.add_argument(
            "--lrGPstep", type=int, default=500000,
            help="learning rate decay step size (geometric predictor)")
        parser.add_argument("--fromIt", type=int, default=0, help="resume training from iteration number")
        parser.add_argument("--toIt", type=int, default=1000000, help="run training to iteration number")
    else:  # evaluation
        parser.add_argument("--batchSize", type=int, default=1, help="batch size for evaluation")
    opt = parser.parse_args()
    # default geometric-predictor learning rate depends on the network type
    if opt.lrGP is None:
        opt.lrGP = 0 if opt.netType=="CNN" else \
                   1e-3 if opt.netType=="STN" else \
                   3e-5 if opt.netType=="IC-STN" else None
    # --- below are automatically set ---
    opt.training = training
    opt.H, opt.W = [int(x) for x in opt.size.split("x")]
    opt.fullH, opt.fullW = [int(x) for x in opt.sizeFull.split("x")]
    # side length of the square montage used for visualization
    opt.visBlockSize = int(np.floor(np.sqrt(opt.batchSize)))
    # degrees of freedom of the chosen warp parameterization
    opt.warpDim = 2 if opt.warpType == "translation" else \
                  4 if opt.warpType == "similarity" else \
                  6 if opt.warpType == "affine" else \
                  8 if opt.warpType == "homography" else None
    opt.labelN = 43  # number of output classes (43 matches GTSRB traffic signs -- TODO confirm)
    # canonical corners in [-1,1]^2 and their pixel-space counterparts
    opt.canon4pts = np.array([[-1, -1], [-1, 1], [1, 1], [1, -1]], dtype=np.float32)
    opt.image4pts = np.array(
        [[0, 0], [0, opt.H - 1], [opt.W - 1, opt.H - 1], [opt.W - 1, 0]],
        dtype=np.float32)
    # central HxW crop box inside the full image: [x0, y0, x1, y1]
    opt.bbox = [
        int(opt.fullW / 2 - opt.W / 2),
        int(opt.fullH / 2 - opt.H / 2),
        int(opt.fullW / 2 + opt.W / 2),
        int(opt.fullH / 2 + opt.H / 2)
    ]
    opt.bbox4pts = np.array(
        [[opt.bbox[0], opt.bbox[1]], [opt.bbox[0], opt.bbox[3]],
         [opt.bbox[2], opt.bbox[3]], [opt.bbox[2], opt.bbox[1]]],
        dtype=np.float32)
    # warp matrices mapping canonical corners onto image / crop-box corners
    opt.refMtrx = warp.fit(Xsrc=opt.canon4pts, Xdst=opt.image4pts)
    opt.bboxRefMtrx = warp.fit(Xsrc=opt.canon4pts, Xdst=opt.bbox4pts)
    if opt.netType == "STN":
        opt.warpN = 1  # a plain STN applies exactly one transformation
    # summarize the configuration on stdout
    print("({0}) {1}".format(util.toGreen("{0}".format(opt.group)),
                             util.toGreen("{0}".format(opt.model))))
    print("------------------------------------------")
    print("network type: {0}, recurrent warps: {1}".format(
        util.toYellow("{0}".format(opt.netType)),
        util.toYellow(
            "{0}".format(opt.warpN if opt.netType == "IC-STN" else "X"))))
    print("batch size: {0}, image size: {1}x{2}".format(
        util.toYellow("{0}".format(opt.batchSize)),
        util.toYellow("{0}".format(opt.H)),
        util.toYellow("{0}".format(opt.W))))
    print("warpScale: (pert) {0} (trans) {1}".format(
        util.toYellow("{0}".format(opt.pertScale)),
        util.toYellow("{0}".format(opt.transScale))))
    if training:
        print("[geometric predictor] stddev={0}, lr={1}".format(
            util.toYellow("{0:.0e}".format(opt.stdGP)),
            util.toYellow("{0:.0e}".format(opt.lrGP))))
        print("[classification network] stddev={0}, lr={1}".format(
            util.toYellow("{0:.0e}".format(opt.stdC)),
            util.toYellow("{0:.0e}".format(opt.lrC))))
    print("------------------------------------------")
    if training:
        print(
            util.toMagenta("training model ({0}) {1}...".format(
                opt.group, opt.model)))
    return opt
# NOTE(review): excerpt from a Blender rendering script -- names such as
# angles_list, azim/elev/theta, R_extr, q_extr_list, save_path, bkg, camera,
# MODEL, BUFFER and timeStart are bound earlier in the (not shown) scope.
angles_list.append(np.stack([azim, elev, theta]))  # camera angles for this view
R_list.append(np.array(R_extr))  # extrinsic rotation matrix for this view
# print([np.array(q_extr),t_extr.squeeze()])
# save all accumulated camera transforms for this model to a .mat file
trans_path = "{0}/trans.mat".format(save_path)
scipy.io.savemat(trans_path,{'angles_list':np.stack(angles_list),
                             'R_list':np.stack(R_list),
                             'q_extr_list':np.stack(q_extr_list)})
# delete the 'cobble' object when no background was used
# (presumably the ground plane -- confirm against scene setup)
if bkg == 0:
    bpy.data.objects['cobble'].select = True
    bpy.ops.object.delete()
# bpy.data.objects['cobble'].select = False
# else:
#     bpy.data.objects['cobble'].select = True
#     bpy.ops.object.delete()
#os.close(1)
#os.dup(old)
#os.close(old)
# clean up
# select every object except the camera, then delete the whole selection
for o in bpy.data.objects:
    if o==camera: continue
    o.select = True
bpy.ops.object.delete()
# purge orphaned mesh/material datablocks so memory does not grow per model
for m in bpy.data.meshes:
    bpy.data.meshes.remove(m)
for m in bpy.data.materials:
    m.user_clear()
    bpy.data.materials.remove(m)
print(util.toGreen("#{3} {1} done, time={0:.4f} sec. Model saved to {2}".format(time.time()-timeStart,MODEL, trans_path, BUFFER)))
def set(training):
    """Parse command-line options and derive the run configuration.

    training: True adds optimizer/loss flags for the two sub-networks
        "GP" and "D" (presumably geometric predictor and discriminator --
        confirm against the training script); False adds only evaluation
        flags.
    Returns the argparse.Namespace augmented with derived fields.
    """
    # parse input arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("--group", default="0", help="name for group")
    parser.add_argument("--name", default="test", help="name for model instance")
    parser.add_argument("--loadGP", default=None, help="load pretrained model (GP)")
    parser.add_argument("--gpu", default="0", help="ID of GPU device (if there are multiple)")
    parser.add_argument("--size", default="120x160", help="resolution of background image")
    parser.add_argument("--warpN", type=int, default=1, help="number of spatial transformations")
    parser.add_argument("--stdGP", type=float, default=0.01, help="initialization stddev (GP)")
    parser.add_argument("--stdD", type=float, default=0.01, help="initialization stddev (D)")
    if training:  # training
        parser.add_argument("--loadD", default=None, help="load pretrained model (D)")
        parser.add_argument("--lrGP", type=float, default=1e-6, help="base learning rate (GP)")
        parser.add_argument("--lrGPdecay", type=float, default=1.0, help="learning rate decay (GP)")
        parser.add_argument("--lrGPstep", type=int, default=10000, help="learning rate decay step size (GP)")
        parser.add_argument("--lrD", type=float, default=1e-4, help="base learning rate (D)")
        parser.add_argument("--lrDdecay", type=float, default=1.0, help="learning rate decay (D)")
        parser.add_argument("--lrDstep", type=int, default=10000, help="learning rate decay step size (D)")
        parser.add_argument("--unpaired", action="store_true", help="feed unpaired samples to D")
        parser.add_argument("--dplambda", type=float, default=0.3, help="warp update norm penalty factor")
        parser.add_argument("--gradlambda", type=float, default=10.0, help="gradient penalty factor")
        parser.add_argument("--updateD", type=int, default=2, help="update N times (D)")
        parser.add_argument("--updateGP", type=int, default=1, help="update N times (GP)")
        parser.add_argument("--batchSize", type=int, default=20, help="batch size for SGD")
        parser.add_argument("--fromIt", type=int, default=0, help="resume training from iteration number")
        parser.add_argument("--toIt", type=int, default=40000, help="run training to iteration number")
        parser.add_argument("--initPert", type=float, default=0.1, help="scale of initial perturbation")
        parser.add_argument("--homoPert", type=float, default=0.1, help="scale of homography perturbation")
    else:  # evaluation
        parser.add_argument("--batchSize", type=int, default=1, help="batch size for evaluation")
        parser.add_argument("--initPert", type=float, default=0.0, help="scale of initial perturbation")
    opt = parser.parse_args()
    # ------ probably won't touch these ------
    opt.warpType = "homography"  # warp family is fixed here
    opt.warpDim = 8  # 8 degrees of freedom for a homography
    opt.warpApprox = 20
    opt.GPUdevice = "/gpu:0"
    # ------ below automatically set ------
    opt.training = training
    opt.H, opt.W = [int(x) for x in opt.size.split("x")]
    if training:
        # side length of the square montage used for visualization
        opt.visBlockSize = int(np.floor(np.sqrt(opt.batchSize)))
        # opt.visBlockSize = 2
    # canonical corners in [-1,1]^2, their pixel-space counterparts, and the
    # warp matrix mapping the former onto the latter
    opt.canon4pts = np.array([[-1, -1], [-1, 1], [1, 1], [1, -1]], dtype=np.float32)
    opt.image4pts = np.array(
        [[0, 0], [0, opt.H - 1], [opt.W - 1, opt.H - 1], [opt.W - 1, 0]],
        dtype=np.float32)
    opt.refMtrx = warp.fit(Xsrc=opt.canon4pts, Xdst=opt.image4pts)
    # summarize the configuration on stdout
    print("({0}) {1}".format(util.toGreen("{0}".format(opt.group)),
                             util.toGreen("{0}".format(opt.name))))
    print("------------------------------------------")
    print("GPU device: {0}, batch size: {1}, warps: {2}".format(
        util.toYellow("{0}".format(opt.gpu)),
        util.toYellow("{0}".format(opt.batchSize)),
        util.toYellow("{0}".format(opt.warpN))))
    print("image size: {0}x{1}".format(util.toYellow("{0}".format(opt.H)),
                                       util.toYellow("{0}".format(opt.W))))
    if training:
        print(
            "[GP] stddev={3}, lr={0}, decay={1}, step={2}, update={4}".format(
                util.toYellow("{0:.0e}".format(opt.lrGP)),
                util.toYellow("{0}".format(opt.lrGPdecay)),
                util.toYellow("{0}".format(opt.lrGPstep)),
                util.toYellow("{0:.0e}".format(opt.stdGP)),
                util.toYellow("{0}".format(opt.updateGP))))
        print(
            "[D] stddev={3}, lr={0}, decay={1}, step={2}, update={4}".format(
                util.toYellow("{0:.0e}".format(opt.lrD)),
                util.toYellow("{0}".format(opt.lrDdecay)),
                util.toYellow("{0}".format(opt.lrDstep)),
                util.toYellow("{0:.0e}".format(opt.stdD)),
                util.toYellow("{0}".format(opt.updateD))))
    print("------------------------------------------")
    if training:
        print(
            util.toMagenta("training model ({0}) {1}...".format(
                opt.group, opt.name)))
    return opt
# training loop for i in range(opt.fromIt,opt.toIt): lrD = opt.lrD*opt.lrDdecay**(i//opt.lrDstep) # make training batch batch = data.makeBatch(opt,trainData,PH) batch[lrD_PH] = lrD # update discriminator runList = [optimD,loss_D,grad_D_norm_mean] for u in range(opt.updateD): _,ld,gdn = sess.run(runList,feed_dict=batch) if (i+1)%10==0: print("it.{0}/{1} lr={3}(GP),{4}(D) loss={5}(GP),{6}(D) norm={7} time={2}" .format(util.toCyan("{0}".format(i+1)), opt.toIt, util.toGreen("{0:.2f}".format(time.time()-timeStart)), util.toYellow("X"), util.toYellow("{0:.0e}".format(lrD)), util.toRed("X"), util.toRed("{0:.4f}".format(ld)), util.toBlue("{0:.4f}".format(gdn)))) # if (i+1)%20==0: # runList = [summaryLossTrain,summaryGradTrain] # sl,sg = sess.run(runList,feed_dict=batch) # summaryWriter.add_summary(sl,i+1) # summaryWriter.add_summary(sg,i+1) # if (i+1)%200==0: # si = sess.run(summaryImageTrain,feed_dict=batch) # summaryWriter.add_summary(si,i+1) # if (i+1)%500==0: # # run on test set
def set(training):
    """Parse command-line options and derive the run configuration.

    training: True adds optimizer/history flags for the two sub-networks
        "GP" and "D" (presumably geometric predictor and discriminator --
        confirm against the training script); False adds evaluation flags.
    Returns the argparse.Namespace augmented with derived fields.
    """
    # parse input arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("--group", default="0", help="name for group")
    parser.add_argument("--name", default="test", help="name for model instance")
    parser.add_argument("--loadGP", default=None, help="load pretrained model (GP)")
    parser.add_argument("--size", default="128x128", help="resolution of foreground image")
    # restrict to the warp types the warpDim chain below supports, so an
    # unsupported value fails at parse time instead of yielding warpDim=None
    parser.add_argument("--warpType", default="affine",
                        choices=["affine", "homography"],
                        help="type of warp function on foreground image")
    parser.add_argument("--warpN", type=int, default=1, help="number of spatial transformations")
    parser.add_argument("--stdGP", type=float, default=0.01, help="initialization stddev (GP)")
    parser.add_argument("--stdD", type=float, default=0.01, help="initialization stddev (D)")
    if training:  # training
        parser.add_argument("--loadD", default=None, help="load pretrained model (D)")
        parser.add_argument("--lrGP", type=float, default=1e-5, help="base learning rate (GP)")
        parser.add_argument("--lrGPdecay", type=float, default=1.0, help="learning rate decay (GP)")
        parser.add_argument("--lrGPstep", type=int, default=20000, help="learning rate decay step size (GP)")
        parser.add_argument("--lrD", type=float, default=1e-5, help="base learning rate (D)")
        parser.add_argument("--lrDdecay", type=float, default=1.0, help="learning rate decay (D)")
        parser.add_argument("--lrDstep", type=int, default=20000, help="learning rate decay step size (D)")
        parser.add_argument("--dplambda", type=float, default=1.0, help="warp update norm penalty factor")
        parser.add_argument("--gradlambda", type=float, default=10.0, help="gradient penalty factor")
        parser.add_argument("--updateD", type=int, default=2, help="update N times (D)")
        parser.add_argument("--updateGP", type=int, default=1, help="update N times (GP)")
        parser.add_argument("--batchSize", type=int, default=20, help="batch size for SGD")
        # BUGFIX: histSize is a sample count (cf. histQsize) -- was type=float
        parser.add_argument("--histSize", type=int, default=10, help="history size in batch")
        parser.add_argument("--histQsize", type=int, default=10000, help="history queue size for updating D")
        parser.add_argument("--fromIt", type=int, default=0, help="resume training from iteration number")
        parser.add_argument("--toIt", type=int, default=50000, help="run training to iteration number")
        parser.add_argument("--pertFG", type=float, default=0.1, help="scale of initial perturbation (bags)")
        parser.add_argument("--pertBG", type=float, default=0.1, help="scale of initial perturbation (face)")
    else:  # evaluation
        parser.add_argument("--batchSize", type=int, default=10, help="batch size for evaluation")
        parser.add_argument("--pertFG", type=float, default=0.0, help="scale of initial perturbation (bags)")
        parser.add_argument("--pertBG", type=float, default=0.0, help="scale of initial perturbation (face)")
        parser.add_argument("--loadImage", default=None, help="load image to test")
    opt = parser.parse_args()
    # ------ probably won't touch these ------
    ## for original network
    # opt.dataH,opt.dataW = 144,144
    # opt.centerY,opt.centerX = 72,72
    ## for our new network
    opt.dataH,opt.dataW = 128,128
    opt.centerY,opt.centerX = 64,64
    # degrees of freedom of the chosen warp parameterization
    opt.warpDim = 8 if opt.warpType=="homography" else \
                  6 if opt.warpType=="affine" else None
    opt.warpApprox = 20
    opt.GPUdevice = "/gpu:0"
    # ------ below automatically set ------
    opt.training = training
    opt.H,opt.W = [int(x) for x in opt.size.split("x")]
    if training:
        # side length of the square montage used for visualization
        opt.visBlockSize = int(np.floor(np.sqrt(opt.batchSize)))
    # canonical corners in [-1,1]^2, their pixel-space counterparts, and the
    # warp matrices mapping the former onto the latter
    opt.canon4pts = np.array([[-1,-1],[-1,1],[1,1],[1,-1]],dtype=np.float32)
    opt.image4pts = np.array([[0,0],[0,opt.H-1],[opt.W-1,opt.H-1],[opt.W-1,0]],dtype=np.float32)
    opt.refMtrx = warp.fit(Xsrc=opt.canon4pts,Xdst=opt.image4pts)
    # corners of the HxW window centered at (centerX,centerY) in data space
    opt.image4pts_b = np.array([[opt.centerX-opt.W//2,opt.centerY-opt.H//2],
                                [opt.centerX-opt.W//2,opt.centerY+opt.H//2],
                                [opt.centerX+opt.W//2,opt.centerY+opt.H//2],
                                [opt.centerX+opt.W//2,opt.centerY-opt.H//2]],dtype=np.float32)
    opt.refMtrx_b = warp.fit(Xsrc=opt.canon4pts,Xdst=opt.image4pts_b)
    # summarize the configuration on stdout
    print("({0}) {1}".format(
        util.toGreen("{0}".format(opt.group)),
        util.toGreen("{0}".format(opt.name))))
    print("------------------------------------------")
    print("batch size: {0}, warps: {1}".format(
        util.toYellow("{0}".format(opt.batchSize)),
        util.toYellow("{0}".format(opt.warpN))))
    print("image size: {0}x{1}".format(
        util.toYellow("{0}".format(opt.H)),
        util.toYellow("{0}".format(opt.W))))
    if training:
        print("[GP] stddev={3}, lr={0}, decay={1}, step={2}, update={4}".format(
            util.toYellow("{0:.0e}".format(opt.lrGP)),
            util.toYellow("{0}".format(opt.lrGPdecay)),
            util.toYellow("{0}".format(opt.lrGPstep)),
            util.toYellow("{0:.0e}".format(opt.stdGP)),
            util.toYellow("{0}".format(opt.updateGP))))
        print("[D] stddev={3}, lr={0}, decay={1}, step={2}, update={4}".format(
            util.toYellow("{0:.0e}".format(opt.lrD)),
            util.toYellow("{0}".format(opt.lrDdecay)),
            util.toYellow("{0}".format(opt.lrDstep)),
            util.toYellow("{0:.0e}".format(opt.stdD)),
            util.toYellow("{0}".format(opt.updateD))))
    print("------------------------------------------")
    if training:
        print(util.toMagenta("training model ({0}) {1}...".format(opt.group,opt.name)))
    return opt
def set(training):
    """Parse command-line options and derive the run configuration.

    training: True adds training flags (batch/chunk sizes, learning-rate
        schedule, depth-loss weight, iteration range); False adds only an
        evaluation batch size.
    Returns the argparse.Namespace augmented with derived fields.
    """
    # parse input arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("--category", default="03001627", help="category ID number")
    parser.add_argument("--group", default="0", help="name for group")
    parser.add_argument("--model", default="test", help="name for model instance")
    parser.add_argument("--load", default=None, help="load trained model to fine-tune/evaluate")
    parser.add_argument("--std", type=float, default=0.1, help="initialization standard deviation")
    parser.add_argument("--outViewN", type=int, default=8, help="number of fixed views (output)")
    parser.add_argument("--inSize", default="64x64", help="resolution of encoder input")
    parser.add_argument("--outSize", default="128x128", help="resolution of decoder output")
    parser.add_argument("--predSize", default="128x128", help="resolution of prediction")
    parser.add_argument("--upscale", type=int, default=5, help="upscaling factor for rendering")
    parser.add_argument("--novelN", type=int, default=5, help="number of novel views simultaneously")
    parser.add_argument("--arch", default=None)  # architecture selector -- semantics defined by the caller
    if training:  # training
        parser.add_argument("--batchSize", type=int, default=20, help="batch size for training")
        parser.add_argument("--chunkSize", type=int, default=100, help="data chunk size to load")
        parser.add_argument("--itPerChunk", type=int, default=50, help="training iterations per chunk")
        parser.add_argument("--lr", type=float, default=1e-4, help="base learning rate (AE)")
        parser.add_argument("--lrDecay", type=float, default=1.0, help="learning rate decay multiplier")
        parser.add_argument("--lrStep", type=int, default=20000, help="learning rate decay step size")
        parser.add_argument("--lambdaDepth", type=float, default=1.0, help="loss weight factor (depth)")
        parser.add_argument("--fromIt", type=int, default=0, help="resume training from iteration number")
        parser.add_argument("--toIt", type=int, default=100000, help="run training to iteration number")
    else:  # evaluation
        parser.add_argument("--batchSize", type=int, default=1, help="batch size for evaluation")
    opt = parser.parse_args()
    # these stay fixed
    opt.sampleN = 100
    opt.renderDepth = 1.0
    opt.BNepsilon = 1e-5  # batch-norm epsilon (per the BN prefix -- confirm usage)
    opt.BNdecay = 0.999   # batch-norm moving-average decay (same caveat)
    opt.inputViewN = 24
    # ------ below automatically set ------
    opt.training = training
    opt.inH, opt.inW = [int(x) for x in opt.inSize.split("x")]
    opt.outH, opt.outW = [int(x) for x in opt.outSize.split("x")]
    opt.H, opt.W = [int(x) for x in opt.predSize.split("x")]
    # side length of the square montage used for visualization
    opt.visBlockSize = int(np.floor(np.sqrt(opt.batchSize)))
    # 4x4 homogeneous mappings between 3D and pixel coordinates (per the
    # Khom3Dto2D/Khom2Dto3D names); note the y-axis sign flip and the
    # principal point at the image center
    opt.Khom3Dto2D = np.array(
        [[opt.W, 0, 0, opt.W / 2], [0, -opt.H, 0, opt.H / 2], [0, 0, -1, 0],
         [0, 0, 0, 1]],
        dtype=np.float32)
    opt.Khom2Dto3D = np.array(
        [[opt.outW, 0, 0, opt.outW / 2], [0, -opt.outH, 0, opt.outH / 2],
         [0, 0, -1, 0], [0, 0, 0, 1]],
        dtype=np.float32)
    # precomputed fixed-view fusion transforms (one .npy file per outViewN)
    opt.fuseTrans = np.load("trans_fuse{0}.npy".format(opt.outViewN))
    # summarize the configuration on stdout
    print("({0}) {1}".format(util.toGreen("{0}".format(opt.group)),
                             util.toGreen("{0}".format(opt.model))))
    print("------------------------------------------")
    print("batch size: {0}, category: {1}".format(
        util.toYellow("{0}".format(opt.batchSize)),
        util.toYellow("{0}".format(opt.category))))
    print("size: {0}x{1}(in), {2}x{3}(out), {4}x{5}(pred)".format(
        util.toYellow("{0}".format(opt.inH)),
        util.toYellow("{0}".format(opt.inW)),
        util.toYellow("{0}".format(opt.outH)),
        util.toYellow("{0}".format(opt.outW)),
        util.toYellow("{0}".format(opt.H)),
        util.toYellow("{0}".format(opt.W))))
    if training:
        print("learning rate: {0} (decay: {1}, step size: {2})".format(
            util.toYellow("{0:.2e}".format(opt.lr)),
            util.toYellow("{0}".format(opt.lrDecay)),
            util.toYellow("{0}".format(opt.lrStep))))
        print("depth loss weight: {0}".format(
            util.toYellow("{0}".format(opt.lambdaDepth))))
    print("viewN: {0}(out), upscale: {1}, novelN: {2}".format(
        util.toYellow("{0}".format(opt.outViewN)),
        util.toYellow("{0}".format(opt.upscale)),
        util.toYellow("{0}".format(opt.novelN))))
    print("------------------------------------------")
    if training:
        print(
            util.toMagenta("training model ({0}) {1}...".format(
                opt.group, opt.model)))
    return opt
# NOTE(review): excerpt from inside a GP training loop -- i, batch, sess,
# optimGP1/optimGP2, loss_GP, vars_all, summary_op, imageWarped, StandardData,
# summaryWriter, lrGP, toIt, stack_num, saver_GP and timeStart are bound in
# the (not shown) surrounding scope.
# two-phase optimizer schedule: optimGP1 for the first 1200 iterations,
# then optimGP2
if(i<1200):
    optim = optimGP1
else:
    optim = optimGP2
'''
if(i<2000):
    optim = optimGP1
elif(i<10000):
    optim = optimGP2
else:
    optim = optimGP3
'''
runList = [optim,loss_GP,vars_all,summary_op,imageWarped,StandardData]
_,lg,var,summary_res,image,stimage = sess.run(runList,feed_dict=batch)
summaryWriter.add_summary(summary_res,i)
# progress report every 10 iterations
if (i+1)%10==0:
    print("it.{0}/{1} lr={3} loss={4}(GP) time={2}"
        .format(util.toCyan("{0}".format((i+1))),
                toIt,
                util.toGreen("{0:.2f}".format(time.time()-timeStart)),
                util.toYellow("{0:.0e}".format(lrGP)),
                util.toRed("{0:.4f}".format(lg)),
                ))
# periodic checkpoint every 5000 iterations
if (i+1)%5000==0:
    saver_GP.save(sess,"model_0/models_it{0}_stack{1}.ckpt".format(i+1,stack_num))
#print(image.shape,stimage.shape)
#scipy.misc.imshow(1-image[0][0])
#scipy.misc.imshow(1-stimage[0])
# final checkpoint after the loop finishes
saver_GP.save(sess,"model_0/models_it{0}_stack{1}.ckpt".format(toIt,stack_num))
print(util.toYellow("======= TRAINING DONE ======="))
def set(training):
    """Parse command-line options and derive the run configuration (PyTorch).

    training: True adds training flags (visdom port, SGD batch size,
        learning-rate schedule, iteration range); False adds only an
        evaluation batch size.
    Returns the argparse.Namespace augmented with derived fields.

    Raises:
        RuntimeError: if no CUDA device is available (only training on GPU
            is supported for now).
    """
    # parse input arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("netType", choices=["CNN", "STN", "IC-STN"], help="type of network")
    parser.add_argument("--group", default="0", help="name for group")
    parser.add_argument("--model", default="test", help="name for model instance")
    parser.add_argument("--size", default="28x28", help="image resolution")
    parser.add_argument(
        "--warpType", default="homography", help="type of warp function on images",
        choices=["translation", "similarity", "affine", "homography"])
    parser.add_argument(
        "--warpN", type=int, default=4,
        help="number of recurrent transformations (for IC-STN)")
    parser.add_argument("--stdC", type=float, default=0.1, help="initialization stddev (classification network)")
    parser.add_argument("--stdGP", type=float, default=0.1, help="initialization stddev (geometric predictor)")
    parser.add_argument("--pertScale", type=float, default=0.25, help="initial perturbation scale")
    parser.add_argument("--transScale", type=float, default=0.25, help="initial translation scale")
    if training:  # training
        parser.add_argument("--port", type=int, default=8097, help="port number for visdom visualization")
        parser.add_argument("--batchSize", type=int, default=100, help="batch size for SGD")
        parser.add_argument("--lrC", type=float, default=1e-2, help="learning rate (classification network)")
        parser.add_argument("--lrGP", type=float, default=None, help="learning rate (geometric predictor)")
        parser.add_argument("--lrDecay", type=float, default=1.0, help="learning rate decay")
        parser.add_argument("--lrStep", type=int, default=100000, help="learning rate decay step size")
        parser.add_argument("--fromIt", type=int, default=0, help="resume training from iteration number")
        parser.add_argument("--toIt", type=int, default=500000, help="run training to iteration number")
    else:  # evaluation
        parser.add_argument("--batchSize", type=int, default=1, help="batch size for evaluation")
    opt = parser.parse_args()
    # default geometric-predictor learning rate depends on the network type
    if opt.lrGP is None:
        opt.lrGP = 0 if opt.netType=="CNN" else \
                   1e-2 if opt.netType=="STN" else \
                   1e-4 if opt.netType=="IC-STN" else None
    # --- below are automatically set ---
    # BUGFIX: was `assert (torch.cuda.is_available())` -- assert statements are
    # stripped under `python -O`, so validate with an explicit exception
    if not torch.cuda.is_available():
        raise RuntimeError("CUDA device required: support only training on GPU for now")
    torch.set_default_tensor_type("torch.cuda.FloatTensor")
    opt.training = training
    opt.H, opt.W = [int(x) for x in opt.size.split("x")]
    # side length of the square montage used for visualization
    opt.visBlockSize = int(np.floor(np.sqrt(opt.batchSize)))
    # degrees of freedom of the chosen warp parameterization
    opt.warpDim = 2 if opt.warpType == "translation" else \
                  4 if opt.warpType == "similarity" else \
                  6 if opt.warpType == "affine" else \
                  8 if opt.warpType == "homography" else None
    opt.labelN = 10  # 10 output classes (presumably MNIST digits -- confirm)
    # canonical corners in [-1,1]^2 and their pixel-space counterparts
    opt.canon4pts = np.array([[-1, -1], [-1, 1], [1, 1], [1, -1]], dtype=np.float32)
    opt.image4pts = np.array(
        [[0, 0], [0, opt.H - 1], [opt.W - 1, opt.H - 1], [opt.W - 1, 0]],
        dtype=np.float32)
    opt.refMtrx = np.eye(3).astype(np.float32)  # identity reference warp
    if opt.netType == "STN":
        opt.warpN = 1  # a plain STN applies exactly one transformation
    # summarize the configuration on stdout
    print("({0}) {1}".format(util.toGreen("{0}".format(opt.group)),
                             util.toGreen("{0}".format(opt.model))))
    print("------------------------------------------")
    print("network type: {0}, recurrent warps: {1}".format(
        util.toYellow("{0}".format(opt.netType)),
        util.toYellow(
            "{0}".format(opt.warpN if opt.netType == "IC-STN" else "X"))))
    print("batch size: {0}, image size: {1}x{2}".format(
        util.toYellow("{0}".format(opt.batchSize)),
        util.toYellow("{0}".format(opt.H)),
        util.toYellow("{0}".format(opt.W))))
    print("warpScale: (pert) {0} (trans) {1}".format(
        util.toYellow("{0}".format(opt.pertScale)),
        util.toYellow("{0}".format(opt.transScale))))
    if training:
        print("[geometric predictor] stddev={0}, lr={1}".format(
            util.toYellow("{0:.0e}".format(opt.stdGP)),
            util.toYellow("{0:.0e}".format(opt.lrGP))))
        print("[classification network] stddev={0}, lr={1}".format(
            util.toYellow("{0:.0e}".format(opt.stdC)),
            util.toYellow("{0:.0e}".format(opt.lrC))))
    print("------------------------------------------")
    if training:
        print(
            util.toMagenta("training model ({0}) {1}...".format(
                opt.group, opt.model)))
    return opt