def init_workflow(self):
    # === Create the AccumulatedValue objects. ===
    self.frame.add_accumulated_value("lossTest", 10)
    self.frame.AV["loss"].avgWidth = 10

    # === Create an AccumulatedValuePlotter object for plotting. ===
    if self.flagUseIntPlotter:
        self.frame.AVP.append(
            WorkFlow.PLTIntermittentPlotter(
                self.frame.workingDir + "/IntPlot",
                "loss", self.frame.AV, ["loss"], [True], semiLog=True))
        self.frame.AVP.append(
            WorkFlow.PLTIntermittentPlotter(
                self.frame.workingDir + "/IntPlot",
                "lossTest", self.frame.AV, ["lossTest"], [True], semiLog=True))
    else:
        self.frame.AVP.append(
            WorkFlow.VisdomLinePlotter(
                "loss", self.frame.AV, ["loss"], [True], semiLog=True))
        self.frame.AVP.append(
            WorkFlow.VisdomLinePlotter(
                "lossTest", self.frame.AV, ["lossTest"], [True], semiLog=True))
def __init__(self, workingDir, prefix="", suffix=""): super(MyWF, self).__init__(workingDir, prefix, suffix) # === Custom member variables. === self.countTrain = 0 # self.countTest = 0 with np.load(join(datapath, filecat)) as cat_data: train_cat, val_cat, test_cat = cat_data['train'], cat_data[ 'valid'], cat_data['test'] self.dataset = SketchDatasetHierarchy(train_cat) self.valset = SketchDatasetHierarchy(val_cat) self.sketchnet = SketchRnn(InputNum, HiddenNumLine, HiddenNumSketch, OutputNum) if LoadPretrain: self.sketchnet = self.load_model(self.sketchnet, modelname) if LoadLineModel: self.sketchnet.load_line_model(LineModel) self.sketchnet.cuda() self.criterion_mse = nn.MSELoss() self.optimizer = optim.Adam( self.sketchnet.parameters(), lr=Lr) #get_high_params(), lr = Lr) #,weight_decay=1e-5) # === Create the AccumulatedObjects. === self.AV['loss'].avgWidth = 100 self.add_accumulated_value("loss_cons", 100) self.add_accumulated_value("test_loss_cons") self.add_accumulated_value("loss_kl", 100) self.add_accumulated_value("loss_kl_line", 100) self.add_accumulated_value("loss_cons_high", 100) # self.add_accumulated_value("loss_eof", 100) self.add_accumulated_value("test_loss") # === Create a AccumulatedValuePlotter object for ploting. === self.AVP.append(WorkFlow.VisdomLinePlotter(\ "train_test_loss", self.AV, ['loss', 'test_loss'], [True, False])) self.AVP.append(WorkFlow.VisdomLinePlotter(\ "loss_kl", self.AV, ["loss_kl", "loss_kl_line"], [True, True])) self.AVP.append(WorkFlow.VisdomLinePlotter(\ "loss_cons", self.AV, ["loss_cons", "test_loss_cons"], [True, False])) self.AVP.append(WorkFlow.VisdomLinePlotter(\ "loss_cons_high", self.AV, ["loss_cons_high"], [True]))
def register_info_plotter(self, name, infoNames, avgFlags, subDir='IntPlot', flagSemiLog=False):
    if self.flagUseIntPlotter:
        self.frame.AVP.append(
            WorkFlow.PLTIntermittentPlotter(
                os.path.join(self.frame.workingDir, subDir),
                name, self.frame.AV, infoNames, avgFlags, semiLog=flagSemiLog))
    else:
        self.frame.AVP.append(
            WorkFlow.VisdomLinePlotter(
                name, self.frame.AV, infoNames, avgFlags, semiLog=flagSemiLog))
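# A minimal usage sketch (assumption, not taken from the original source): a
# workflow that has register_info_plotter available can register its plots with
# one call per plot instead of appending plotter objects by hand. The value
# names "loss" and "lossTest" mirror those used elsewhere in this file; the
# method name initialize_plots is hypothetical.
def initialize_plots(self):
    # One combined semi-log plot of the training and testing losses,
    # drawing the running average only for the training curve.
    self.register_info_plotter("loss", ["loss", "lossTest"], [True, False], flagSemiLog=True)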
def __init__(self, workingDir, prefix = "", suffix = ""): super(MyWF, self).__init__(workingDir, prefix, suffix) # === Custom member variables. === logstr = '' for param in LogParamList: # record useful params in logfile try: logstr += param + ': '+ str(globals()[param]) + ', ' except: pass self.logger.info(logstr) # === Create the AccumulatedObjects. === self.add_accumulated_value("loss2", 10) self.add_accumulated_value("lossLeap") self.add_accumulated_value("testAvg1", 10) self.add_accumulated_value("testAvg2", 20) self.add_accumulated_value("lossTest") # This should raise an exception. # self.add_accumulated_value("loss") # === Create a AccumulatedValuePlotter object for ploting. === WorkFlow.VisdomLinePlotter.host = "http://128.237.179.115" WorkFlow.VisdomLinePlotter.port = 8097 avNameList = ["loss", "loss2", "lossLeap"] avAvgFlagList = [ True, False, True ] self.AVP.append(\ WorkFlow.VisdomLinePlotter(\ "Combined", self.AV, avNameList, avAvgFlagList)\ ) self.AVP.append(\ WorkFlow.VisdomLinePlotter(\ "loss", self.AV, ["loss"])\ ) self.AVP.append(\ WorkFlow.VisdomLinePlotter(\ "losse", self.AV, ["loss2"], [True])\ ) self.AVP.append(\ WorkFlow.VisdomLinePlotter(\ "lossLeap", self.AV, ["lossLeap"], [True])\ ) self.AVP[-1].title = "Loss Leap" self.AVP.append(\ WorkFlow.VisdomLinePlotter(\ "testAvg1", self.AV, ["testAvg1"], [True])\ ) self.AVP.append(\ WorkFlow.VisdomLinePlotter(\ "testAvg2", self.AV, ["testAvg2"], [True])\ ) # === Custom member variables. === self.countTrain = 0 self.countTest = 0
def __init__(self, workingDir, prefix="", suffix=""): super(MyWF, self).__init__(workingDir, prefix, suffix) # === Custom member variables. === self.countTrain = 0 # self.countTest = 0 with np.load(join(datapath, filecat)) as cat_data: train_cat, val_cat, test_cat = cat_data['train'], cat_data[ 'valid'], cat_data['test'] self.dataset = SketchDatasetHierarchy(train_cat) self.valset = SketchDatasetHierarchy(val_cat) self.sketchnet = StrokeRnn(InputNum, HiddenNum, OutputNum) if LoadPretrain: self.sketchnet = self.load_model(self.sketchnet, modelname) self.sketchnet.cuda() self.criterion_mse = nn.MSELoss(size_average=True) # self.criterion_ce = nn.CrossEntropyLoss(weight=torch.Tensor([1,10,100]).cuda(), size_average=Bidirection) self.optimizer = optim.Adam(self.sketchnet.parameters(), lr=Lr) #,weight_decay=1e-5) # === Create the AccumulatedObjects. === self.AV['loss'].avgWidth = 100 self.add_accumulated_value("loss_cons", 100) self.add_accumulated_value("loss_kl", 100) self.add_accumulated_value("loss_loc", 100) self.add_accumulated_value("test_loss") # === Create a AccumulatedValuePlotter object for ploting. === self.AVP.append(WorkFlow.VisdomLinePlotter(\ "train_test_loss", self.AV, ['loss', 'test_loss'], [True, False])) self.AVP.append(WorkFlow.VisdomLinePlotter(\ "loss_cons", self.AV, ["loss_cons", "loss_kl"], [True, True])) self.AVP.append(WorkFlow.VisdomLinePlotter(\ "loss_cons", self.AV, ["loss_loc"], [True]))
def __init__(self, params):
    super(MyWF, self).__init__(params["workDir"], params["jobPrefix"], params["jobSuffix"])

    self.params = params
    self.verbose = params["wfVerbose"]

    # === Create the AccumulatedValue objects. ===
    self.AV["loss"].avgWidth = 10
    self.add_accumulated_value("lossTest", 2)

    # === Create an AccumulatedValuePlotter object for plotting. ===
    WorkFlow.VisdomLinePlotter.host = self.params["visdomHost"]
    WorkFlow.VisdomLinePlotter.port = self.params["visdomPort"]
    self.AVP.append(
        WorkFlow.VisdomLinePlotter(
            "loss", self.AV,
            ["loss", "lossTest"],
            [True, False], semiLog=True))

    # === Custom member variables. ===
    self.countTrain = 0
    self.countTest = 0

    # CUDA stuff.
    # self.cudaDev = None

    # ConvolutionalStereoNet.
    self.csn = None
    self.dataset = None
    self.dataLoader = None
    self.dlIter = None  # The iterator stems from self.dataLoader.
    self.datasetTest = None
    self.dataLoaderTest = None
    self.dlIterTest = None  # The iterator stems from self.dataLoaderTest.

    # Training variables.
    self.criterion = torch.nn.SmoothL1Loss()
    self.optimizer = None
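# A hedged construction sketch (assumption, not from the original source): the
# keys below are exactly the ones this __init__ reads from params; the concrete
# values are illustrative only (the port 8097 matches the one used elsewhere in
# this file, the rest are placeholders).
params = {
    "workDir": "./wf_log",            # hypothetical working directory
    "jobPrefix": "",
    "jobSuffix": "",
    "wfVerbose": False,
    "visdomHost": "http://localhost", # placeholder Visdom server address
    "visdomPort": 8097,
}
wf = MyWF(params)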
def append_plotter(self, plotName, valueNameList, smoothList, semiLog=False):
    if self.plotterType == 'Visdom':
        self.AVP.append(WorkFlow.VisdomLinePlotter(
            plotName, self.AV, valueNameList, smoothList, semiLog=semiLog))
    elif self.plotterType == 'Int':
        self.AVP.append(WorkFlow.PLTIntermittentPlotter(
            self.workingDir + "/IntPlot", plotName, self.AV,
            valueNameList, smoothList, semiLog=semiLog))
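# A minimal usage sketch (assumption, not taken from the original source): with
# append_plotter available, the plot registration done by hand in the __init__
# variants above collapses into one call per plot. The value names mirror those
# used elsewhere in this file; initialize_plots is a hypothetical method name.
def initialize_plots(self):
    # Combined train/test loss plot; draw the running average only for training.
    self.append_plotter("train_test_loss", ['loss', 'test_loss'], [True, False])
    # Test accuracy without an average line.
    self.append_plotter("test_accuracy", ['test_accuracy'], [False])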
def __init__(self, workingDir, prefix="", suffix=""): super(MyWF, self).__init__(workingDir, prefix, suffix) # === Custom member variables. === logstr = '' for param in LogParamList: # record useful params in logfile logstr += param + ': ' + str(globals()[param]) + ', ' self.logger.info(logstr) self.countEpoch = 0 self.countTrain = 0 self.device = 'cuda' # Dataloader for the training and testing self.train_loader = torch.utils.data.DataLoader(datasets.MNIST( '../data', train=True, download=True, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.1307, ), (0.3081, )) ])), batch_size=Batch, shuffle=True) self.test_loader = torch.utils.data.DataLoader(datasets.MNIST( '../data', train=False, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.1307, ), (0.3081, )) ])), batch_size=Batch, shuffle=True) self.train_data_iter = iter(self.train_loader) self.test_data_iter = iter(self.test_loader) self.model = Net().cuda() self.optimizer = optim.SGD(self.model.parameters(), lr=Lr) self.criterion = nn.NLLLoss() self.AV['loss'].avgWidth = 10 # there's a default plotter for 'loss' self.add_accumulated_value( 'accuracy', 10) # second param is the number of average data self.add_accumulated_value('test') self.add_accumulated_value('test_accuracy') self.AVP.append( WorkFlow.VisdomLinePlotter("train_loss", self.AV, ['loss'], [False])) # False: no average line self.AVP.append( WorkFlow.VisdomLinePlotter("test_loss", self.AV, ['test'], [False])) self.AVP.append( WorkFlow.VisdomLinePlotter("train_test_accuracy", self.AV, ['accuracy', 'test_accuracy'], [True, False])) self.AVP.append( WorkFlow.VisdomLinePlotter("train_test_loss", self.AV, ['loss', 'test'], [True, False]))
def __init__(self, workingDir, prefix="", suffix=""): super(MyWF, self).__init__(workingDir, prefix, suffix) # === Custom member variables. === logstr = '' for param in LogParamList: # record useful params in logfile logstr += param + ': ' + str(globals()[param]) + ', ' self.logger.info(logstr) self.countEpoch = 0 self.unlabelEpoch = 0 self.countTrain = 0 self.device = 'cuda' global TestBatch # Dataloader for the training and testing labeldataset = LabelDataset(balence=True, mean=mean, std=std) unlabeldataset = UnlabelDataset(batch=UnlabelBatch, balence=True, mean=mean, std=std) self.train_loader = DataLoader(labeldataset, batch_size=Batch, shuffle=True, num_workers=6) self.train_unlabel_loader = DataLoader(unlabeldataset, batch_size=1, shuffle=True, num_workers=4) if TestType == 1 or TestType == 0: testdataset = DukeSeqLabelDataset(labelfile=testlabelfile, batch=UnlabelBatch, data_aug=True, mean=mean, std=std) TestBatch = 1 elif TestType == 2: testdataset = FolderLabelDataset( imgdir='/home/wenshan/headingdata/val_drone', data_aug=False, mean=mean, std=std) elif TestType == 3: testdataset = FolderUnlabelDataset( imgdir='/datadrive/exp_bags/20180811_gascola', data_aug=False, include_all=True, mean=mean, std=std) TestBatch = 1 self.test_loader = torch.utils.data.DataLoader(testdataset, batch_size=TestBatch, shuffle=True, num_workers=1) self.train_data_iter = iter(self.train_loader) self.train_unlabeld_iter = iter(self.train_unlabel_loader) self.test_data_iter = iter(self.test_loader) self.model = MobileReg() if LoadPreMobile: self.model.load_pretrained_pth(pre_mobile_model) if LoadPreTrain: loadPretrain(self.model, pre_model) self.optimizer = optim.Adam(self.model.parameters(), lr=Lr) self.criterion = nn.MSELoss() self.AV['loss'].avgWidth = 100 # there's a default plotter for 'loss' self.add_accumulated_value( 'label_loss', 100) # second param is the number of average data self.add_accumulated_value('unlabel_loss', 100) self.add_accumulated_value('test_loss', 10) self.add_accumulated_value('test_label', 10) self.add_accumulated_value('test_unlabel', 10) self.AVP.append( WorkFlow.VisdomLinePlotter("total_loss", self.AV, ['loss', 'test_loss'], [True, True])) self.AVP.append( WorkFlow.VisdomLinePlotter("label_loss", self.AV, ['label_loss', 'test_label'], [True, True])) self.AVP.append( WorkFlow.VisdomLinePlotter("unlabel_loss", self.AV, ['unlabel_loss', 'test_unlabel'], [True, True]))