# Training-script setup: move the network to GPU, optionally wrap it for
# multi-GPU, optionally restore pretrained weights, then build the loss,
# optimizer, metric and early-stopping helper before the training loop.
# NOTE: `cuda`, `num_GPU`, `pre_trained`, `out_file`, `path`, `net`,
# `learning_rate`, `beta1`, `niter`, `batch_size`, `num_workers`, and
# `train_datatset_` are assumed to be defined earlier in this file
# (not visible in this chunk) — TODO confirm.
if cuda:
    net.cuda()
if num_GPU > 1:
    # Data-parallel replication across visible GPUs.
    net = nn.DataParallel(net)
if pre_trained:
    # Restore generator weights from '<out_file>/model/<path>netG.pth'.
    net.load_state_dict(torch.load('%s/model/' % out_file + path + 'netG.pth'))
    # print('Load success!')
else:
    # NOTE(review): dead branch — weight init is commented out, so an
    # untrained run starts from PyTorch's default initialization.
    pass
    # net.apply(weights_init)

########### LOSS & OPTIMIZER ##########
# 255 is treated as an ignore/void label in the segmentation masks.
criterion = nn.CrossEntropyLoss(ignore_index=255)
optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate, betas=(beta1, 0.999))
# 4-class segmentation metric — presumably matches the model's output
# channels; verify against the model definition.
metric = SegmentationMetric(4)
early_stopping = EarlyStopping(patience=7, verbose=True)

if __name__ == '__main__':
    start = time.time()
    net.train()
    for epoch in range(1, niter+1):
        # NOTE(review): the scheduler is re-created with last_epoch=-1 on
        # every epoch, so the cosine annealing schedule restarts each epoch
        # instead of spanning training — likely unintended; compare with the
        # sibling script that hoists this above the epoch loop.
        lr_adjust = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, 10, eta_min=0.0, last_epoch=-1)
        # NOTE(review): `train_datatset_` is a typo for `train_dataset_`;
        # kept as-is because the name is defined elsewhere in the file.
        # The dataset object is advanced manually via __next__(), yielding
        # one sub-dataset per step — TODO confirm its iteration contract.
        for i in range(0, train_datatset_.__len__(), batch_size):
            train_datatset_next = train_datatset_.__next__()
            train_loader = torch.utils.data.DataLoader(dataset=train_datatset_next, batch_size=batch_size, shuffle=True, num_workers=num_workers)
            for initial_image, semantic_image in train_loader:
                # print(initial_image.shape)
                # Move the input image batch and its label mask to the GPU.
                initial_image = initial_image.cuda()
                semantic_image = semantic_image.cuda()
# Evaluation-script setup: pick the dataset year, load the trained MSFCN2D
# checkpoint on CPU, prepare the metric and output directory, then iterate
# the test set. NOTE: `train_dataset`, `MSFCN2D`, and `SegmentationMetric`
# are assumed to be imported earlier in this file — TODO confirm.
cuda = True
path = '2017'
# The 2017 dataset carries 7 time steps per sample; other years carry 4
# — presumably reflects the imagery acquisition schedule; verify.
if path == '2017':
    time_series = 7
else:
    time_series = 4
# NOTE(review): no separator between the year and 'data', so this resolves
# to e.g. './2017data/test' — confirm that is the intended layout on disk.
data_path = './' + path + 'data/test'
test_datatset_ = train_dataset(data_path, time_series=time_series)
# MSFCN2D(time_series, 4, 4): presumably (input time steps, in-channels?,
# num classes) — verify against the MSFCN2D constructor.
model = MSFCN2D(time_series, 4, 4)
model_path = './checkpoint/' + model.name + '/model/' + path + 'netG.pth'
# map_location='cpu' lets the checkpoint load even when it was saved from GPU.
model.load_state_dict(torch.load(model_path, map_location='cpu'))
model.eval()
metric = SegmentationMetric(4)
out_file = './result/' + model.name + '/' + path + '/'
if cuda:
    model.cuda()
# Best-effort directory creation; OSError is swallowed because the
# directory may already exist from a previous run.
try:
    os.makedirs(out_file)
except OSError:
    pass

if __name__ == '__main__':
    start = time.time()
    # The dataset object is advanced manually via __next__(), one sample
    # group at a time — TODO confirm its iteration contract.
    for i in range(0, test_datatset_.__len__(), 1):
        test_datatset_next = test_datatset_.__next__()
        # NOTE(review): this call is truncated in the visible chunk; its
        # remaining arguments (batch_size, shuffle, ...) continue beyond.
        test_loader = torch.utils.data.DataLoader(dataset=test_datatset_next,
# Variant training-script setup: create the output directory, move the
# network to GPU (optionally multi-GPU), build loss/optimizer/metric and
# early stopping, then run an index-batched training loop driven by
# `data_iter_index`. NOTE: `out_file`, `cuda`, `num_GPU`, `net`,
# `learning_rate`, `class_num`, `niter`, `index`, and `train_datatset_`
# are assumed to be defined earlier in this file — TODO confirm.
try:
    os.makedirs(out_file)
    # NOTE(review): redundant — 'out_file + "/"' names the same directory
    # as the line above, and makedirs would raise anyway on the retry.
    os.makedirs(out_file + '/')
except OSError:
    # Swallowed because the directory may already exist from a prior run.
    pass
if cuda:
    net.cuda()
if num_GPU > 1:
    # Data-parallel replication across visible GPUs.
    net = nn.DataParallel(net)

########### LOSS & OPTIMIZER ##########
# 255 is treated as an ignore/void label in the segmentation masks.
criterion = nn.CrossEntropyLoss(ignore_index=255)
optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)
metric = SegmentationMetric(class_num)
early_stopping = EarlyStopping(patience=10, verbose=True)

if __name__ == '__main__':
    start = time.time()
    net.train()
    # Cosine-annealing LR over a 10-step period, floored at 1% of the base
    # learning rate. Unlike the sibling script, the scheduler is created
    # once, before the epoch loop, so the schedule is not reset per epoch.
    lr_adjust = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, 10, eta_min=learning_rate * 0.01, last_epoch=-1)
    for epoch in range(1, niter + 1):
        # 2000 // index iterations per epoch — presumably `index` is the
        # number of samples drawn per iteration, so each epoch sees ~2000
        # samples; verify against data_iter_index.
        for iter_num in trange(2000 // index, desc='train, epoch:%s' % epoch):
            # NOTE(review): `train_datatset_` is a typo for `train_dataset_`;
            # kept as-is because the name is defined elsewhere in the file.
            train_iter = train_datatset_.data_iter_index(index=index)
            for initial_image, semantic_image in train_iter:
                # print(initial_image.shape)
                # Move the input image batch and its label mask to the GPU.
                initial_image = initial_image.cuda()
                semantic_image = semantic_image.cuda()