def verification(model):
    """Run `model` over the verification split and write per-file predictions to CSV.

    Builds a {file name -> predicted label} mapping (first row is the CSV
    header pair 'FileName'/'Code') and writes it to Train_label.csv.
    """
    dict_content = {'FileName': 'Code'}
    dataset = ImageFolder('C:\CR_\data\Test', split=1, mod='verification')
    trainloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False,
                                              num_workers=0, pin_memory=True)
    for i, data in enumerate(trainloader, 0):
        # Take one image at a time (batch_size=1, no shuffling).
        inputs, file_name = data
        # Pipeline (translated from the original notes):
        # 1. Check whether the image contains objects other than cloud.
        # 2. Threshold-filter everything but cloud, keeping a cloud-only image.
        # 3. Crop into 300*300 windows, discarding cloud-free regions per policy.
        # 4. Run prediction on each cropped window.
        # 5. Accumulate the window predictions and take the max as the
        #    prediction for the uncropped image.
        dict_content[file_name[0]] = cutPictureAndComputResult(inputs, model)
        print(dict_content)
    # Write the results; `with` guarantees the handle is closed even on error.
    with open('C:\\CR_\\Train_label.csv', 'w', newline='') as csvFile3:
        writer2 = csv.writer(csvFile3)
        for key, value in dict_content.items():
            writer2.writerow([key, value])
def test_my(path):
    """Compute and print the Inception Score of all images found under `path`."""
    x = ImageFolder(path)
    # Materialize every image as a numpy array before scoring.
    imgs = [np.array(i) for i in x]
    mean, std = get_inception_score(imgs)
    print("score = {} +- {}".format(mean, std))
        # Tail of an evaluation routine — the function header and the loop that
        # opens `f` and accumulates all_mae/bias/all_mse are outside this fragment.
        count = count + 1  # per-sample counter; its loop header is not visible here
    f.close()
    # Fold the accumulated per-sample errors into mean metrics.
    MAE = all_mae / count
    Bias = bias / count
    RMSE = all_mse / count
    RMSE = np.sqrt(RMSE)  # root of the mean squared error
    # print(MAE, RMSE, Bias)
    return MAE, RMSE, Bias


if __name__ == '__main__':
    # Training/evaluation driver for the "Z_Net_170" experiment.
    imgsize = 170
    path_ = os.path.abspath(r'D:\1WXJ\DATA\CLASS_Japan\devide_3\train')
    # trainset = ImageFolder(path_+'/1and2_256/', imgsize, transform)#1and2_256_256_ZJ
    trainset = ImageFolder(path_ + '/train_Z_256_35281/', imgsize, transform)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=128,
                                              shuffle=True, num_workers=1)
    testset = ImageFolder(
        r'D:\1WXJ\DATA\CLASS_Japan\devide_3\test\2017-2019\Z', imgsize, transform)
    testloader = torch.utils.data.DataLoader(testset, batch_size=1, num_workers=1)
    # Output locations for model checkpoints and loss plots.
    pathoutput = r"D:\1WXJ\Estimate\Model_2\Z_Net_170"  #
    pathlosssave = os.path.join(r'D:\1WXJ\Estimate\plot_2\Z_Net_170')  #
    tys_time = {}  # map typhoon-time to wind
    totalloss = []      # per-epoch training loss history
    test_allloss = []   # per-epoch test loss history
    # Tail of an evaluation routine — the function header and the loop that
    # fills all_mae/bias/all_mse/count are outside this fragment.
    f.close()
    # Fold the accumulated per-sample errors into mean metrics.
    MAE = all_mae / count
    Bias = bias / count
    RMSE = all_mse / count
    RMSE = np.sqrt(RMSE)  # root of the mean squared error
    # print(MAE, RMSE, Bias)
    return MAE, RMSE, Bias


if __name__ == '__main__':
    # Training driver for the "Res_A_170_2_ZJ_0.0005" ResNet-34 experiment.
    imgsize = 256
    path_ = os.path.abspath(r'D:\1WXJ\DATA\WXJ_images\train_leibie')
    # trainset = ImageFolder(path_+'/1and2_256/', imgsize, transform)#1and2_256_256_ZJ
    trainset = ImageFolder(path_ + '/4and5_256_ZJ_really/', imgsize, transform)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=16,
                                              shuffle=True, num_workers=1)
    testset = ImageFolder(
        r'D:\1WXJ\DATA\CLASS_Japan\devide_3\test\2017-2019\Delete_5_zong212_typhoon\A_2',
        imgsize, transform)
    testloader = torch.utils.data.DataLoader(testset, batch_size=1, num_workers=1)
    # Output locations for model checkpoints and loss plots.
    pathoutput = r"D:\1WXJ\Estimate\Model_2\Res_A_170_2_ZJ_0.0005"  #
    pathlosssave = os.path.join(r'D:\1WXJ\Estimate\plot_2\Res_A_170_2_ZJ_0.0005')  #
    tys_time = {}  # map typhoon-time to wind
    totalloss = []      # per-epoch training loss history
    test_allloss = []   # per-epoch test loss history
    max_RMSE = 0  # best (lowest) RMSE seen so far — presumably used for checkpointing; confirm downstream
    if not os.path.exists(pathlosssave):
        os.makedirs(pathlosssave)
    if not os.path.exists(pathoutput):
        os.makedirs(pathoutput)
    model_path = r"D:\1WXJ\Estimate\Model\MODEL_49946_Res34/"
    # pretrained=False: weights are loaded from modelpath, not torchvision.
    net = resnet34(pretrained=False, modelpath=model_path, num_classes=1000)
    # batch_size=120, 1GPU Memory < 7000M
import os
import torch  # needed for torch.load below; missing from the original import block
from my_transform import demension_reduce
from my_transform import transform
from my_image_folder import ImageFolder
from define_net import Net
from torch.autograd import Variable  # duplicate import removed

if __name__ == '__main__':
    # Evaluate a trained Net over the test set and record per-sample wind
    # estimates (fragment is truncated after the first prediction statement).
    path_ = os.path.abspath('.')
    net = Net()
    net.load_state_dict(torch.load(path_ + '/net_relu.pth'))  # your net
    testset = ImageFolder(path_ + '/test_set/', transform)  # your test set
    f = open(path_ + '/result_relu.txt', 'w')  # where to write answer
    tys = {}  # map typhoon to its max wind
    tys_time = {}  # map typhoon-time to wind
    for i in range(0, testset.__len__()):
        image, actual = testset.__getitem__(i)
        # a batch with 1 sample
        image = image.expand(1, image.size(0), image.size(1), image.size(2))
        name = testset.__getitemName__(i)
        output = net(Variable(image))
        wind = output.data[0][0]  # output is a 1*1 tensor
import torch.utils.data # def testset_loss(testloader, network): # # loader = torch.utils.data.DataLoader(dataset,batch_size=8,num_workers=2) # net.eval() # all_loss = 0.0 # for i,data in enumerate(testloader,0): # inputs,labels = data # inputs, labels = Variable(inputs.cuda()), Variable(labels.cuda()) # outputs = network(inputs) # all_loss = all_loss + abs(labels[0]-outputs.data[0][0]) # return all_loss/i # net.train() if __name__ == '__main__': #path_ = os.path.abspath('.') model_path = r"D:\1WXJ\Estimate\Model\MODEL_49946_Res34/" trainset = ImageFolder(r'D:\五分類train\4and5/', transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=8, shuffle=True, num_workers=2) testset = ImageFolder( r'D:\1WXJ\DATA\CLASS_Japan\devide_3\test\predict_devide\3and4', transform) testloader = torch.utils.data.DataLoader(testset, batch_size=1, num_workers=2) pathoutput = r"D:\1WXJ\Estimate\Model\MODEL_45_Res34" pathlosssave = os.path.join(r'D:\1WXJ\Estimate\plot\plot_45_Res34') totalloss = [] test_allloss = [] tys_time = {} # map typhoon-time to wind
    # Tail of a loss-evaluation function — its header and the definitions of
    # `loader`, `all_loss`, `net`, and `function_loss_L1` are outside this fragment.
    for i, data in enumerate(loader, 0):
        inputs, labels = data
        inputs = Variable(inputs)
        labels = labels.squeeze()
        outputs = net(inputs.cuda())
        # Accumulate L1 loss on CPU against the integer class labels.
        all_loss = all_loss + function_loss_L1(outputs.cpu(), labels.long())
    # NOTE(review): divides by the LAST loop index, not the number of batches
    # (off by one; also raises if the loader is empty) — confirm intent.
    return all_loss / i


if __name__ == "__main__":
    # NOTE(review): time.clock() was deprecated in 3.3 and removed in Python
    # 3.8 — time.perf_counter() is the modern equivalent.
    start_time = time.clock()
    losses_his = [[], []]  # loss histories (presumably train/test) — confirm usage downstream
    train_acc = 0
    # Dataset loading
    trainset = ImageFolder('C:\CR_\data\img', split=0.8, mod='train')
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=100, shuffle=True,
                                              num_workers=5, pin_memory=True)
    testset = ImageFolder('C:\CR_\data\img', split=0.8, mod='test')
    # Model loading
    net = Net()
    #net = models.densenet121()  #pretrained=True
    # Train the model on CUDA when available
    if torch.cuda.is_available():
        net.cuda()
    # Initialize the weights of the first conv layer
    nn.init.xavier_uniform_(net.conv1[0].weight.data, gain=1)
    nn.init.constant_(net.conv1[0].bias.data, 0.1)
    # Tail of a loss-evaluation function — its header and the definitions of
    # `loader` and `network` are outside this fragment. NOTE(review): this
    # fragment is Python 2 code (see the `print net` statement below).
    all_loss = 0.0
    for i,data in enumerate(loader,0):
        inputs,labels = data
        inputs = Variable(inputs)
        outputs = network(inputs)
        # Accumulate absolute error of the single scalar output per batch.
        all_loss = all_loss + abs(labels[0]-outputs.data[0][0])
    # NOTE(review): divides by the LAST loop index, not the batch count
    # (off by one) — confirm intent.
    return all_loss/i


if __name__ == '__main__':
    path_ = os.path.abspath('.')
    trainset = ImageFolder(path_+'/train_set/',transform)
    trainloader = torch.utils.data.DataLoader(trainset,batch_size=8,
                                              shuffle=True,num_workers=2)
    testset = ImageFolder(path_+'/test_set/',transform)
    net = Net()
    # Xavier-initialize both conv layers. NOTE(review): init.xavier_uniform /
    # init.constant are the deprecated non-underscore variants (pre-0.4 torch).
    init.xavier_uniform(net.conv1.weight.data,gain=1)
    init.constant(net.conv1.bias.data,0.1)
    init.xavier_uniform(net.conv2.weight.data,gain=1)
    init.constant(net.conv2.bias.data,0.1)
    #net.load_state_dict(torch.load(path_+'net_relu.pth'))
    print net
    criterion = nn.L1Loss()
    optimizer = optim.Adam(net.parameters(),lr=0.001)
from define_net_WXJ import Net
from define_net_WXJ import Net_20
from define_net_WXJ import Net_80
from torch.autograd import Variable
# NOTE(review): os, torch, ImageFolder and transform are used below but not
# imported in this fragment — presumably imported above it; confirm.

if __name__ == '__main__':
    # Evaluate a trained Net_80 over the test set, writing per-sample results
    # to a text file (fragment is truncated inside the loop).
    path_ = os.path.abspath('.')
    net = Net_80()
    net.load_state_dict(
        torch.load(
            r'D:\1WXJ\Estimate\Model\WXJNet_3_256_80/70_net.pth'))  # your net
    testset = ImageFolder(
        r'D:\1WXJ\DATA\CLASS_Japan\devide_3\test\predict_devide\3and4_256_80/',
        transform)  # your test set
    # BUG FIX: the comment says answers are written here, but open() defaulted
    # to read mode — open for writing, as the sibling evaluation script does.
    f = open(r'D:\1WXJ\Estimate\plot\WXJNet_3_256_80_JH2\result_3_70.txt',
             'w')  # where to write answer
    tys = {}  # map typhoon to its max wind
    tys_time = {}  # map typhoon-time to wind
    for i in range(0, testset.__len__()):
        image, actual = testset.__getitem__(i)
        image = image.expand(1, image.size(0), image.size(1),
                             image.size(2))  # a batch with 1 sample
        name = testset.__getitemName__(i)