Example #1
def test(model, batch_size, save):
    # evaluate
    model.eval()
    torch_device = torch.device('cuda')
    # Loss function: mean squared error (MSE)
    criterion = nn.MSELoss()
    criterion = criterion.to(torch_device)
    eval_loss_dict = []

    test_loader = get_data.get_test_loader(batch_size)

    batch_tqdm = tqdm(enumerate(test_loader, 0), total=len(test_loader))
    for i, data in batch_tqdm:
        inputs, labels = data
        labels = get_data.get_size_labels(1, labels)
        inputs = inputs.cuda()
        labels = labels.cuda()
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss = loss.cuda()
        eval_loss_dict.append(loss.item())

        if save:
            # Write each output image in the current batch to a TIFF file
            for j in range(outputs.shape[0]):
                save_imgname = 'G:/evocnn/image_set/output/' + str(i * batch_size + j) + '.tif'
                img = TIFF.open(save_imgname, 'w')
                # Move the tensor to the CPU before converting to a numpy array for saving
                output_img = outputs[j].cpu().detach().numpy()
                img.write_image(output_img, write_rgb=True)
                img.close()

    mean_test_loss = np.mean(eval_loss_dict)
    std_test_loss = np.std(eval_loss_dict)
    print("valid mean:{},std:{}".format(mean_test_loss, std_test_loss))
Example #2
def train(model, batch_size, optimizer, model_type):
    torch_device = torch.device('cuda')
    model.cuda()
    if model_type == 1:
        train_loader = get_data.get_final_train_loader(batch_size)
    elif model_type == 2:
        train_loader = get_data.get_gauss50_final_train_loader(batch_size)
    else:
        train_loader = get_data.get_mixed_final_train_loader(batch_size)

    # Loss function: mean squared error (MSE)
    criterion = nn.MSELoss()
    criterion = criterion.to(torch_device)

    loss_dict = []
    num_batches = len(train_loader)  # number of batches per epoch
    # Train the model: iterate once over the training batches
    model.train()
    batch_tqdm = tqdm(enumerate(train_loader, 0), total=num_batches, ncols=100)
    for i, data in batch_tqdm:
        # Convert numpy arrays to torch tensors: prepare the inputs and labels
        inputs, labels = data
        labels = get_data.get_size_labels(1, labels)
        inputs = inputs.cuda()
        labels = labels.cuda()
        # labels = get_data.get_size_labels(indi.get_layer_size(),labels)

        # Forward pass: compute the network outputs
        optimizer.zero_grad()
        outputs = model(inputs)
        # Compute the loss
        loss = criterion(outputs, labels)
        loss = loss.cuda()

        # Backward pass and parameter update
        loss.backward()
        optimizer.step()

        # Record the loss for this batch
        loss_dict.append(loss.item())
    print('Loss: {:.4f}'.format(np.mean(loss_dict)))
    file_path = os.getcwd() + '/loss.txt'
    with open(file_path, 'a') as myfile:
        myfile.write(str(np.mean(loss_dict)))
        myfile.write("\n")
Example #3
    def parse_individual(self, indi):
        torch_device = torch.device('cuda')
        cnn = CNN(indi)
        cnn.cuda()
        print(cnn)
        # Model complexity: total parameter count for an input of size (220, 30, 30)
        complexity = get_total_params(cnn.cuda(), (220, 30, 30))

        train_loader = get_data.get_mixed_train_loader(self.batch_size)

        # Loss function: mean squared error (MSE)
        criterion = nn.MSELoss()
        criterion = criterion.to(torch_device)

        # Optimizer: Adam (SGD did not converge for this task)
        learning_rate = 0.004
        optimizer = torch.optim.Adam(cnn.parameters(), lr=learning_rate)
        loss_dict = []
        num_batches = len(train_loader)  # number of batches per epoch
        # Train the model: iterate once over the training batches
        cnn.train()
        for i, data in enumerate(train_loader, 0):
            # Convert numpy arrays to torch tensors: prepare the inputs and labels
            inputs, labels = data
            labels = get_data.get_size_labels(1, labels)
            inputs = inputs.cuda()
            labels = labels.cuda()
            # labels = get_data.get_size_labels(indi.get_layer_size(),labels)

            # Forward pass: compute the network outputs
            optimizer.zero_grad()
            outputs = cnn(inputs)
            # Compute the loss
            loss = criterion(outputs, labels)
            loss = loss.cuda()

            # Backward pass and parameter update
            loss.backward()
            optimizer.step()

            # Record the loss for this batch
            loss_dict.append(loss.item())
            if (i + 1) % 100 == 0:
                print('Batch [{}/{}], Loss: {:.4f}'.format(i + 1, num_batches, loss.item()))

        # evaluate
        cnn.eval()
        eval_loss_dict = []
        valid_loader = get_data.get_mixed_validate_loader(self.batch_size)
        for i, data in enumerate(valid_loader, 0):
            inputs, labels = data
            labels = get_data.get_size_labels(1, labels)
            inputs = inputs.cuda()
            labels = labels.cuda()
            outputs = cnn(inputs)
            loss = criterion(outputs, labels)
            loss = loss.cuda()
            eval_loss_dict.append(loss.item())

        mean_test_loss = np.mean(eval_loss_dict)
        std_test_loss = np.std(eval_loss_dict)
        print("valid mean:{},std:{}".format(mean_test_loss, std_test_loss))
        return mean_test_loss, std_test_loss, complexity
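
A hedged sketch of how parse_individual() might be called when evaluating a population of evolved architectures; the Evaluator class name, the `population` list, and the individual attribute names are assumptions, not part of the original source.

evaluator = Evaluator(batch_size=10)               # hypothetical class that defines parse_individual()
for indi in population:                            # `population` is an assumed list of encoded individuals
    mean_loss, std_loss, complexity = evaluator.parse_individual(indi)
    # Store the fitness statistics on the individual (attribute names are illustrative)
    indi.mean_loss = mean_loss
    indi.std_loss = std_loss
    indi.complexity = complexity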