Code Example #1
File: predict.py Project: hmilien/UdacityML
def eval(image_path, checkpoint_name, topk, gpu):

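    # Rebuild the model from the saved checkpoint and switch it to inference mode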
    model = model_utils.load_checkpoint(checkpoint_name, gpu)
    model.eval()
    if not gpu:
        model.cpu()

    image = utils.process_image(image_path)

    image = torch.from_numpy(image).unsqueeze(0)
    image = image.float()
    output = model(image)

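    # Take the k largest outputs; exp() maps the (log-softmax) scores back to probabilities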
    top_prob, top_labels = torch.topk(output, topk)
    top_prob = top_prob.exp()
    top_prob_array = top_prob.data.numpy()[0]

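    # Invert class_to_idx so output indices map back to class labels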
    inv_class_to_idx = {v: k for k, v in model.class_to_idx.items()}

    top_labels_data = top_labels.data.numpy()
    top_labels_list = top_labels_data[0].tolist()

    top_classes = [inv_class_to_idx[x] for x in top_labels_list]

    return top_prob_array, top_classes
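
The model_utils.load_checkpoint helper used above is not shown. As a point of reference, here is a minimal sketch of such a helper, assuming a torchvision backbone and hypothetical checkpoint keys ('arch', 'classifier', 'state_dict', 'class_to_idx'); the real project may differ:

import torch
from torchvision import models

def load_checkpoint(checkpoint_path, gpu=False):
    # Hypothetical reconstruction of the helper called in Example 1;
    # every checkpoint key name below is an assumption.
    checkpoint = torch.load(checkpoint_path, map_location='cuda' if gpu else 'cpu')
    model = getattr(models, checkpoint['arch'])(pretrained=True)  # e.g. 'vgg16'
    model.classifier = checkpoint['classifier']
    model.load_state_dict(checkpoint['state_dict'])
    model.class_to_idx = checkpoint['class_to_idx']
    return model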
Code Example #2
File: Trainer.py Project: aelskhawy/own_adl4cv
    def resume_training(self, n_epochs, model_path='./models/best_model.pth'):
        self.model, self.optimizer, self.epoch, self.best_val_loss = load_checkpoint(
            model_path, self.model, self.optimizer)

        # verify and restore optimizer parameters instead of re-initializing them
        self._init_optimizer(self.train_control)
        print("Loaded model and resuming training...")

        self.train(n_epochs)
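
resume_training expects load_checkpoint to return the model, optimizer, last epoch, and best validation loss. A minimal sketch under that contract (the checkpoint key names are assumptions, not the project's actual keys):

import torch

def load_checkpoint(path, model, optimizer):
    # Hypothetical resume helper matching the call in Example 2.
    checkpoint = torch.load(path, map_location='cpu')
    model.load_state_dict(checkpoint['model_state'])
    optimizer.load_state_dict(checkpoint['optimizer_state'])
    return model, optimizer, checkpoint['epoch'], checkpoint['best_val_loss']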
Code Example #3
def train():
        conf = Config()
        # Print the model configuration
        conf.dump()
        parser = argparse.ArgumentParser(description='Train an image classification model')
        parser.add_argument(
                '--resume_checkpoint', action='store', type=str, default='model/checkpoint.pth',
                help='Resume the model from this checkpoint and continue training; if '
                     '--resume_checkpoint is provided, --arch, --learning_rate, --hidden_units, and --drop_p are ignored')
        args = parser.parse_args()

        # Load the data
        dataloaders, class_to_idx = load_data(conf.data_directory)

        # Create the model, or resume from the checkpoint file if it exists
        if args.resume_checkpoint and os.path.exists(args.resume_checkpoint):
                # Load the checkpoint
                print('resume_checkpoint exists; loading the model')
                model, optimizer, epoch, history = load_checkpoint(
                        checkpoint_path=args.resume_checkpoint,
                        load_optimizer=True, gpu=conf.cuda)
                start_epoch = epoch + 1
        else:
                # Create a new model and optimizer
                print('resume_checkpoint not set or checkpoint file missing; creating a new model')
                model = create_model(
                        arch=conf.arch, class_to_idx=class_to_idx,
                        hidden_units=conf.hidden_units, drop_p=conf.dropout)
                optimizer = create_optimizer(model=model, lr=conf.learning_rate)
                start_epoch = 1
                history = None

        # Train the model
        history, best_epoch = train_model(
                dataloaders=dataloaders, model=model,
                optimizer=optimizer, gpu=conf.cuda, start_epoch=start_epoch,
                epochs=conf.epochs, train_history=history)

        # Evaluate the model on the test set
        test_acc = test_model(dataloader=dataloaders['test'], model=model, gpu=conf.cuda)
        print(f'Model accuracy on the test set: {(test_acc * 100):.2f}%')

        # Save the model
        save_checkpoint(
                save_path=conf.save_path+conf.save_name, epoch=best_epoch, model=model,
                optimizer=optimizer, history=history)

        # Plot the training history
        plot_history(history)
Code Example #4
def main():
    start_time = time()
    
    # Handle Arguments
    in_arg = get_input_args_predict()
    print(in_arg)
    
    # Load checkpoint and rebuild network
    model = model_utils.load_checkpoint(in_arg.input, in_arg.gpu)
    # Process image
    image = data_image_utils.process_image(in_arg.image_path)
    # Label mapping
    cat_to_name = data_image_utils.get_label_mapping(in_arg.category_names)
    # Predict
    probs, classes = model_utils.predict(image.unsqueeze(0), model, in_arg.top_k)
    model_utils.print_prediction(classes, probs, model.class_to_idx, cat_to_name)
    
    tot_time = time() - start_time
    print(f"\n** Total Elapsed Runtime: {int(tot_time // 3600)}:"
          f"{int(tot_time % 3600 // 60)}:{int(tot_time % 60)}")
Code Example #5
def predict():
        conf = Config()
        # Print the model configuration
        conf.dump()
        parser = argparse.ArgumentParser(description='Classify an image with a trained model')
        parser.add_argument(
                '--image_path', type=str, default='data/zhengjian/predict/test/3601216003722.jpg', help='Path of the image to classify')
        parser.add_argument(
                '--checkpoint', type=str, default='model/checkpoint.pth', help='Path to the saved model checkpoint')
        parser.add_argument(
                '--top_k', type=int, default=2, help='Return the top k most probable classes, default=2')
        args = parser.parse_args()

        # Load and preprocess the image, converting it to a Tensor
        image_tensor = process_image(image_path=args.image_path)

        # Load the model, optionally on the GPU
        model, _, _, _ = load_checkpoint(
                checkpoint_path=args.checkpoint, load_optimizer=False, gpu=conf.cuda)

        # Classify the image
        probabilities, predictions = classify_image(
                image_tensor=image_tensor, model=model, top_k=args.top_k, gpu=conf.cuda)

        # Classification results
        top_class = predictions[0]
        top_prob = probabilities[0]
        top_k = args.top_k
        print(f'\nMost likely class: {top_class.capitalize()} '
              f'with probability {top_prob:.4f}')
        print(f'\nTop-{top_k} classes: {predictions} '
              f'with probabilities {probabilities}')

        # Plot the prediction
        display_prediction(
                image_path=args.image_path,
                probabilities=probabilities,
                predictions=predictions)
Code Example #6
def main(in_args):

    ## Load the model with Checkpoint
    model = mutils.load_checkpoint(in_args.checkpoint)

    if in_args.gpu:
        device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    else:
        device = 'cpu'
    model.to(device)

    prob, classes = predict(in_args, model, device)

    with open(in_args.category_names, 'r') as f:
        cat_to_name = json.load(f)

    ## Need to determine index from image filename
    idx = None
    for d in in_args.image.strip().split('/'):
        if d.isdigit():
            idx = d

    if idx is None:
        sys.stdout.write("ERROR: Could not determine idx from image pathname. Exiting...\n")
        sys.exit(1)

    correct_val = cat_to_name[idx]
    sys.stdout.write("\nTruth: %s\n" % correct_val)

    predicted_names = [cat_to_name[cidx] for cidx in classes]
    sys.stdout.write("Returning %d top predictions:\n" %in_args.top_k)
    for i in range(len(prob)):
        sys.stdout.write('\t%d) Predicted %s with probability %.3f\n' % (i + 1, predicted_names[i], prob[i]))
Code Example #7
def train_model_residual_lowlight_rdn():

    device = DEVICE
    # Prepare the data
    train_set = HsiCubicTrainDataset('./data/train_lowlight_patchsize32/')
    #print('trainset32 training example:', len(train_set32))
    #train_set = HsiCubicTrainDataset('./data/train_lowlight/')

    #train_set_64 = HsiCubicTrainDataset('./data/train_lowlight_patchsize64/')

    #train_set_list = [train_set32, train_set_64]
    #train_set = ConcatDataset(train_set_list)  # samples must all be the same size, or concatenation fails
    print('total training example:', len(train_set))

    train_loader = DataLoader(dataset=train_set,
                              batch_size=BATCH_SIZE,
                              shuffle=True)

    # Load the test labels
    mat_src_path = './data/test_lowlight/origin/soup_bigcorn_orange_1ms.mat'
    test_label_hsi = scio.loadmat(mat_src_path)['label']

    # Load the test data
    batch_size = 1
    #test_data_dir = './data/test_lowlight/cuk12/'
    test_data_dir = './data/test_lowlight/cubic/'

    test_set = HsiCubicLowlightTestDataset(test_data_dir)
    test_dataloader = DataLoader(dataset=test_set,
                                 batch_size=batch_size,
                                 shuffle=False)

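    # Infer band dimensions from the first test batch; with batch_size 1, each batch holds one band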
    batch_size, channel, width, height = next(iter(test_dataloader))[0].shape

    band_num = len(test_dataloader)
    denoised_hsi = np.zeros((width, height, band_num))

    save_model_path = './checkpoints/hsirnd_stesim'
    if not os.path.exists(save_model_path):
        os.mkdir(save_model_path)

    # Create the model
    net = HSIRDNSTESim(K)
    init_params(net)
    net = nn.DataParallel(net).to(device)
    #net = net.to(device)

    # Create the optimizer
    #hsid_optimizer = optim.Adam(net.parameters(), lr=INIT_LEARNING_RATE, betas=(0.9, 0.999))
    hsid_optimizer = optim.Adam(net.parameters(), lr=INIT_LEARNING_RATE)
    scheduler = MultiStepLR(hsid_optimizer, milestones=[200, 400], gamma=0.5)
    #scheduler = CosineAnnealingLR(hsid_optimizer,T_max=600)

    # Define the loss function
    #criterion = nn.MSELoss()

    is_resume = RESUME
    # Resume training
    if is_resume:
        path_chk_rest = dir_utils.get_last_path(save_model_path,
                                                'model_latest.pth')
        model_utils.load_checkpoint(net, path_chk_rest)
        start_epoch = model_utils.load_start_epoch(path_chk_rest) + 1
        model_utils.load_optim(hsid_optimizer, path_chk_rest)

        for i in range(1, start_epoch):
            scheduler.step()
        new_lr = scheduler.get_lr()[0]
        print(
            '------------------------------------------------------------------------------'
        )
        print("==> Resuming Training with learning rate:", new_lr)
        print(
            '------------------------------------------------------------------------------'
        )

    global tb_writer
    tb_writer = get_summary_writer(log_dir='logs')

    gen_epoch_loss_list = []

    cur_step = 0

    first_batch = next(iter(train_loader))

    best_psnr = 0
    best_epoch = 0
    best_iter = 0
    if not is_resume:
        start_epoch = 1
    num_epoch = 600

    for epoch in range(start_epoch, num_epoch + 1):
        epoch_start_time = time.time()
        scheduler.step()
        print('epoch = ', epoch, 'lr={:.6f}'.format(scheduler.get_lr()[0]))
        print(scheduler.get_lr())

        gen_epoch_loss = 0

        net.train()
        #for batch_idx, (noisy, label) in enumerate([first_batch] * 300):
        for batch_idx, (noisy, cubic, label) in enumerate(train_loader):
            #print('batch_idx=', batch_idx)
            noisy = noisy.to(device)
            label = label.to(device)
            cubic = cubic.to(device)

            hsid_optimizer.zero_grad()
            #denoised_img = net(noisy, cubic)
            #loss = loss_fuction(denoised_img, label)

            residual = net(noisy, cubic)
            alpha = 0.8
            loss = recon_criterion(residual, label - noisy)
            #loss = alpha*recon_criterion(residual, label-noisy) + (1-alpha)*loss_function_mse(residual, label-noisy)
            #loss = recon_criterion(residual, label-noisy)
            loss.backward()  # calcu gradient
            hsid_optimizer.step()  # update parameter

            gen_epoch_loss += loss.item()

            if cur_step % display_step == 0:
                if cur_step > 0:
                    print(
                        f"Epoch {epoch}: Step {cur_step}: Batch_idx {batch_idx}: MSE loss: {loss.item()}"
                    )
                else:
                    print("Pretrained initial state")

            tb_writer.add_scalar("MSE loss", loss.item(), cur_step)

            # each loop iteration (one batch) counts as one step
            cur_step += 1

        gen_epoch_loss_list.append(gen_epoch_loss)
        tb_writer.add_scalar("mse epoch loss", gen_epoch_loss, epoch)

        #scheduler.step()
        #print("Decaying learning rate to %g" % scheduler.get_last_lr()[0])

        torch.save(
            {
                'gen': net.state_dict(),
                'gen_opt': hsid_optimizer.state_dict(),
            },
            f"{save_model_path}/hsid_rdn_4rdb_stesim_l1_loss_600epoch_patchsize32_{epoch}.pth"
        )

        # Evaluation
        net.eval()
        psnr_list = []
        for batch_idx, (noisy_test, cubic_test,
                        label_test) in enumerate(test_dataloader):
            noisy_test = noisy_test.type(torch.FloatTensor)
            label_test = label_test.type(torch.FloatTensor)
            cubic_test = cubic_test.type(torch.FloatTensor)

            noisy_test = noisy_test.to(DEVICE)
            label_test = label_test.to(DEVICE)
            cubic_test = cubic_test.to(DEVICE)

            with torch.no_grad():

                residual = net(noisy_test, cubic_test)
                denoised_band = noisy_test + residual

                denoised_band_numpy = denoised_band.cpu().numpy().astype(
                    np.float32)
                denoised_band_numpy = np.squeeze(denoised_band_numpy)

                denoised_hsi[:, :, batch_idx] = denoised_band_numpy

                if batch_idx == 49:
                    residual_squeezed = torch.squeeze(residual, dim=0)
                    denoised_band_squeezed = torch.squeeze(denoised_band, dim=0)
                    label_test_squeezed = torch.squeeze(label_test, dim=0)
                    noisy_test_squeezed = torch.squeeze(noisy_test, dim=0)
                    tb_writer.add_image(f"images/{epoch}_restored",
                                        denoised_band_squeezed,
                                        1,
                                        dataformats='CHW')
                    tb_writer.add_image(f"images/{epoch}_residual",
                                        residual_squeezed,
                                        1,
                                        dataformats='CHW')
                    tb_writer.add_image(f"images/{epoch}_label",
                                        label_test_squeezed,
                                        1,
                                        dataformats='CHW')
                    tb_writer.add_image(f"images/{epoch}_noisy",
                                        noisy_test_squeezed,
                                        1,
                                        dataformats='CHW')

            psnr = PSNR(denoised_hsi, test_label_hsi)
            psnr_list.append(psnr)

        mpsnr = np.mean(psnr_list)

        denoised_hsi_trans = denoised_hsi.transpose(2, 0, 1)
        test_label_hsi_trans = test_label_hsi.transpose(2, 0, 1)
        mssim = SSIM(denoised_hsi_trans, test_label_hsi_trans)
        sam = SAM(denoised_hsi_trans, test_label_hsi_trans)

        # Report PSNR, SSIM and SAM
        print("=====averPSNR:{:.4f}=====averSSIM:{:.4f}=====averSAM:{:.4f}".
              format(mpsnr, mssim, sam))
        tb_writer.add_scalars("validation metrics", {
            'average PSNR': mpsnr,
            'average SSIM': mssim,
            'average SAM': sam
        }, epoch)  # makes it easy to spot the best-performing epoch

        # Save the best model
        if psnr > best_psnr:
            best_psnr = psnr
            best_epoch = epoch
            best_iter = cur_step
            torch.save(
                {
                    'epoch': epoch,
                    'gen': net.state_dict(),
                    'gen_opt': hsid_optimizer.state_dict(),
                },
                f"{save_model_path}/hsid_rdn_4rdb_stesim_l1_loss_600epoch_patchsize32_best.pth"
            )

        print(
            "[epoch %d it %d PSNR: %.4f --- best_epoch %d best_iter %d Best_PSNR %.4f]"
            % (epoch, cur_step, psnr, best_epoch, best_iter, best_psnr))

        print(
            "------------------------------------------------------------------"
        )
        print("Epoch: {}\tTime: {:.4f}\tLoss: {:.4f}\tLearningRate {:.6f}".
              format(epoch,
                     time.time() - epoch_start_time, gen_epoch_loss,
                     INIT_LEARNING_RATE))
        print(
            "------------------------------------------------------------------"
        )

        # Save the latest model
        torch.save(
            {
                'epoch': epoch,
                'gen': net.state_dict(),
                'gen_opt': hsid_optimizer.state_dict()
            }, os.path.join(save_model_path, "model_latest.pth"))
    tb_writer.close()
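
Since this example saves its checkpoints with 'epoch', 'gen', and 'gen_opt' keys, the model_utils helpers it resumes with can be sketched directly from those torch.save calls (the helper bodies themselves are still an assumption):

import torch

def load_checkpoint(model, weights_path):
    # Restore the generator weights saved under the 'gen' key.
    model.load_state_dict(torch.load(weights_path)['gen'])

def load_start_epoch(weights_path):
    # The epoch counter is saved under the 'epoch' key.
    return torch.load(weights_path)['epoch']

def load_optim(optimizer, weights_path):
    # Optimizer state is saved under the 'gen_opt' key.
    optimizer.load_state_dict(torch.load(weights_path)['gen_opt'])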
Code Example #8
def train_model_residual_lowlight_twostage_gan_best():

    # Hyperparameters
    batchsize = 128
    init_lr = 0.001
    K_adjacent_band = 36
    display_step = 20
    display_band = 20
    is_resume = False
    lambda_recon = 10

    start_epoch = 1

    device = DEVICE
    # Prepare the data
    train_set = HsiCubicTrainDataset('./data/train_lowlight/')
    print('total training example:', len(train_set))

    train_loader = DataLoader(dataset=train_set,
                              batch_size=batchsize,
                              shuffle=True)

    # Load the test labels
    mat_src_path = './data/test_lowlight/origin/soup_bigcorn_orange_1ms.mat'
    test_label_hsi = scio.loadmat(mat_src_path)['label']

    # Load the test data
    test_batch_size = 1
    test_data_dir = './data/test_lowlight/cubic/'
    test_set = HsiCubicLowlightTestDataset(test_data_dir)
    test_dataloader = DataLoader(dataset=test_set,
                                 batch_size=test_batch_size,
                                 shuffle=False)

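    # Infer band dimensions from the first test batch; each test batch holds one band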
    batch_size, channel, width, height = next(iter(test_dataloader))[0].shape

    band_num = len(test_dataloader)
    denoised_hsi = np.zeros((width, height, band_num))

    # Create the generator model
    net = HSIDDenseNetTwoStage(K_adjacent_band)
    init_params(net)
    #net = nn.DataParallel(net).to(device)
    net = net.to(device)

    # Create the discriminator
    disc = DiscriminatorABC(2, 4)
    init_params(disc)
    disc = disc.to(device)
    disc_opt = torch.optim.Adam(disc.parameters(), lr=init_lr)

    num_epoch = 100
    print('epoch count == ', num_epoch)

    # Create the optimizer
    #hsid_optimizer = optim.Adam(net.parameters(), lr=INIT_LEARNING_RATE, betas=(0.9, 0.999))
    hsid_optimizer = optim.Adam(net.parameters(), lr=init_lr)

    #Scheduler
    scheduler = MultiStepLR(hsid_optimizer, milestones=[40, 60, 80], gamma=0.1)
    warmup_epochs = 3
    #scheduler_cosine = optim.lr_scheduler.CosineAnnealingLR(hsid_optimizer, num_epoch-warmup_epochs+40, eta_min=1e-7)
    #scheduler = GradualWarmupScheduler(hsid_optimizer, multiplier=1, total_epoch=warmup_epochs, after_scheduler=scheduler_cosine)
    #scheduler.step()

    # Resume training
    if is_resume:
        model_dir = './checkpoints'
        path_chk_rest = dir_utils.get_last_path(model_dir, 'model_latest.pth')
        model_utils.load_checkpoint(net, path_chk_rest)
        start_epoch = model_utils.load_start_epoch(path_chk_rest) + 1
        model_utils.load_optim(hsid_optimizer, path_chk_rest)
        model_utils.load_disc_checkpoint(disc, path_chk_rest)
        model_utils.load_disc_optim(disc_opt, path_chk_rest)

        for i in range(1, start_epoch):
            scheduler.step()
        new_lr = scheduler.get_lr()[0]
        print(
            '------------------------------------------------------------------------------'
        )
        print("==> Resuming Training with learning rate:", new_lr)
        print(
            '------------------------------------------------------------------------------'
        )

    # Define the loss function
    #criterion = nn.MSELoss()

    global tb_writer
    tb_writer = get_summary_writer(log_dir='logs')

    gen_epoch_loss_list = []

    cur_step = 0

    first_batch = next(iter(train_loader))

    best_psnr = 0
    best_epoch = 0
    best_iter = 0

    for epoch in range(start_epoch, num_epoch + 1):
        epoch_start_time = time.time()
        scheduler.step()
        #print(epoch, 'lr={:.6f}'.format(scheduler.get_last_lr()[0]))
        print('epoch = ', epoch, 'lr={:.6f}'.format(scheduler.get_lr()[0]))
        print(scheduler.get_lr())
        gen_epoch_loss = 0

        net.train()
        #for batch_idx, (noisy, label) in enumerate([first_batch] * 300):
        for batch_idx, (noisy, cubic, label) in enumerate(train_loader):
            #print('batch_idx=', batch_idx)
            noisy = noisy.to(device)
            label = label.to(device)
            cubic = cubic.to(device)

            ### Update discriminator ###
            disc_opt.zero_grad()  # Zero out the gradient before backpropagation
            with torch.no_grad():
                fake, fake_stage2 = net(noisy, cubic)
            #print('noisy shape =', noisy.shape, fake_stage2.shape)
            #fake.detach()
            disc_fake_hat = disc(fake_stage2.detach() + noisy,
                                 noisy)  # Detach generator
            disc_fake_loss = adv_criterion(disc_fake_hat,
                                           torch.zeros_like(disc_fake_hat))
            disc_real_hat = disc(label, noisy)
            disc_real_loss = adv_criterion(disc_real_hat,
                                           torch.ones_like(disc_real_hat))
            disc_loss = (disc_fake_loss + disc_real_loss) / 2
            disc_loss.backward(retain_graph=True)  # Update gradients
            disc_opt.step()  # Update optimizer

            ### Update generator ###
            hsid_optimizer.zero_grad()
            #denoised_img = net(noisy, cubic)
            #loss = loss_fuction(denoised_img, label)

            residual, residual_stage2 = net(noisy, cubic)
            disc_fake_hat = disc(residual_stage2 + noisy, noisy)
            gen_adv_loss = adv_criterion(disc_fake_hat,
                                         torch.ones_like(disc_fake_hat))

            alpha = 0.2
            beta = 0.2
            rec_loss = beta * (alpha*loss_fuction(residual, label-noisy) + (1-alpha) * recon_criterion(residual, label-noisy)) \
             + (1-beta) * (alpha*loss_fuction(residual_stage2, label-noisy) + (1-alpha) * recon_criterion(residual_stage2, label-noisy))

            loss = gen_adv_loss + lambda_recon * rec_loss

            loss.backward()  # calcu gradient
            hsid_optimizer.step()  # update parameter

            gen_epoch_loss += loss.item()

            if cur_step % display_step == 0:
                if cur_step > 0:
                    print(
                        f"Epoch {epoch}: Step {cur_step}: Batch_idx {batch_idx}: MSE loss: {loss.item()}"
                    )
                    print(
                        f"rec_loss =  {rec_loss.item()}, gen_adv_loss = {gen_adv_loss.item()}"
                    )

                else:
                    print("Pretrained initial state")

            tb_writer.add_scalar("MSE loss", loss.item(), cur_step)

            # each loop iteration (one batch) counts as one step
            cur_step += 1

        gen_epoch_loss_list.append(gen_epoch_loss)
        tb_writer.add_scalar("mse epoch loss", gen_epoch_loss, epoch)

        #scheduler.step()
        #print("Decaying learning rate to %g" % scheduler.get_last_lr()[0])

        torch.save(
            {
                'gen': net.state_dict(),
                'gen_opt': hsid_optimizer.state_dict(),
                'disc': disc.state_dict(),
                'disc_opt': disc_opt.state_dict()
            }, f"checkpoints/two_stage_hsid_dense_gan_{epoch}.pth")

        # Evaluation
        net.eval()
        for batch_idx, (noisy_test, cubic_test,
                        label_test) in enumerate(test_dataloader):
            noisy_test = noisy_test.type(torch.FloatTensor)
            label_test = label_test.type(torch.FloatTensor)
            cubic_test = cubic_test.type(torch.FloatTensor)

            noisy_test = noisy_test.to(DEVICE)
            label_test = label_test.to(DEVICE)
            cubic_test = cubic_test.to(DEVICE)

            with torch.no_grad():

                residual, residual_stage2 = net(noisy_test, cubic_test)
                denoised_band = noisy_test + residual_stage2

                denoised_band_numpy = denoised_band.cpu().numpy().astype(
                    np.float32)
                denoised_band_numpy = np.squeeze(denoised_band_numpy)

                denoised_hsi[:, :, batch_idx] = denoised_band_numpy

                if batch_idx == 49:
                    residual_squeezed = torch.squeeze(residual, dim=0)
                    residual_stage2_squeezed = torch.squeeze(residual_stage2, dim=0)
                    denoised_band_squeezed = torch.squeeze(denoised_band, dim=0)
                    label_test_squeezed = torch.squeeze(label_test, dim=0)
                    noisy_test_squeezed = torch.squeeze(noisy_test, dim=0)
                    tb_writer.add_image(f"images/{epoch}_restored",
                                        denoised_band_squeezed,
                                        1,
                                        dataformats='CHW')
                    tb_writer.add_image(f"images/{epoch}_residual",
                                        residual_squeezed,
                                        1,
                                        dataformats='CHW')
                    tb_writer.add_image(f"images/{epoch}_residual_stage2",
                                        residual_stage2_squeezed,
                                        1,
                                        dataformats='CHW')
                    tb_writer.add_image(f"images/{epoch}_label",
                                        label_test_squeezed,
                                        1,
                                        dataformats='CHW')
                    tb_writer.add_image(f"images/{epoch}_noisy",
                                        noisy_test_squeezed,
                                        1,
                                        dataformats='CHW')

        psnr = PSNR(denoised_hsi, test_label_hsi)
        ssim = SSIM(denoised_hsi, test_label_hsi)
        sam = SAM(denoised_hsi, test_label_hsi)

        # Report PSNR, SSIM and SAM
        print("=====averPSNR:{:.3f}=====averSSIM:{:.4f}=====averSAM:{:.3f}".
              format(psnr, ssim, sam))
        tb_writer.add_scalars("validation metrics", {
            'average PSNR': psnr,
            'average SSIM': ssim,
            'average SAM': sam
        }, epoch)  # makes it easy to spot the best-performing epoch

        # Save the best model
        if psnr > best_psnr:
            best_psnr = psnr
            best_epoch = epoch
            best_iter = cur_step
            torch.save(
                {
                    'epoch': epoch,
                    'gen': net.state_dict(),
                    'gen_opt': hsid_optimizer.state_dict(),
                    'disc': disc.state_dict(),
                    'disc_opt': disc_opt.state_dict()
                }, f"checkpoints/two_stage_hsid_dense_gan_best.pth")

        print(
            "[epoch %d it %d PSNR: %.4f --- best_epoch %d best_iter %d Best_PSNR %.4f]"
            % (epoch, cur_step, psnr, best_epoch, best_iter, best_psnr))

        print(
            "------------------------------------------------------------------"
        )
        print("Epoch: {}\tTime: {:.4f}\tLoss: {:.4f}\tLearningRate {:.6f}".
              format(epoch,
                     time.time() - epoch_start_time, gen_epoch_loss,
                     scheduler.get_lr()[0]))
        print(
            "------------------------------------------------------------------"
        )

        torch.save(
            {
                'epoch': epoch,
                'gen': net.state_dict(),
                'gen_opt': hsid_optimizer.state_dict(),
                'disc': disc.state_dict(),
                'disc_opt': disc_opt.state_dict()
            }, os.path.join('./checkpoints', "model_latest.pth"))

    tb_writer.close()
Code Example #9
File: predict.py Project: megha14/Flower-Classifier
results = parser.parse_args()

checkpoint = results.checkpoint
image = results.image_path
top_k = results.topk
gpu_mode = results.gpu
cat_names = results.cat_name_dir

device = torch.device("cpu")
if gpu_mode:
    device = torch.device("cuda" if torch.cuda.is_available()
                          else "cpu")
    
with open(cat_names, 'r') as f:
    cat_to_name = json.load(f)
    

# Load model
loaded_model = load_checkpoint(checkpoint)
loaded_model.to(device)


# Carry out prediction
probs, classes = predict(image, loaded_model, device, top_k)

# Print probabilities and predicted classes
labels = [cat_to_name[c] for c in classes]
for p, c, l in zip(probs, classes, labels):
    print("Probability is {0:2f} for class {1} with corresponding label {2}".format(p, c, l))
Code Example #10
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("image_path", default = "/home/workspace/aipnd-project/flowers/test/1/image_06754.jpg", help = "Test image path")
    parser.add_argument("checkpoint_path", default = "/home/workspace/ImageClassifier/dense121_checkpoint.pth", help = "Trained model checkpoint")
    parser.add_argument("--top_k",  default = 5, type = int, help = "Top k probable categories")
    #parser.add_argument("--hidden_units", default = 512, type= int, help = "Hidden units")
    parser.add_argument("--category_names", default = 'cat_to_name.json', type= str, help = "Category to names file")
    parser.add_argument("--gpu", action='store_true', default=False, help = "GPU")
    
    args = parser.parse_args()
    
    print('\n---------Parameters----------')
    print('gpu              = {!r}'.format(args.gpu))
    print('top_k            = {!r}'.format(args.top_k))
    print('arch             = {!r}'.format(args.checkpoint_path.split('/')[-1].split('_')[0]))
    print('Checkpoint       = {!r}'.format(args.checkpoint_path))
    print('-----------------------------\n')
    
    #Prediction
    model = load_checkpoint(args.checkpoint_path)
    probs, classes = predict(args.image_path, model, args.top_k, args.gpu)
   
    with open(args.category_names, 'r') as f:
        args.cat_to_name = json.load(f)
    names = [args.cat_to_name[str(i)] for i in classes]
    
    print("Probabailities of Top {!r} flowers: ".format(args.top_k),  probs)
    print("Names of Top {!r} flowers: ".format(args.top_k), names)
    print("")
Code Example #11

import torch
from torch import nn
from torch import optim

from model_utils import load_checkpoint, predict
from data_utils import load_data, label_mapping, process_image
from argument_parser import get_args_predict

args = get_args_predict()

if args.device == 'gpu' and torch.cuda.is_available():
    device = torch.device('cuda')
else:
    device = torch.device('cpu')

loaded_model = load_checkpoint(args.checkpoint, device)

probabilities, classes = predict(args.input, loaded_model, device, args.topk)

idx_to_name = label_mapping(args.json_file)
labels = [idx_to_name[str(i)] for i in classes]

# Print out result
for i in range(args.topk):
    print(
        f"Image is classified as a {labels[i]} with a probability of {round(probabilities[i] * 100, 2)}%"
    )
Code Example #12
parser.add_argument('--gpu', action='store_true', help='Use GPU for training.')
args = parser.parse_args()

gpu = args.gpu

# Load data
###########################
(dataloaders, class_to_idx) = load_data(args.data_directory)

# Create model
###########################
if args.resume_checkpoint:  # resume_checkpoint path is provided
    # load checkpoint
    (model, optimizer, epoch,
     history) = load_checkpoint(checkpoint_path=args.resume_checkpoint,
                                load_optimizer=True,
                                gpu=gpu)
    start_epoch = epoch + 1
else:
    # create new model and optimizer
    model = create_model(arch=args.arch,
                         class_to_idx=class_to_idx,
                         hidden_units=args.hidden_units,
                         drop_p=args.drop_p)
    optimizer = create_optimizer(model=model, lr=args.learning_rate)
    start_epoch = 1
    history = None

# Train model
###########################
history, best_epoch = train_model(dataloaders=dataloaders,
Code Example #13
from config import Config

from dataset import create_test_loader
from model_factory import get_model
from meanteacher import Tester

from model_utils import save_checkpoint, load_checkpoint

if __name__ == "__main__":
    cfg = Config()
    cfg.device = torch.device("cuda" if cfg.device_ids != "cpu" else "cpu")

    # dataset
    eval_loader = create_test_loader(cfg.data_dir, cfg)

    # create model
    model = get_model(cfg.model_arch, pretrained=cfg.pretrained)
    ema_model = get_model(cfg.model_arch, pretrained=cfg.pretrained, ema=True)

    # resume training / load trained weights
    last_epoch = 0
    if cfg.resume:
        model, ema_model, optimizer, last_epoch = load_checkpoint(
            model, ema_model, cfg.resume)

    # create trainer
    tester = Tester(cfg, model, ema_model)
    tester._set_device(cfg.device)

    results = tester(eval_loader)
Code Example #14
parser.add_argument('--gpu',
                    action='store_true',
                    help='Use GPU for inference. default=False.')
args = parser.parse_args()

gpu = args.gpu

# Load, process and convert image to Tensor
############################################
image_tensor = process_image(image_path=args.image_path)

# load model
# the model is moved to the device implied by gpu (bool) when it is loaded
############################################
model, _, _, _ = load_checkpoint(checkpoint_path=args.checkpoint,
                                 load_optimizer=False,
                                 gpu=gpu)

# Classify image
############################################
probabilities, predictions = classify_image(image_tensor=image_tensor,
                                            model=model,
                                            top_k=args.top_k,
                                            category_names=args.category_names,
                                            gpu=gpu)

# Show results
############################################
top_class = predictions[0]
top_prob = probabilities[0]
top_k = args.top_k
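
Examples 5 and 14 both rely on a classify_image helper that is not shown. A minimal sketch of what it plausibly looks like, assuming the model carries a class_to_idx attribute and ends in log-softmax (the body and both assumptions are hypothetical):

import json
import torch

def classify_image(image_tensor, model, top_k=5, category_names=None, gpu=False):
    # Hypothetical reconstruction of the helper called above.
    device = torch.device('cuda' if gpu and torch.cuda.is_available() else 'cpu')
    model.to(device)
    model.eval()
    with torch.no_grad():
        output = model(image_tensor.unsqueeze(0).to(device))  # add a batch dimension
    probs, indices = torch.exp(output).topk(top_k)            # log-softmax -> probabilities
    idx_to_class = {v: k for k, v in model.class_to_idx.items()}
    predictions = [idx_to_class[i] for i in indices[0].tolist()]
    if category_names:  # optionally map class ids to human-readable names
        with open(category_names, 'r') as f:
            cat_to_name = json.load(f)
        predictions = [cat_to_name[c] for c in predictions]
    return probs[0].tolist(), predictions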