print('Finished Training')
trained = True

net.load_state_dict(torch.load(PATH))
print('loaded parameters successfully')

print('for train')
net.eval()  # switch to eval mode (disables dropout)
with torch.no_grad():
    y_predict = net(Tensor(x_train))
predict = calculate_label(y_predict[:, 0].numpy(), w0)
analysis(x_train0, y_predict[:, 0].numpy(), y_train, w0)
y_train[y_train == -1] = 0.
predict[predict == -1] = 0.
vize.visualize_sample(x_train0[::seqLen], y_train[::seqLen])
vize.visualize_prediction(x_train0[::seqLen], y_train[::seqLen], predict)
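
# calculate_label is not defined in this snippet. Judging from how its output is
# used (labels in {-1, +1}, later remapped to {0, 1}), a plausible sketch,
# assuming w0 acts as a decision threshold on the raw score and numpy is
# imported as np, would be:
#
#     def calculate_label(scores, w0):
#         labels = np.ones_like(scores)
#         labels[scores < w0] = -1
#         return labels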

print('for validate')
net.eval()  # switch to eval mode (disables dropout)
with torch.no_grad():
    y_predict = net(Tensor(x_validate))
predict = calculate_label(y_predict[:, 0].numpy(), w0)
analysis(x_validate0, y_predict[:, 0].numpy(), y_validate, w0)
y_validate[y_validate == -1] = 0.
predict[predict == -1] = 0.
vize.visualize_sample(x_validate0[::seqLen], y_validate[::seqLen])
vize.visualize_prediction(x_validate0[::seqLen], y_validate[::seqLen], predict)

# print('for test')
# with torch.no_grad():
#     y_predict = net(Tensor(x_test))
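# the remaining steps of this disabled test pass would mirror the validate
# block above (sketch only; x_test0 and y_test are assumed to exist):
# predict = calculate_label(y_predict[:, 0].numpy(), w0)
# analysis(x_test0, y_predict[:, 0].numpy(), y_test, w0)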

Example #2

    def eval_run(self):
        # load checkpoint
        if self.checkpoint_path:
            self.saver.restore(self.session, self.checkpoint_path)
            log.info("Loaded from checkpoint!")

        log.infov("Start 1-epoch Inference and Evaluation")

        log.info("# of examples = %d", len(self.dataset))
        length_dataset = len(self.dataset)

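        # note: int(N / B) + 1 covers any remainder, but runs one surplus step
        # when the dataset size is an exact multiple of the batch size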
        max_steps = int(length_dataset / self.batch_size) + 1
        log.info("max_steps = %d", max_steps)

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(self.session,
                                               coord=coord, start=True)

        evaler = EvalManager()
        try:
            for s in range(max_steps):
                step, acc, step_time, batch_chunk, prediction_pred, prediction_gt, p_l = self.run_single_step(self.batch)

                question_array = batch_chunk['q']
                answer_array = batch_chunk['a']

                img = batch_chunk['img'][0]
                img *= self.img_std
                img += self.img_mean
                img = img.astype(np.uint8)

                nonrelational_indx = np.argmax(question_array[:, 30:], axis=1) < 2
                relational_indx = np.argmax(question_array[:, 30:], axis=1) > 1

                relational_pred_ans = prediction_pred[relational_indx]
                relational_ans = answer_array[relational_indx]

                nonrelational_pred_ans = prediction_pred[nonrelational_indx]
                nonrelational_ans = answer_array[nonrelational_indx]

                nonrelational_correct = np.sum(
                    np.argmax(nonrelational_pred_ans, axis=1) == np.argmax(nonrelational_ans, axis=1))
                relational_correct = np.sum(
                    np.argmax(relational_pred_ans, axis=1) == np.argmax(relational_ans, axis=1))

                if self.config.location:
                    p_l *= self.coords_std
                    p_l += self.coords_mean

                    location = batch_chunk['l']
                    location *= self.coords_std
                    location += self.coords_mean

                    iou = self.IoU(p_l, location)
                    print(iou)
                    r_iou = iou[relational_indx].tolist()
                    nr_iou = iou[nonrelational_indx].tolist()
                    print(r_iou, nr_iou, relational_indx, nonrelational_indx)
                    print('IoU:', np.mean(iou))
                else:
                    r_iou, nr_iou = 0, 0

                evaler.add_batch([relational_correct, len(relational_ans)],
                                 [nonrelational_correct, len(nonrelational_ans)],
                                 r_iou, nr_iou)

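                # note: `location` is only defined when self.config.location is
                # enabled; the visualization below assumes that flag is set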
                if self.config.visualize:
                    q = np.argmax(question_array[0][30:])
                    a = np.argmax(answer_array[0])
                    p_a = np.argmax(prediction_pred[0])
                    obj = np.argmax(question_array[0][:15])

                    visualize_prediction(img, q, ans_look_up[a], ans_look_up[p_a],
                                         location[0], p_l[0], obj_look_up[obj], id=s)

                self.log_step_message(s, acc, step_time)

        except Exception as e:
            coord.request_stop(e)

        coord.request_stop()
        try:
            coord.join(threads, stop_grace_period_secs=3)
        except RuntimeError as e:
            log.warn(str(e))

        evaler.report()
        log.infov("Evaluation complete.")
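
# The EvalManager used above is not shown in this snippet. A purely hypothetical
# sketch, based only on how add_batch() and report() are called here: it
# accumulates per-batch correct counts for relational / non-relational questions
# plus the optional IoU lists, and prints aggregate numbers at the end.
import numpy as np


class EvalManager(object):
    def __init__(self):
        self.rel_correct = 0       # correct answers to relational questions
        self.rel_total = 0
        self.nonrel_correct = 0   # correct answers to non-relational questions
        self.nonrel_total = 0
        self.rel_iou = []          # per-example IoU values (only if location is enabled)
        self.nonrel_iou = []

    def add_batch(self, rel, nonrel, r_iou, nr_iou):
        # rel / nonrel are [num_correct, num_examples] pairs for one batch
        self.rel_correct += rel[0]
        self.rel_total += rel[1]
        self.nonrel_correct += nonrel[0]
        self.nonrel_total += nonrel[1]
        if isinstance(r_iou, list):
            self.rel_iou.extend(r_iou)
        if isinstance(nr_iou, list):
            self.nonrel_iou.extend(nr_iou)

    def report(self):
        print('relational accuracy: %.4f' % (self.rel_correct / max(self.rel_total, 1)))
        print('non-relational accuracy: %.4f' % (self.nonrel_correct / max(self.nonrel_total, 1)))
        if self.rel_iou:
            print('relational mean IoU: %.4f' % np.mean(self.rel_iou))
        if self.nonrel_iou:
            print('non-relational mean IoU: %.4f' % np.mean(self.nonrel_iou))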

Example #3

net.load_state_dict(torch.load(PATH))
print('loaded parameters successfully')

print('for train')
net.eval()  # switch to eval mode (disables dropout)
with torch.no_grad():
    y_predict = net(Tensor(x_train))
y_predict = y_predict[:, 0]
predict = torch.ones_like(y_predict)
predict[y_predict < 0] = -1
analysis(x_train0, predict.numpy(), y_train)
y_train[y_train == -1] = 0.
predict[predict == -1] = 0.
vize.visualize_sample(x_train0[seqLen - 1::seqLen], y_train)
vize.visualize_prediction(x_train0[seqLen - 1::seqLen], y_train,
                          predict.numpy())

print('for validate')
net.eval()  # switch to eval mode (disables dropout)
with torch.no_grad():
    y_predict = net(Tensor(x_validate))
y_predict = y_predict[:, 0]
predict = torch.ones_like(y_predict)
predict[y_predict < 0] = -1
analysis(x_validate0, predict.numpy(), y_validate)
y_validate[y_validate == -1] = 0.
predict[predict == -1] = 0.
vize.visualize_sample(x_validate0[seqLen - 1::seqLen], y_validate)
vize.visualize_prediction(x_validate0[seqLen - 1::seqLen], y_validate,
                          predict.numpy())
#

Example #4

def test(args):
    # data transforms
    input_transform = transform.Compose([
        transform.ToTensor(),
        transform.Normalize([.485, .456, .406], [.229, .224, .225])
    ])

    # dataset
    if args.eval:  # set split='val' for validation set testing
        testset = get_edge_dataset(args.dataset,
                                   split='val',
                                   mode='testval',
                                   transform=input_transform,
                                   crop_size=args.crop_size)
    else:  # set split='vis' for visualization
        testset = get_edge_dataset(args.dataset,
                                   split='vis',
                                   mode='vis',
                                   transform=input_transform,
                                   crop_size=args.crop_size)

    # output folder
    if args.eval:
        outdir_list_side5 = []
        outdir_list_fuse = []
        for i in range(testset.num_class):
            outdir_side5 = '%s/%s/%s_val/side5/class_%03d' % (
                args.dataset, args.model, args.checkname, i + 1)
            if not os.path.exists(outdir_side5):
                os.makedirs(outdir_side5)
            outdir_list_side5.append(outdir_side5)

            outdir_fuse = '%s/%s/%s_val/fuse/class_%03d' % (
                args.dataset, args.model, args.checkname, i + 1)
            if not os.path.exists(outdir_fuse):
                os.makedirs(outdir_fuse)
            outdir_list_fuse.append(outdir_fuse)

    else:
        outdir = '%s/%s/%s_vis' % (args.dataset, args.model, args.checkname)
        if not os.path.exists(outdir):
            os.makedirs(outdir)

    # dataloader
    loader_kwargs = {'num_workers': args.workers, 'pin_memory': True} \
        if args.cuda else {}
    test_data = data.DataLoader(testset,
                                batch_size=args.test_batch_size,
                                drop_last=False,
                                shuffle=False,
                                collate_fn=test_batchify_fn,
                                **loader_kwargs)

    model = get_edge_model(
        args.model,
        dataset=args.dataset,
        backbone=args.backbone,
        norm_layer=BatchNorm2d,
        crop_size=args.crop_size,
    )

    # resuming checkpoint
    if args.resume is None or not os.path.isfile(args.resume):
        raise RuntimeError("=> no checkpoint found at '{}'".format(
            args.resume))
    checkpoint = torch.load(args.resume)
    # strict=False so that it is compatible with models saved by older PyTorch versions
    model.load_state_dict(checkpoint['state_dict'], strict=False)

    if args.cuda:
        model = DataParallelModel(model).cuda()
    print(model)

    model.eval()
    tbar = tqdm(test_data)

    if args.eval:
        for i, (images, im_paths, im_sizes) in enumerate(tbar):
            with torch.no_grad():
                images = [image.unsqueeze(0) for image in images]
                images = torch.cat(images, 0)
                outputs = model(images.float())

                num_gpus = len(os.environ['CUDA_VISIBLE_DEVICES'].split(','))
                if num_gpus == 1:
                    outputs = [outputs]

                # extract the side5 output and fuse output from outputs
                side5_list = []
                fuse_list = []
                for i in range(len(outputs)):  # iterate over the output of each GPU
                    im_size = tuple(im_sizes[i].numpy())
                    output = outputs[i]

                    side5 = output[0].squeeze_()
                    side5 = side5.sigmoid_().cpu().numpy()
                    side5 = side5[:, 0:im_size[1], 0:im_size[0]]

                    fuse = output[1].squeeze_()
                    fuse = fuse.sigmoid_().cpu().numpy()
                    fuse = fuse[:, 0:im_size[1], 0:im_size[0]]

                    side5_list.append(side5)
                    fuse_list.append(fuse)

                for predict, impath in zip(side5_list, im_paths):
                    for i in range(predict.shape[0]):
                        predict_c = predict[i]
                        path = os.path.join(outdir_list_side5[i], impath)
                        io.imsave(path, predict_c)

                for predict, impath in zip(fuse_list, im_paths):
                    for i in range(predict.shape[0]):
                        predict_c = predict[i]
                        path = os.path.join(outdir_list_fuse[i], impath)
                        io.imsave(path, predict_c)
    else:
        for i, (images, masks, im_paths, im_sizes) in enumerate(tbar):
            with torch.no_grad():
                images = [image.unsqueeze(0) for image in images]
                images = torch.cat(images, 0)
                outputs = model(images.float())

                num_gpus = len(os.environ['CUDA_VISIBLE_DEVICES'].split(','))
                if num_gpus == 1:
                    outputs = [outputs]

                # extract the side5 output and fuse output from outputs
                side5_list = []
                fuse_list = []
                for i in range(len(outputs)):  # iterate over the output of each GPU
                    im_size = tuple(im_sizes[i].numpy())
                    output = outputs[i]

                    side5 = output[0].squeeze_()
                    side5 = side5.sigmoid_().cpu().numpy()
                    side5 = side5[:, 0:im_size[1], 0:im_size[0]]

                    fuse = output[1].squeeze_()
                    fuse = fuse.sigmoid_().cpu().numpy()
                    fuse = fuse[:, 0:im_size[1], 0:im_size[0]]

                    side5_list.append(side5)
                    fuse_list.append(fuse)

                # visualize ground truth
                for gt, impath in zip(masks, im_paths):
                    outname = os.path.splitext(impath)[0] + '_gt.png'
                    path = os.path.join(outdir, outname)
                    visualize_prediction(args.dataset, path, gt)

                # visualize side5 output
                for predict, impath in zip(side5_list, im_paths):
                    outname = os.path.splitext(impath)[0] + '_side5.png'
                    path = os.path.join(outdir, outname)
                    visualize_prediction(args.dataset, path, predict)

                # visualize fuse output
                for predict, impath in zip(fuse_list, im_paths):
                    outname = os.path.splitext(impath)[0] + '_fuse.png'
                    path = os.path.join(outdir, outname)
                    visualize_prediction(args.dataset, path, predict)
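
# test_batchify_fn (passed as collate_fn above) is imported from elsewhere and not
# shown here. Because the test images can have different sizes, the collate
# function has to return lists rather than stacked tensors; a minimal sketch of
# that behaviour, under that assumption, is:
import torch


def test_batchify_fn(data):
    """Group a batch field-wise, keeping variable-size samples as plain lists."""
    if isinstance(data[0], (str, torch.Tensor)):
        return list(data)
    elif isinstance(data[0], (tuple, list)):
        data = zip(*data)
        return [test_batchify_fn(d) for d in data]
    raise TypeError('unsupported batch element type: {}'.format(type(data[0])))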